hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3865b1827c97a761c449a50ba5bd9d5d47a5ae | 428 | py | Python | logger/logger.py | LalithAdhithya/HandWritten-Digit-Recognition-using-pytorch | b7e99f70a95996886dbdfbfe1d336eaf98eb0cf3 | [
"MIT"
] | 1 | 2021-11-20T03:10:09.000Z | 2021-11-20T03:10:09.000Z | logger/logger.py | ketangangal/HandWritten-Digit-Recognition-Pytorch | 622772c23a59dece9d758338ab4be35064e1c88f | [
"MIT"
] | null | null | null | logger/logger.py | ketangangal/HandWritten-Digit-Recognition-Pytorch | 622772c23a59dece9d758338ab4be35064e1c88f | [
"MIT"
] | null | null | null | from datetime import datetime
class Logger:
def __init__(self, file="logger/general/logs.log"):
self.file = file
def info(self, log_type, log_message):
now = datetime.now()
current_time = now.strftime("%d-%m-%Y %H:%M:%S")
with open(self.file,"a+") as file:
file.write("[ " + current_time + " ~" + " Log_Type:" + log_type + " ] ~ " + log_message + "\n")
file.close() | 32.923077 | 107 | 0.565421 | from datetime import datetime
class Logger:
def __init__(self, file="logger/general/logs.log"):
self.file = file
def info(self, log_type, log_message):
now = datetime.now()
current_time = now.strftime("%d-%m-%Y %H:%M:%S")
with open(self.file,"a+") as file:
file.write("[ " + current_time + " ~" + " Log_Type:" + log_type + " ] ~ " + log_message + "\n")
file.close() | true | true |
1c38667ce9ae88fa83fe5e9d1903cb559ab2f6b4 | 4,908 | py | Python | torchtime/transforms/functional.py | VincentSch4rf/torchtime | bebd006cd67b31c342e0658285c9771c27411df0 | [
"Apache-2.0"
] | 4 | 2022-02-21T21:23:16.000Z | 2022-03-28T09:06:14.000Z | torchtime/transforms/functional.py | VincentSch4rf/torchtime | bebd006cd67b31c342e0658285c9771c27411df0 | [
"Apache-2.0"
] | null | null | null | torchtime/transforms/functional.py | VincentSch4rf/torchtime | bebd006cd67b31c342e0658285c9771c27411df0 | [
"Apache-2.0"
] | null | null | null | import warnings
from typing import Any, List, Sequence, Tuple, Optional, Union, Set
import numpy as np
import torch
from torch import Tensor
import torch.nn.functional as F
from ..exceptions import DataConversionWarning
from ..utils import _check_unknown
@torch.jit.unused
def _is_numpy(ts: Any) -> bool:
return isinstance(ts, np.ndarray)
@torch.jit.unused
def _is_numpy_timeseries(ts: Any) -> bool:
return _is_numpy(ts) and ts.ndim in {1, 2}
def pad(series: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor:
if not isinstance(padding, (tuple, list)):
raise TypeError("Got inappropriate padding arg")
if not isinstance(fill, (int, float)):
raise TypeError("Got inappropriate fill arg")
if not isinstance(padding_mode, str):
raise TypeError("Got inappropriate padding_mode arg")
if isinstance(padding, tuple):
padding = list(padding)
if isinstance(padding, list) and len(padding) not in [1, 2]:
raise ValueError("Padding must be an int or a 1 or 2 element tuple, not a " +
"{} element tuple".format(len(padding)))
if padding_mode not in ["constant", "replicate", "reflect"]:
raise ValueError("Padding mode should be either constant, replicate or reflect")
out_dtype = series.dtype
need_cast = False
if (padding_mode != "constant") and series.dtype not in (torch.float32, torch.float64):
# Temporary cast input tensor to float until pytorch issue is resolved :
# https://github.com/pytorch/pytorch/issues/40763
need_cast = True
series = series.to(torch.float32)
series = F.pad(series, padding, mode=padding_mode, value=float(fill))
if need_cast:
series = series.to(out_dtype)
return series
def normalize(tensor: Tensor, mean: Sequence[float], std: Sequence[float], inplace: bool = False) -> Tensor:
"""Normalize a float tensor time series with mean and standard deviation.
.. note::
This transform acts out of place by default, i.e., it does not mutate the input tensor.
See :class:`~torchtime.transforms.Normalize` for more details.
Args:
tensor (Tensor): Float tensor time series of size (C, L) or (B, C, L) to be normalized.
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
inplace(bool,optional): Bool to make this operation inplace.
Returns:
Tensor: Normalized Tensor time series.
"""
if not isinstance(tensor, torch.Tensor):
raise TypeError('Input tensor should be a torch tensor. Got {}.'.format(type(tensor)))
if not tensor.is_floating_point():
raise TypeError('Input tensor should be a float tensor. Got {}.'.format(tensor.dtype))
if tensor.ndim < 2:
raise ValueError('Expected tensor to be a tensor time series of size (..., C, L). Got tensor.size() = '
'{}.'.format(tensor.size()))
if not inplace:
tensor = tensor.clone()
dtype = tensor.dtype
mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
if (std == 0).any():
raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))
if mean.ndim == 1:
mean = mean.view(-1, 1)
if std.ndim == 1:
std = std.view(-1, 1)
tensor.sub_(mean).div_(std)
return tensor
def column_or_1d(y, *, warn=False) -> np.ndarray:
"""Ravel column or 1d numpy array, else raises an error.
Parameters
----------
y : array-like
warn : bool, default=False
To control display of warnings.
Returns
-------
y : ndarray
"""
y = np.asarray(y)
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning,
stacklevel=2,
)
return np.ravel(y)
raise ValueError(
"y should be a 1d array, got an array of shape {} instead.".format(shape)
)
def encode_labels(targets: List[Any], classes: Optional[List[Any]] = None) -> Tuple[List[Any], Tensor]:
if classes is None:
classes = set(targets)
diff = _check_unknown(targets, classes)
if diff:
raise ValueError(f"y contains previously unseen labels: {str(diff)}")
table = {val: i for i, val in enumerate(classes)}
return classes, torch.as_tensor([table[v] for v in targets])
# r = np.searchsorted(classes, targets)
# return classes, torch.as_tensor(r)
| 34.808511 | 116 | 0.639976 | import warnings
from typing import Any, List, Sequence, Tuple, Optional, Union, Set
import numpy as np
import torch
from torch import Tensor
import torch.nn.functional as F
from ..exceptions import DataConversionWarning
from ..utils import _check_unknown
@torch.jit.unused
def _is_numpy(ts: Any) -> bool:
return isinstance(ts, np.ndarray)
@torch.jit.unused
def _is_numpy_timeseries(ts: Any) -> bool:
return _is_numpy(ts) and ts.ndim in {1, 2}
def pad(series: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor:
if not isinstance(padding, (tuple, list)):
raise TypeError("Got inappropriate padding arg")
if not isinstance(fill, (int, float)):
raise TypeError("Got inappropriate fill arg")
if not isinstance(padding_mode, str):
raise TypeError("Got inappropriate padding_mode arg")
if isinstance(padding, tuple):
padding = list(padding)
if isinstance(padding, list) and len(padding) not in [1, 2]:
raise ValueError("Padding must be an int or a 1 or 2 element tuple, not a " +
"{} element tuple".format(len(padding)))
if padding_mode not in ["constant", "replicate", "reflect"]:
raise ValueError("Padding mode should be either constant, replicate or reflect")
out_dtype = series.dtype
need_cast = False
if (padding_mode != "constant") and series.dtype not in (torch.float32, torch.float64):
need_cast = True
series = series.to(torch.float32)
series = F.pad(series, padding, mode=padding_mode, value=float(fill))
if need_cast:
series = series.to(out_dtype)
return series
def normalize(tensor: Tensor, mean: Sequence[float], std: Sequence[float], inplace: bool = False) -> Tensor:
if not isinstance(tensor, torch.Tensor):
raise TypeError('Input tensor should be a torch tensor. Got {}.'.format(type(tensor)))
if not tensor.is_floating_point():
raise TypeError('Input tensor should be a float tensor. Got {}.'.format(tensor.dtype))
if tensor.ndim < 2:
raise ValueError('Expected tensor to be a tensor time series of size (..., C, L). Got tensor.size() = '
'{}.'.format(tensor.size()))
if not inplace:
tensor = tensor.clone()
dtype = tensor.dtype
mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
if (std == 0).any():
raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))
if mean.ndim == 1:
mean = mean.view(-1, 1)
if std.ndim == 1:
std = std.view(-1, 1)
tensor.sub_(mean).div_(std)
return tensor
def column_or_1d(y, *, warn=False) -> np.ndarray:
y = np.asarray(y)
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning,
stacklevel=2,
)
return np.ravel(y)
raise ValueError(
"y should be a 1d array, got an array of shape {} instead.".format(shape)
)
def encode_labels(targets: List[Any], classes: Optional[List[Any]] = None) -> Tuple[List[Any], Tensor]:
if classes is None:
classes = set(targets)
diff = _check_unknown(targets, classes)
if diff:
raise ValueError(f"y contains previously unseen labels: {str(diff)}")
table = {val: i for i, val in enumerate(classes)}
return classes, torch.as_tensor([table[v] for v in targets])
| true | true |
1c38670351f57dce763404bbccdc3ada447c883f | 24,105 | py | Python | reservoirpy/_base.py | neuronalX/reservoirpy | b150f097451edfa71dbfaee474149b628cd786d4 | [
"MIT"
] | 18 | 2019-03-01T17:15:17.000Z | 2020-12-08T13:12:49.000Z | reservoirpy/_base.py | neuronalX/FunkyReservoir | 37751e9a6be76298e1c14b3816f191f351bfb606 | [
"MIT"
] | 4 | 2019-02-19T09:25:50.000Z | 2020-06-04T16:01:54.000Z | reservoirpy/_base.py | neuronalX/FunkyReservoir | 37751e9a6be76298e1c14b3816f191f351bfb606 | [
"MIT"
] | 12 | 2019-02-08T08:03:42.000Z | 2020-12-16T09:35:47.000Z | # Author: Nathan Trouvain at 15/02/2022 <nathan.trouvain@inria.fr>
# Licence: MIT License
# Copyright: Xavier Hinaut (2018) <xavier.hinaut@inria.fr>
from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import Any, Dict, Iterator, Sequence, Union
from uuid import uuid4
import numpy as np
from .type import MappedData, Shape
from .utils import progress
from .utils.validation import check_vector, is_mapping
def _distant_model_inputs(model):
"""Get inputs for distant Nodes in a Model used as feedabck or teacher.
These inputs should be already computed by other Nodes."""
input_data = {}
for p, c in model.edges:
if p in model.input_nodes:
input_data[c.name] = p.state_proxy()
return input_data
def _remove_input_for_feedback(node) -> Union["Node", "Model"]:
"""Remove inputs nodes from feedback Model and gather remaining nodes
into a new Model. Allow getting inputs for feedback model from its input
nodes states."""
from .model import Model
all_nodes = set(node.nodes)
input_nodes = set(node.input_nodes)
filtered_nodes = list(all_nodes - input_nodes)
filtered_edges = [edge for edge in node.edges if edge[0] not in input_nodes]
# return a single Node if Model - Inputs = Node
# else return Model - Inputs = Reduced Model
if len(filtered_nodes) == 1:
return list(filtered_nodes)[0]
return Model(filtered_nodes, filtered_edges, name=str(uuid4()))
def check_one_sequence(
x: Union[np.ndarray, Sequence[np.ndarray]],
expected_dim=None,
caller=None,
allow_timespans=True,
):
caller_name = caller.name + "is " if caller is not None else ""
if expected_dim is not None and not hasattr(expected_dim, "__iter__"):
expected_dim = (expected_dim,)
x_new = check_vector(
x, allow_reshape=True, allow_timespans=allow_timespans, caller=caller
)
data_dim = x_new.shape[1:]
# Check x dimension
if expected_dim is not None:
if len(expected_dim) != len(data_dim):
raise ValueError(
f"{caller_name} expecting {len(expected_dim)} inputs "
f"but received {len(data_dim)}: {x_new}."
)
for dim in expected_dim:
if all([dim != ddim for ddim in data_dim]):
raise ValueError(
f"{caller_name} expecting data of shape "
f"{expected_dim} but received shape {data_dim}."
)
return x_new
# expected_dim = ((m, n), o, (p, q, r), ...)
def check_n_sequences(
x,
expected_dim=None,
allow_n_sequences=True,
allow_n_inputs=True,
allow_timespans=True,
caller=None,
):
if expected_dim is not None:
if not hasattr(expected_dim, "__iter__"):
expected_dim = (expected_dim,)
n_inputs = len(expected_dim)
# I
if n_inputs > 1:
if isinstance(x, (list, tuple)):
x_new = [x[i] for i in range(len(x))]
timesteps = []
for i in range(n_inputs):
dim = (expected_dim[i],)
x_new[i] = check_n_sequences(
x[i],
expected_dim=dim,
caller=caller,
allow_n_sequences=allow_n_sequences,
allow_timespans=allow_timespans,
allow_n_inputs=allow_n_inputs,
)
if isinstance(x_new[i], (list, tuple)):
timesteps.append(tuple([x_.shape[0] for x_ in x_new[i]]))
else:
dim = dim[0]
if not hasattr(dim, "__len__"):
dim = (dim,)
if len(dim) + 2 > len(x_new[i].shape) >= len(dim) + 1:
timesteps.append((x_new[i].shape[0],))
else:
timesteps.append((x_new[i].shape[1],))
if len(np.unique([len(t) for t in timesteps])) > 1 or any(
[
len(np.unique([t[i] for t in timesteps])) > 1
for i in range(len(timesteps[0]))
]
):
raise ValueError("Inputs with different timesteps")
else:
raise ValueError("Expecting several inputs.")
else: # L
dim = expected_dim[0]
if not hasattr(dim, "__len__"):
dim = (dim,)
if isinstance(x, (list, tuple)):
if not allow_n_sequences:
raise TypeError("No lists, only arrays.")
x_new = [x[i] for i in range(len(x))]
for i in range(len(x)):
x_new[i] = check_one_sequence(
x[i],
allow_timespans=allow_timespans,
expected_dim=dim,
caller=caller,
)
else:
if len(x.shape) <= len(dim) + 1: # only one sequence
x_new = check_one_sequence(
x,
expected_dim=dim,
allow_timespans=allow_timespans,
caller=caller,
)
elif len(x.shape) == len(dim) + 2: # several sequences
if not allow_n_sequences:
raise TypeError("No lists, only arrays.")
x_new = x
for i in range(len(x)):
x_new[i] = check_one_sequence(
x[i],
allow_timespans=allow_timespans,
expected_dim=dim,
caller=caller,
)
else: # pragma: no cover
x_new = check_vector(
x,
allow_reshape=True,
allow_timespans=allow_timespans,
caller=caller,
)
else:
if isinstance(x, (list, tuple)):
x_new = [x[i] for i in range(len(x))]
for i in range(len(x)):
if allow_n_inputs:
x_new[i] = check_n_sequences(
x[i],
allow_n_sequences=allow_n_sequences,
allow_timespans=allow_timespans,
allow_n_inputs=False,
caller=caller,
)
elif allow_n_sequences:
x_new[i] = check_n_sequences(
x[i],
allow_n_sequences=False,
allow_timespans=allow_timespans,
allow_n_inputs=False,
caller=caller,
)
else:
raise ValueError("No lists, only arrays.")
else:
x_new = check_one_sequence(
x, allow_timespans=allow_timespans, caller=caller
)
return x_new
def _check_node_io(
x,
receiver_nodes=None,
expected_dim=None,
caller=None,
io_type="input",
allow_n_sequences=True,
allow_n_inputs=True,
allow_timespans=True,
):
noteacher_msg = f"Nodes can not be used as {io_type}" + " for {}."
notonline_msg = "{} is not trained online."
x_new = None
# Caller is a Model
if receiver_nodes is not None:
if not is_mapping(x):
x_new = {n.name: x for n in receiver_nodes}
else:
x_new = x.copy()
for node in receiver_nodes:
if node.name not in x_new:
# Maybe don't fit nodes a second time
if io_type == "target" and node.fitted:
continue
else:
raise ValueError(f"Missing {io_type} data for node {node.name}.")
if (
callable(x_new[node.name])
and hasattr(x_new[node.name], "initialize")
and hasattr(x_new[node.name], "is_initialized")
and hasattr(x_new[node.name], "output_dim")
):
if io_type == "target":
if node.is_trained_online:
register_teacher(
node,
x_new.pop(node.name),
expected_dim=node.output_dim,
)
else:
raise TypeError(
(noteacher_msg + notonline_msg).format(node.name, node.name)
)
else:
raise TypeError(noteacher_msg.format(node.name))
else:
if io_type == "target":
dim = node.output_dim
else:
dim = node.input_dim
x_new[node.name] = check_n_sequences(
x_new[node.name],
expected_dim=dim,
caller=node,
allow_n_sequences=allow_n_sequences,
allow_n_inputs=allow_n_inputs,
allow_timespans=allow_timespans,
)
# Caller is a Node
else:
if (
callable(x)
and hasattr(x, "initialize")
and hasattr(x, "is_initialized")
and hasattr(x, "output_dim")
):
if io_type == "target":
if caller.is_trained_online:
register_teacher(
caller,
x,
expected_dim=expected_dim,
)
else:
raise TypeError(
(noteacher_msg + notonline_msg).format(caller.name, caller.name)
)
else:
raise TypeError(noteacher_msg.format(caller.name))
else:
x_new = check_n_sequences(
x,
expected_dim=expected_dim,
caller=caller,
allow_n_sequences=allow_n_sequences,
allow_n_inputs=allow_n_inputs,
allow_timespans=allow_timespans,
)
# All x are teacher nodes, no data to return
if is_mapping(x_new) and io_type == "target" and len(x_new) == 0:
return None
return x_new
def register_teacher(caller, teacher, expected_dim=None):
target_dim = None
if teacher.is_initialized:
target_dim = teacher.output_dim
if (
expected_dim is not None
and target_dim is not None
and expected_dim != target_dim
):
raise ValueError()
caller._teacher = DistantFeedback(
sender=teacher, receiver=caller, callback_type="teacher"
)
def check_xy(
caller,
x,
y=None,
input_dim=None,
output_dim=None,
allow_n_sequences=True,
allow_n_inputs=True,
allow_timespans=True,
):
"""Prepare one step of input and target data for a Node or a Model.
Preparation may include:
- reshaping data to ([inputs], [sequences], timesteps, features);
- converting non-array objects to array objects;
- checking if n_features is equal to node input or output dimension.
This works on numerical data and teacher nodes.
Parameters
----------
caller: Node or Model
Node or Model requesting inputs/targets preparation.
x : array-like of shape ([inputs], [sequences], timesteps, features)
Input array or sequence of input arrays containing a single timestep of
data.
y : array-like of shape ([sequences], timesteps, features) or Node, optional
Target array containing a single timestep of data, or teacher Node or
Model
yielding target values.
input_dim, output_dim : int or tuple of ints, optional
Expected input and target dimensions, if available.
Returns
-------
array-like of shape ([inputs], 1, n), array-like of shape (1, n) or Node
Processed input and target vectors.
"""
if input_dim is None and hasattr(caller, "input_dim"):
input_dim = caller.input_dim
# caller is a Model
if hasattr(caller, "input_nodes"):
input_nodes = caller.input_nodes
# caller is a Node
else:
input_nodes = None
x_new = _check_node_io(
x,
receiver_nodes=input_nodes,
expected_dim=input_dim,
caller=caller,
io_type="input",
allow_n_sequences=allow_n_sequences,
allow_n_inputs=allow_n_inputs,
allow_timespans=allow_timespans,
)
y_new = y
if y is not None:
# caller is a Model
if hasattr(caller, "trainable_nodes"):
output_dim = None
trainable_nodes = caller.trainable_nodes
# caller is a Node
else:
trainable_nodes = None
if output_dim is None and hasattr(caller, "output_dim"):
output_dim = caller.output_dim
y_new = _check_node_io(
y,
receiver_nodes=trainable_nodes,
expected_dim=output_dim,
caller=caller,
io_type="target",
allow_n_sequences=allow_n_sequences,
allow_timespans=allow_timespans,
allow_n_inputs=False,
)
return x_new, y_new
class DistantFeedback:
def __init__(self, sender, receiver, callback_type="feedback"):
self._sender = sender
self._receiver = receiver
self._callback_type = callback_type
# used to store a reduced version of the feedback if needed
# when feedback is a Model (inputs of the feedback Model are suppressed
# in the reduced version, as we do not need then to re-run them
# because we assume they have already run during the forward call)
self._reduced_sender = None
self._clamped = False
self._clamped_value = None
def __call__(self):
if not self.is_initialized:
self.initialize()
return self.call_distant_node()
@property
def is_initialized(self):
return self._sender.is_initialized
@property
def output_dim(self):
return self._sender.output_dim
@property
def name(self):
return self._sender.name
def call_distant_node(self):
"""Call a distant Model for feedback or teaching
(no need to run the input nodes again)"""
if self._clamped:
self._clamped = False
return self._clamped_value
if self._reduced_sender is not None:
if len(np.unique([n._fb_flag for n in self._sender.nodes])) > 1:
input_data = _distant_model_inputs(self._sender)
if hasattr(self._reduced_sender, "nodes"):
return self._reduced_sender.call(input_data)
else:
reduced_name = self._reduced_sender.name
return self._reduced_sender.call(input_data[reduced_name])
else:
fb_outputs = [n.state() for n in self._sender.output_nodes]
if len(fb_outputs) > 1:
return fb_outputs
else:
return fb_outputs[0]
else:
return self._sender.state_proxy()
def initialize(self):
"""Initialize a distant Model or Node (used as feedback sender or teacher)."""
msg = f"Impossible to get {self._callback_type} "
msg += "from {} for {}: {} is not initialized or has no input/output_dim"
reduced_model = None
if hasattr(self._sender, "input_nodes"):
for n in self._sender.input_nodes:
if not n.is_initialized:
try:
n.initialize()
except RuntimeError:
raise RuntimeError(
msg.format(
self._sender.name,
self._receiver.name,
self._sender.name,
)
)
input_data = _distant_model_inputs(self._sender)
reduced_model = _remove_input_for_feedback(self._sender)
if not reduced_model.is_initialized:
if hasattr(reduced_model, "nodes"):
reduced_model.initialize(x=input_data)
else:
reduced_name = reduced_model.name
reduced_model.initialize(x=input_data[reduced_name])
self._sender._is_initialized = True
else:
try:
self._sender.initialize()
except RuntimeError: # raise more specific error
raise RuntimeError(
msg.format(
self._sender.name, self._receiver.name, self._sender.name
)
)
self._reduced_sender = reduced_model
def zero_feedback(self):
"""A null feedback vector. Returns None if the Node receives
no feedback."""
if hasattr(self._sender, "output_nodes"):
zeros = []
for output in self._sender.output_nodes:
zeros.append(output.zero_state())
if len(zeros) == 1:
return zeros[0]
else:
return zeros
else:
return self._sender.zero_state()
def clamp(self, value):
self._clamped_value = check_n_sequences(
value,
expected_dim=self._sender.output_dim,
caller=self._sender,
allow_n_sequences=False,
)
self._clamped = True
def call(node, x, from_state=None, stateful=True, reset=False):
"""One-step call, without input check."""
with node.with_state(from_state, stateful=stateful, reset=reset):
state = node._forward(node, x)
node._state = state.astype(node.dtype)
node._flag_feedback()
return state
def train(
node,
X,
Y=None,
call_node=True,
force_teachers=True,
learn_every=1,
from_state=None,
stateful=True,
reset=False,
):
seq_len = X.shape[0]
seq = (
progress(range(seq_len), f"Training {node.name}")
if seq_len > 1
else range(seq_len)
)
with node.with_state(from_state, stateful=stateful, reset=reset):
states = np.zeros((seq_len, node.output_dim))
for i in seq:
x = np.atleast_2d(X[i, :])
y = None
if node._teacher is not None:
y = node._teacher()
elif Y is not None:
y = np.atleast_2d(Y[i, :])
if call_node:
s = call(node, x)
else:
s = node.state()
if force_teachers:
node.set_state_proxy(y)
if i % learn_every == 0 or seq_len == 1:
node._train(node, x=x, y=y)
states[i, :] = s
return states
class _Node(ABC):
"""Node base class for type checking and interface inheritance."""
_factory_id = -1
_registry = list()
_name: str
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls._factory_id = -1
cls._registry = list()
def __repr__(self):
klas = type(self).__name__
hypers = [(str(k), str(v)) for k, v in self._hypers.items()]
all_params = ["=".join((k, v)) for k, v in hypers]
all_params += [f"in={self.input_dim}", f"out={self.output_dim}"]
return f"'{self.name}': {klas}(" + ", ".join(all_params) + ")"
def __setstate__(self, state):
curr_name = state.get("name")
if curr_name in type(self)._registry:
new_name = curr_name + "-(copy)"
state["name"] = new_name
self.__dict__ = state
def __del__(self):
try:
type(self)._registry.remove(self._name)
except (ValueError, AttributeError):
pass
def __getattr__(self, item):
if item in ["_params", "_hypers"]:
raise AttributeError()
if item in self._params:
return self._params.get(item)
elif item in self._hypers:
return self._hypers.get(item)
else:
raise AttributeError(f"{self.name} has no attribute '{str(item)}'")
def __call__(self, *args, **kwargs) -> np.ndarray:
return self.call(*args, **kwargs)
def __rshift__(self, other: Union["_Node", Sequence["_Node"]]) -> "Model":
from .ops import link
return link(self, other)
def __rrshift__(self, other: Union["_Node", Sequence["_Node"]]) -> "Model":
from .ops import link
return link(other, self)
def __and__(self, other: Union["_Node", Sequence["_Node"]]) -> "Model":
from .ops import merge
return merge(self, other)
def _get_name(self, name=None):
if name is None:
type(self)._factory_id += 1
_id = self._factory_id
name = f"{type(self).__name__}-{_id}"
if name in type(self)._registry:
raise NameError(
f"Name '{name}' is already taken "
f"by another node. Node names should "
f"be unique."
)
type(self)._registry.append(name)
return name
@property
def name(self) -> str:
"""Name of the Node or Model."""
return self._name
@name.setter
def name(self, value):
type(self)._registry.remove(self.name)
self._name = self._get_name(value)
@property
def params(self) -> Dict[str, Any]:
"""Parameters of the Node or Model."""
return self._params
@property
def hypers(self) -> Dict[str, Any]:
"""Hyperparameters of the Node or Model."""
return self._hypers
@property
def is_initialized(self) -> bool:
return self._is_initialized
@property
@abstractmethod
def input_dim(self) -> Shape:
raise NotImplementedError()
@property
@abstractmethod
def output_dim(self) -> Shape:
raise NotImplementedError()
@property
@abstractmethod
def is_trained_offline(self) -> bool:
raise NotImplementedError()
@property
@abstractmethod
def is_trained_online(self) -> bool:
raise NotImplementedError()
@property
@abstractmethod
def is_trainable(self) -> bool:
raise NotImplementedError()
@property
@abstractmethod
def fitted(self) -> bool:
raise NotImplementedError()
@is_trainable.setter
@abstractmethod
def is_trainable(self, value: bool):
raise NotImplementedError()
def get_param(self, name: str) -> Any:
if name in self._params:
return self._params.get(name)
elif name in self._hypers:
return self._hypers.get(name)
else:
raise NameError(f"No parameter named '{name}' found in node {self}")
@abstractmethod
def copy(
self, name: str = None, copy_feedback: bool = False, shallow: bool = False
) -> "_Node":
raise NotImplementedError()
@abstractmethod
def initialize(self, x: MappedData = None, y: MappedData = None):
raise NotImplementedError()
@abstractmethod
def reset(self, to_state: np.ndarray = None) -> "_Node":
raise NotImplementedError()
@contextmanager
@abstractmethod
def with_state(self, state=None, stateful=False, reset=False) -> Iterator["_Node"]:
raise NotImplementedError()
@contextmanager
@abstractmethod
def with_feedback(
self, feedback=None, stateful=False, reset=False
) -> Iterator["_Node"]:
raise NotImplementedError()
| 32.097204 | 88 | 0.541713 |
from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import Any, Dict, Iterator, Sequence, Union
from uuid import uuid4
import numpy as np
from .type import MappedData, Shape
from .utils import progress
from .utils.validation import check_vector, is_mapping
def _distant_model_inputs(model):
input_data = {}
for p, c in model.edges:
if p in model.input_nodes:
input_data[c.name] = p.state_proxy()
return input_data
def _remove_input_for_feedback(node) -> Union["Node", "Model"]:
from .model import Model
all_nodes = set(node.nodes)
input_nodes = set(node.input_nodes)
filtered_nodes = list(all_nodes - input_nodes)
filtered_edges = [edge for edge in node.edges if edge[0] not in input_nodes]
if len(filtered_nodes) == 1:
return list(filtered_nodes)[0]
return Model(filtered_nodes, filtered_edges, name=str(uuid4()))
def check_one_sequence(
x: Union[np.ndarray, Sequence[np.ndarray]],
expected_dim=None,
caller=None,
allow_timespans=True,
):
caller_name = caller.name + "is " if caller is not None else ""
if expected_dim is not None and not hasattr(expected_dim, "__iter__"):
expected_dim = (expected_dim,)
x_new = check_vector(
x, allow_reshape=True, allow_timespans=allow_timespans, caller=caller
)
data_dim = x_new.shape[1:]
if expected_dim is not None:
if len(expected_dim) != len(data_dim):
raise ValueError(
f"{caller_name} expecting {len(expected_dim)} inputs "
f"but received {len(data_dim)}: {x_new}."
)
for dim in expected_dim:
if all([dim != ddim for ddim in data_dim]):
raise ValueError(
f"{caller_name} expecting data of shape "
f"{expected_dim} but received shape {data_dim}."
)
return x_new
def check_n_sequences(
x,
expected_dim=None,
allow_n_sequences=True,
allow_n_inputs=True,
allow_timespans=True,
caller=None,
):
if expected_dim is not None:
if not hasattr(expected_dim, "__iter__"):
expected_dim = (expected_dim,)
n_inputs = len(expected_dim)
if n_inputs > 1:
if isinstance(x, (list, tuple)):
x_new = [x[i] for i in range(len(x))]
timesteps = []
for i in range(n_inputs):
dim = (expected_dim[i],)
x_new[i] = check_n_sequences(
x[i],
expected_dim=dim,
caller=caller,
allow_n_sequences=allow_n_sequences,
allow_timespans=allow_timespans,
allow_n_inputs=allow_n_inputs,
)
if isinstance(x_new[i], (list, tuple)):
timesteps.append(tuple([x_.shape[0] for x_ in x_new[i]]))
else:
dim = dim[0]
if not hasattr(dim, "__len__"):
dim = (dim,)
if len(dim) + 2 > len(x_new[i].shape) >= len(dim) + 1:
timesteps.append((x_new[i].shape[0],))
else:
timesteps.append((x_new[i].shape[1],))
if len(np.unique([len(t) for t in timesteps])) > 1 or any(
[
len(np.unique([t[i] for t in timesteps])) > 1
for i in range(len(timesteps[0]))
]
):
raise ValueError("Inputs with different timesteps")
else:
raise ValueError("Expecting several inputs.")
else:
dim = expected_dim[0]
if not hasattr(dim, "__len__"):
dim = (dim,)
if isinstance(x, (list, tuple)):
if not allow_n_sequences:
raise TypeError("No lists, only arrays.")
x_new = [x[i] for i in range(len(x))]
for i in range(len(x)):
x_new[i] = check_one_sequence(
x[i],
allow_timespans=allow_timespans,
expected_dim=dim,
caller=caller,
)
else:
if len(x.shape) <= len(dim) + 1:
x_new = check_one_sequence(
x,
expected_dim=dim,
allow_timespans=allow_timespans,
caller=caller,
)
elif len(x.shape) == len(dim) + 2:
if not allow_n_sequences:
raise TypeError("No lists, only arrays.")
x_new = x
for i in range(len(x)):
x_new[i] = check_one_sequence(
x[i],
allow_timespans=allow_timespans,
expected_dim=dim,
caller=caller,
)
else:
x_new = check_vector(
x,
allow_reshape=True,
allow_timespans=allow_timespans,
caller=caller,
)
else:
if isinstance(x, (list, tuple)):
x_new = [x[i] for i in range(len(x))]
for i in range(len(x)):
if allow_n_inputs:
x_new[i] = check_n_sequences(
x[i],
allow_n_sequences=allow_n_sequences,
allow_timespans=allow_timespans,
allow_n_inputs=False,
caller=caller,
)
elif allow_n_sequences:
x_new[i] = check_n_sequences(
x[i],
allow_n_sequences=False,
allow_timespans=allow_timespans,
allow_n_inputs=False,
caller=caller,
)
else:
raise ValueError("No lists, only arrays.")
else:
x_new = check_one_sequence(
x, allow_timespans=allow_timespans, caller=caller
)
return x_new
def _check_node_io(
    x,
    receiver_nodes=None,
    expected_dim=None,
    caller=None,
    io_type="input",
    allow_n_sequences=True,
    allow_n_inputs=True,
    allow_timespans=True,
):
    """Check and format input or target data for a Node or a Model.

    If ``receiver_nodes`` is given (the caller is a Model), ``x`` is turned
    into (or checked as) a mapping of node names to data, and each entry is
    checked against the corresponding node's input/output dimension.
    Otherwise (the caller is a single Node), ``x`` is checked directly
    against ``expected_dim``.

    Entries of ``x`` that look like Nodes (duck-typed check on
    ``initialize``/``is_initialized``/``output_dim``) are registered as
    online teachers when ``io_type == "target"`` and removed from the
    returned data.

    Returns the formatted data, or None when every target was a teacher
    node and no array data remains.
    """
    noteacher_msg = f"Nodes can not be used as {io_type}" + " for {}."
    notonline_msg = "{} is not trained online."
    x_new = None
    if receiver_nodes is not None:
        # Caller is a Model: data must be a mapping from node names to arrays.
        if not is_mapping(x):
            # A single array is broadcast to every receiver node.
            x_new = {n.name: x for n in receiver_nodes}
        else:
            x_new = x.copy()
        for node in receiver_nodes:
            if node.name not in x_new:
                # Already fitted nodes may be silently skipped for targets.
                if io_type == "target" and node.fitted:
                    continue
                else:
                    raise ValueError(f"Missing {io_type} data for node {node.name}.")
            # Duck-typed check: does this entry look like a Node?
            if (
                callable(x_new[node.name])
                and hasattr(x_new[node.name], "initialize")
                and hasattr(x_new[node.name], "is_initialized")
                and hasattr(x_new[node.name], "output_dim")
            ):
                if io_type == "target":
                    if node.is_trained_online:
                        # Teacher nodes are consumed here, not returned as data.
                        register_teacher(
                            node,
                            x_new.pop(node.name),
                            expected_dim=node.output_dim,
                        )
                    else:
                        raise TypeError(
                            (noteacher_msg + notonline_msg).format(node.name, node.name)
                        )
                else:
                    raise TypeError(noteacher_msg.format(node.name))
            else:
                if io_type == "target":
                    dim = node.output_dim
                else:
                    dim = node.input_dim
                x_new[node.name] = check_n_sequences(
                    x_new[node.name],
                    expected_dim=dim,
                    caller=node,
                    allow_n_sequences=allow_n_sequences,
                    allow_n_inputs=allow_n_inputs,
                    allow_timespans=allow_timespans,
                )
    # Caller is a Node
    else:
        # Same duck-typed Node check as above, applied to `x` itself.
        if (
            callable(x)
            and hasattr(x, "initialize")
            and hasattr(x, "is_initialized")
            and hasattr(x, "output_dim")
        ):
            if io_type == "target":
                if caller.is_trained_online:
                    register_teacher(
                        caller,
                        x,
                        expected_dim=expected_dim,
                    )
                else:
                    raise TypeError(
                        (noteacher_msg + notonline_msg).format(caller.name, caller.name)
                    )
            else:
                raise TypeError(noteacher_msg.format(caller.name))
        else:
            x_new = check_n_sequences(
                x,
                expected_dim=expected_dim,
                caller=caller,
                allow_n_sequences=allow_n_sequences,
                allow_n_inputs=allow_n_inputs,
                allow_timespans=allow_timespans,
            )
    # All x are teacher nodes, no data to return
    if is_mapping(x_new) and io_type == "target" and len(x_new) == 0:
        return None
    return x_new
def register_teacher(caller, teacher, expected_dim=None):
    """Register ``teacher`` as the online teaching signal of ``caller``.

    The teacher node is wrapped into a :class:`DistantFeedback` connection
    stored on ``caller._teacher``.

    Raises
    ------
    ValueError
        If both dimensions are known and the teacher's output dimension
        does not match the expected target dimension.
    """
    target_dim = None
    # The teacher's output dimension is only known once it is initialized.
    if teacher.is_initialized:
        target_dim = teacher.output_dim
    if (
        expected_dim is not None
        and target_dim is not None
        and expected_dim != target_dim
    ):
        # Original code raised a bare ValueError(); give the user a message
        # explaining the dimension mismatch instead.
        raise ValueError(
            f"Can not use {teacher.name} as a teacher: expected target "
            f"dimension is {expected_dim} but the teacher node outputs "
            f"dimension {target_dim}."
        )
    caller._teacher = DistantFeedback(
        sender=teacher, receiver=caller, callback_type="teacher"
    )
def check_xy(
    caller,
    x,
    y=None,
    input_dim=None,
    output_dim=None,
    allow_n_sequences=True,
    allow_n_inputs=True,
    allow_timespans=True,
):
    """Check and format an input/target data pair for ``caller``.

    ``caller`` may be a Node or a Model; the distinction is made by
    duck-typing on the ``input_nodes``/``trainable_nodes`` attributes.
    Inputs are checked against the caller's input dimension(s); targets
    against the output dimension(s) of the caller or of its trainable
    nodes.

    Returns the formatted ``(x, y)`` pair. ``y`` may come back as None,
    e.g. when all targets were teacher nodes.
    """
    if input_dim is None and hasattr(caller, "input_dim"):
        input_dim = caller.input_dim
    # caller is a Model
    if hasattr(caller, "input_nodes"):
        input_nodes = caller.input_nodes
    # caller is a Node
    else:
        input_nodes = None
    x_new = _check_node_io(
        x,
        receiver_nodes=input_nodes,
        expected_dim=input_dim,
        caller=caller,
        io_type="input",
        allow_n_sequences=allow_n_sequences,
        allow_n_inputs=allow_n_inputs,
        allow_timespans=allow_timespans,
    )
    y_new = y
    if y is not None:
        # caller is a Model: targets are dispatched per trainable node.
        if hasattr(caller, "trainable_nodes"):
            output_dim = None
            trainable_nodes = caller.trainable_nodes
        # caller is a Node
        else:
            trainable_nodes = None
        if output_dim is None and hasattr(caller, "output_dim"):
            output_dim = caller.output_dim
        y_new = _check_node_io(
            y,
            receiver_nodes=trainable_nodes,
            expected_dim=output_dim,
            caller=caller,
            io_type="target",
            allow_n_sequences=allow_n_sequences,
            allow_timespans=allow_timespans,
            allow_n_inputs=False,
        )
    return x_new, y_new
class DistantFeedback:
    """Lazy connection carrying a feedback (or teaching) signal from a
    distant sender node/model to a receiver node.

    Calling the instance initializes the sender if necessary and returns
    the current feedback value. The next value can be temporarily forced
    with :meth:`clamp`.
    """
    def __init__(self, sender, receiver, callback_type="feedback"):
        self._sender = sender
        self._receiver = receiver
        self._callback_type = callback_type
        # used to store a reduced version of the feedback if needed
        # when feedback is a Model (inputs of the feedback Model are suppressed
        # in the reduced version, as we do not need then to re-run them
        # because we assume they have already run during the forward call)
        self._reduced_sender = None
        # clamp() stores a one-shot value returned by the next call.
        self._clamped = False
        self._clamped_value = None
    def __call__(self):
        if not self.is_initialized:
            self.initialize()
        return self.call_distant_node()
    @property
    def is_initialized(self):
        # Mirrors the sender's initialization state.
        return self._sender.is_initialized
    @property
    def output_dim(self):
        return self._sender.output_dim
    @property
    def name(self):
        return self._sender.name
    def call_distant_node(self):
        """Return the current feedback value from the distant sender."""
        if self._clamped:
            # A clamped value is consumed exactly once.
            self._clamped = False
            return self._clamped_value
        if self._reduced_sender is not None:
            # Sender is a Model. If its nodes are out of sync (different
            # feedback flags), re-run the reduced model on fresh inputs.
            if len(np.unique([n._fb_flag for n in self._sender.nodes])) > 1:
                input_data = _distant_model_inputs(self._sender)
                if hasattr(self._reduced_sender, "nodes"):
                    return self._reduced_sender.call(input_data)
                else:
                    # Reduced sender collapsed to a single node.
                    reduced_name = self._reduced_sender.name
                    return self._reduced_sender.call(input_data[reduced_name])
            else:
                # Nodes are in sync: read current output states directly.
                fb_outputs = [n.state() for n in self._sender.output_nodes]
                if len(fb_outputs) > 1:
                    return fb_outputs
                else:
                    return fb_outputs[0]
        else:
            # Sender is a plain Node: read its (possibly delayed) state proxy.
            return self._sender.state_proxy()
    def initialize(self):
        """Initialize the distant sender node or model.

        Raises RuntimeError with a descriptive message when the sender
        cannot be initialized (missing input/output dimensions).
        """
        msg = f"Impossible to get {self._callback_type} "
        msg += "from {} for {}: {} is not initialized or has no input/output_dim"
        reduced_model = None
        if hasattr(self._sender, "input_nodes"):
            # Sender is a Model: initialize its input nodes, then build a
            # reduced model without them (they already ran in the forward
            # call of the main model).
            for n in self._sender.input_nodes:
                if not n.is_initialized:
                    try:
                        n.initialize()
                    except RuntimeError:
                        raise RuntimeError(
                            msg.format(
                                self._sender.name,
                                self._receiver.name,
                                self._sender.name,
                            )
                        )
            input_data = _distant_model_inputs(self._sender)
            reduced_model = _remove_input_for_feedback(self._sender)
            if not reduced_model.is_initialized:
                if hasattr(reduced_model, "nodes"):
                    reduced_model.initialize(x=input_data)
                else:
                    reduced_name = reduced_model.name
                    reduced_model.initialize(x=input_data[reduced_name])
                self._sender._is_initialized = True
        else:
            try:
                self._sender.initialize()
            except RuntimeError:  # raise more specific error
                raise RuntimeError(
                    msg.format(
                        self._sender.name, self._receiver.name, self._sender.name
                    )
                )
        self._reduced_sender = reduced_model
    def zero_feedback(self):
        """Return a null feedback value (one zero state per output node)."""
        if hasattr(self._sender, "output_nodes"):
            zeros = []
            for output in self._sender.output_nodes:
                zeros.append(output.zero_state())
            if len(zeros) == 1:
                return zeros[0]
            else:
                return zeros
        else:
            return self._sender.zero_state()
    def clamp(self, value):
        """Force the next feedback value (single use), after checking its
        shape against the sender's output dimension."""
        self._clamped_value = check_n_sequences(
            value,
            expected_dim=self._sender.output_dim,
            caller=self._sender,
            allow_n_sequences=False,
        )
        self._clamped = True
def call(node, x, from_state=None, stateful=True, reset=False):
    """Run one forward step of ``node`` on a single timestep of data.

    The node's internal state is updated (cast to the node's dtype) and
    its feedback flag is raised, all within the requested state context.
    Returns the newly computed state.
    """
    state_context = node.with_state(from_state, stateful=stateful, reset=reset)
    with state_context:
        # Compute the next state from the current input.
        new_state = node._forward(node, x)
        node._state = new_state.astype(node.dtype)
        # Signal feedback receivers that a fresh state is available.
        node._flag_feedback()
    return new_state
def train(
    node,
    X,
    Y=None,
    call_node=True,
    force_teachers=True,
    learn_every=1,
    from_state=None,
    stateful=True,
    reset=False,
):
    """Train a Node online on a single sequence ``X`` of shape
    (timesteps, features).

    Targets are taken, in order of priority, from a registered teacher
    node (``node._teacher``) or from ``Y``. The node's ``_train`` function
    is called every ``learn_every`` timesteps. Returns the states computed
    during training as a (timesteps, output_dim) array.
    """
    seq_len = X.shape[0]
    # Only display a progress bar for sequences longer than one step.
    seq = (
        progress(range(seq_len), f"Training {node.name}")
        if seq_len > 1
        else range(seq_len)
    )
    with node.with_state(from_state, stateful=stateful, reset=reset):
        states = np.zeros((seq_len, node.output_dim))
        for i in seq:
            x = np.atleast_2d(X[i, :])
            y = None
            # A teacher node takes precedence over explicit targets.
            if node._teacher is not None:
                y = node._teacher()
            elif Y is not None:
                y = np.atleast_2d(Y[i, :])
            if call_node:
                s = call(node, x)
            else:
                # Node was already run elsewhere: reuse its current state.
                s = node.state()
            if force_teachers:
                # Expose the target through the node's state proxy so that
                # feedback receivers see the teacher signal.
                node.set_state_proxy(y)
            if i % learn_every == 0 or seq_len == 1:
                node._train(node, x=x, y=y)
            states[i, :] = s
    return states
class _Node(ABC):
    """Abstract base class for Nodes (and Models).

    Provides unique naming through a per-subclass registry, attribute
    access to parameters/hyperparameters, and the operators used to
    compose nodes into models (``>>`` to link, ``&`` to merge).
    """
    _factory_id = -1
    _registry = list()
    _name: str
    def __init_subclass__(cls, **kwargs):
        # Every subclass gets its own name counter and name registry.
        super().__init_subclass__(**kwargs)
        cls._factory_id = -1
        cls._registry = list()
    def __repr__(self):
        klas = type(self).__name__
        hypers = [(str(k), str(v)) for k, v in self._hypers.items()]
        all_params = ["=".join((k, v)) for k, v in hypers]
        all_params += [f"in={self.input_dim}", f"out={self.output_dim}"]
        return f"'{self.name}': {klas}(" + ", ".join(all_params) + ")"
    def __setstate__(self, state):
        # When unpickling, avoid name collisions with already-living nodes.
        curr_name = state.get("name")
        if curr_name in type(self)._registry:
            new_name = curr_name + "-(copy)"
            state["name"] = new_name
        self.__dict__ = state
    def __del__(self):
        # Free the node's name for reuse. The registry or the name may
        # already be gone at interpreter shutdown, hence the guard.
        try:
            type(self)._registry.remove(self._name)
        except (ValueError, AttributeError):
            pass
    def __getattr__(self, item):
        # Expose params and hypers as plain attributes. The first guard
        # avoids infinite recursion when _params/_hypers are not set yet
        # (e.g. during unpickling).
        if item in ["_params", "_hypers"]:
            raise AttributeError()
        if item in self._params:
            return self._params.get(item)
        elif item in self._hypers:
            return self._hypers.get(item)
        else:
            raise AttributeError(f"{self.name} has no attribute '{str(item)}'")
    def __call__(self, *args, **kwargs) -> np.ndarray:
        # Calling a node is a shortcut for node.call(...).
        return self.call(*args, **kwargs)
    def __rshift__(self, other: Union["_Node", Sequence["_Node"]]) -> "Model":
        # node >> other : link this node to another node (or several).
        from .ops import link
        return link(self, other)
    def __rrshift__(self, other: Union["_Node", Sequence["_Node"]]) -> "Model":
        # other >> node : link when the left operand is not a Node.
        from .ops import link
        return link(other, self)
    def __and__(self, other: Union["_Node", Sequence["_Node"]]) -> "Model":
        # node & other : merge nodes/models into a single Model.
        from .ops import merge
        return merge(self, other)
    def _get_name(self, name=None):
        # Generate a unique default name, or validate a user-provided one.
        if name is None:
            type(self)._factory_id += 1
            _id = self._factory_id
            name = f"{type(self).__name__}-{_id}"
        if name in type(self)._registry:
            raise NameError(
                f"Name '{name}' is already taken "
                f"by another node. Node names should "
                f"be unique."
            )
        type(self)._registry.append(name)
        return name
    @property
    def name(self) -> str:
        """Unique name of the node."""
        return self._name
    @name.setter
    def name(self, value):
        # Renaming frees the old name and registers the new one.
        type(self)._registry.remove(self.name)
        self._name = self._get_name(value)
    @property
    def params(self) -> Dict[str, Any]:
        """Learned parameters of the node."""
        return self._params
    @property
    def hypers(self) -> Dict[str, Any]:
        """Hyperparameters of the node."""
        return self._hypers
    @property
    def is_initialized(self) -> bool:
        return self._is_initialized
    @property
    @abstractmethod
    def input_dim(self) -> Shape:
        raise NotImplementedError()
    @property
    @abstractmethod
    def output_dim(self) -> Shape:
        raise NotImplementedError()
    @property
    @abstractmethod
    def is_trained_offline(self) -> bool:
        raise NotImplementedError()
    @property
    @abstractmethod
    def is_trained_online(self) -> bool:
        raise NotImplementedError()
    @property
    @abstractmethod
    def is_trainable(self) -> bool:
        raise NotImplementedError()
    @property
    @abstractmethod
    def fitted(self) -> bool:
        raise NotImplementedError()
    @is_trainable.setter
    @abstractmethod
    def is_trainable(self, value: bool):
        raise NotImplementedError()
    def get_param(self, name: str) -> Any:
        """Return a parameter or hyperparameter by name.

        Raises NameError when the name is unknown.
        """
        if name in self._params:
            return self._params.get(name)
        elif name in self._hypers:
            return self._hypers.get(name)
        else:
            raise NameError(f"No parameter named '{name}' found in node {self}")
    @abstractmethod
    def copy(
        self, name: str = None, copy_feedback: bool = False, shallow: bool = False
    ) -> "_Node":
        raise NotImplementedError()
    @abstractmethod
    def initialize(self, x: MappedData = None, y: MappedData = None):
        raise NotImplementedError()
    @abstractmethod
    def reset(self, to_state: np.ndarray = None) -> "_Node":
        raise NotImplementedError()
    @contextmanager
    @abstractmethod
    def with_state(self, state=None, stateful=False, reset=False) -> Iterator["_Node"]:
        raise NotImplementedError()
    @contextmanager
    @abstractmethod
    def with_feedback(
        self, feedback=None, stateful=False, reset=False
    ) -> Iterator["_Node"]:
        raise NotImplementedError()
| true | true |
1c38676aebd896c554453761a10d47ee56f4f383 | 2,582 | py | Python | e3nn/nn/_fc.py | simonbatzner/e3nn | 9f5e336d5443d26a04d37162c10eb851beb0f5c5 | [
"MIT"
] | null | null | null | e3nn/nn/_fc.py | simonbatzner/e3nn | 9f5e336d5443d26a04d37162c10eb851beb0f5c5 | [
"MIT"
] | null | null | null | e3nn/nn/_fc.py | simonbatzner/e3nn | 9f5e336d5443d26a04d37162c10eb851beb0f5c5 | [
"MIT"
] | null | null | null | import torch
from e3nn.math import normalize2mom
from e3nn.util.jit import compile_mode
def _identity(x):
return x
# This is a static network that can be traced
@compile_mode('trace')
class FullyConnectedNet(torch.nn.Module):
    r"""Fully-connected Neural Network
    Parameters
    ----------
    hs : list of int
        input, internal and output dimensions
    act : function
        activation function :math:`\phi`, it will be automatically normalized by a scaling factor such that
    .. math::
        \int_{-\infty}^{\infty} \phi(z)^2 \frac{e^{-z^2/2}}{\sqrt{2\pi}} dz = 1
    """
    def __init__(self, hs, act=None, variance_in=1, variance_out=1, out_act=False):
        super().__init__()
        self.hs = tuple(hs)
        # One weight matrix per pair of consecutive layer sizes.
        weights = []
        for h1, h2 in zip(self.hs, self.hs[1:]):
            weights.append(torch.nn.Parameter(torch.randn(h1, h2)))
        self.weights = torch.nn.ParameterList(weights)
        if act is None:
            act = _identity
        # Normalize the activation to unit second moment under N(0, 1).
        self.act = normalize2mom(act)
        self.variance_in = variance_in
        self.variance_out = variance_out
        # out_act: also apply the activation (then output scaling) on the
        # last layer instead of only scaling the last weight matrix.
        self.out_act = out_act
    def __repr__(self):
        return f"{self.__class__.__name__}{self.hs}"
    def forward(self, x):
        """evaluate the network
        Parameters
        ----------
        x : `torch.Tensor`
            tensor of shape ``(batch, hs[0])``
        Returns
        -------
        `torch.Tensor`
            tensor of shape ``(batch, hs[-1])``
        """
        with torch.autograd.profiler.record_function(repr(self)):
            # Weights are scaled at call time so the stored parameters stay
            # unit-variance (variance-preserving initialization scheme).
            for i, W in enumerate(self.weights):
                h_in, _h_out = W.shape
                if i == 0:  # first layer
                    W = W / (h_in * self.variance_in)**0.5
                if i > 0:  # not first layer
                    W = W / h_in**0.5
                if i == len(self.weights) - 1 and not self.out_act:  # last layer
                    W = W * self.variance_out**0.5
                x = x @ W
                if i < len(self.weights) - 1:  # not last layer
                    x = self.act(x)
                if i == len(self.weights) - 1 and self.out_act:  # last layer
                    x = self.act(x)
                if i == len(self.weights) - 1 and self.out_act:  # last layer
                    x = x * self.variance_out**0.5
        return x
    def _make_tracing_inputs(self, n: int = 1):
        """Random example inputs (variable batch size) used for JIT tracing."""
        import random
        return [
            {'forward': (torch.randn(random.randint(1, 5), self.hs[0]),)}
            for _ in range(n)
        ]
| 28.688889 | 107 | 0.524787 | import torch
from e3nn.math import normalize2mom
from e3nn.util.jit import compile_mode
def _identity(x):
return x
@compile_mode('trace')
class FullyConnectedNet(torch.nn.Module):
    r"""Fully-connected neural network.

    ``hs`` gives the input, internal and output dimensions; ``act`` is an
    activation function that is normalized to unit second moment by
    ``normalize2mom``. Weight matrices are stored unnormalized and scaled
    at call time by ``variance_in``/``variance_out`` and the fan-in.
    """
    def __init__(self, hs, act=None, variance_in=1, variance_out=1, out_act=False):
        super().__init__()
        self.hs = tuple(hs)
        # One weight matrix per pair of consecutive layer sizes.
        weights = []
        for h1, h2 in zip(self.hs, self.hs[1:]):
            weights.append(torch.nn.Parameter(torch.randn(h1, h2)))
        self.weights = torch.nn.ParameterList(weights)
        if act is None:
            act = _identity
        self.act = normalize2mom(act)
        self.variance_in = variance_in
        self.variance_out = variance_out
        # out_act: also apply the activation on the last layer.
        self.out_act = out_act
    def __repr__(self):
        return f"{self.__class__.__name__}{self.hs}"
    def forward(self, x):
        """Evaluate the network on ``x`` of shape (batch, hs[0]);
        returns a tensor of shape (batch, hs[-1])."""
        with torch.autograd.profiler.record_function(repr(self)):
            for i, W in enumerate(self.weights):
                h_in, _h_out = W.shape
                if i == 0:
                    # First layer: scale by fan-in and input variance.
                    W = W / (h_in * self.variance_in)**0.5
                if i > 0:
                    # Hidden layers: scale by fan-in only.
                    W = W / h_in**0.5
                if i == len(self.weights) - 1 and not self.out_act:
                    # Last layer without output activation: scale weights.
                    W = W * self.variance_out**0.5
                x = x @ W
                if i < len(self.weights) - 1:
                    x = self.act(x)
                if i == len(self.weights) - 1 and self.out_act:
                    # Last layer with output activation: activate, then
                    # scale the activations instead of the weights.
                    x = self.act(x)
                if i == len(self.weights) - 1 and self.out_act:
                    x = x * self.variance_out**0.5
        return x
    def _make_tracing_inputs(self, n: int = 1):
        """Random example inputs (variable batch size) for JIT tracing."""
        import random
        return [
            {'forward': (torch.randn(random.randint(1, 5), self.hs[0]),)}
            for _ in range(n)
        ]
1c3868f4caf7e0f6a4337cab535f29ba1bfe9bbd | 5,414 | py | Python | tests/utils_tests/test_crypto.py | rushuifang/django | b94764e178056a2118eff3f53f567207219e737d | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 2 | 2020-01-17T17:35:21.000Z | 2020-01-17T17:35:27.000Z | tests/utils_tests/test_crypto.py | rushuifang/django | b94764e178056a2118eff3f53f567207219e737d | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | null | null | null | tests/utils_tests/test_crypto.py | rushuifang/django | b94764e178056a2118eff3f53f567207219e737d | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 2 | 2021-05-13T13:56:12.000Z | 2021-06-13T19:56:44.000Z | import hashlib
import unittest
from django.utils.crypto import constant_time_compare, pbkdf2, salted_hmac
class TestUtilsCryptoMisc(unittest.TestCase):
    """Tests for constant_time_compare() and salted_hmac()."""
    def test_constant_time_compare(self):
        # It's hard to test for constant time, just test the result.
        self.assertTrue(constant_time_compare(b'spam', b'spam'))
        self.assertFalse(constant_time_compare(b'spam', b'eggs'))
        self.assertTrue(constant_time_compare('spam', 'spam'))
        self.assertFalse(constant_time_compare('spam', 'eggs'))
    def test_salted_hmac(self):
        # Each case: (positional args, keyword args, expected hex digest).
        tests = [
            ((b'salt', b'value'), {}, 'b51a2e619c43b1ca4f91d15c57455521d71d61eb'),
            (('salt', 'value'), {}, 'b51a2e619c43b1ca4f91d15c57455521d71d61eb'),
            (
                ('salt', 'value'),
                {'secret': 'abcdefg'},
                '8bbee04ccddfa24772d1423a0ba43bd0c0e24b76',
            ),
            (
                ('salt', 'value'),
                {'secret': 'x' * hashlib.sha1().block_size},
                'bd3749347b412b1b0a9ea65220e55767ac8e96b0',
            ),
        ]
        for args, kwargs, digest in tests:
            with self.subTest(args=args, kwargs=kwargs):
                self.assertEqual(salted_hmac(*args, **kwargs).hexdigest(), digest)
class TestUtilsCryptoPBKDF2(unittest.TestCase):
    """Check pbkdf2() against the RFC test vectors plus regression cases."""
    # http://tools.ietf.org/html/draft-josefsson-pbkdf2-test-vectors-06
    rfc_vectors = [
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 1,
                "dklen": 20,
                "digest": hashlib.sha1,
            },
            "result": "0c60c80f961f0e71f3a9b524af6012062fe037a6",
        },
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 2,
                "dklen": 20,
                "digest": hashlib.sha1,
            },
            "result": "ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957",
        },
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 4096,
                "dklen": 20,
                "digest": hashlib.sha1,
            },
            "result": "4b007901b765489abead49d926f721d065a429c1",
        },
        # # this takes way too long :(
        # {
        #     "args": {
        #         "password": "password",
        #         "salt": "salt",
        #         "iterations": 16777216,
        #         "dklen": 20,
        #         "digest": hashlib.sha1,
        #     },
        #     "result": "eefe3d61cd4da4e4e9945b3d6ba2158c2634e984",
        # },
        {
            "args": {
                "password": "passwordPASSWORDpassword",
                "salt": "saltSALTsaltSALTsaltSALTsaltSALTsalt",
                "iterations": 4096,
                "dklen": 25,
                "digest": hashlib.sha1,
            },
            "result": "3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038",
        },
        {
            "args": {
                "password": "pass\0word",
                "salt": "sa\0lt",
                "iterations": 4096,
                "dklen": 16,
                "digest": hashlib.sha1,
            },
            "result": "56fa6aa75548099dcc37d7f03425e0c3",
        },
    ]
    # Non-RFC vectors covering other digests and dklen=0 edge case.
    regression_vectors = [
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 1,
                "dklen": 20,
                "digest": hashlib.sha256,
            },
            "result": "120fb6cffcf8b32c43e7225256c4f837a86548c9",
        },
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 1,
                "dklen": 20,
                "digest": hashlib.sha512,
            },
            "result": "867f70cf1ade02cff3752599a3a53dc4af34c7a6",
        },
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 1000,
                "dklen": 0,
                "digest": hashlib.sha512,
            },
            "result": ("afe6c5530785b6cc6b1c6453384731bd5ee432ee"
                       "549fd42fb6695779ad8a1c5bf59de69c48f774ef"
                       "c4007d5298f9033c0241d5ab69305e7b64eceeb8d"
                       "834cfec"),
        },
        # Check leading zeros are not stripped (#17481)
        {
            "args": {
                "password": b'\xba',
                "salt": "salt",
                "iterations": 1,
                "dklen": 20,
                "digest": hashlib.sha1,
            },
            "result": '0053d3b91a7f1e54effebd6d68771e8a6e0b2c5b',
        },
    ]
    def test_public_vectors(self):
        for vector in self.rfc_vectors:
            result = pbkdf2(**vector['args'])
            self.assertEqual(result.hex(), vector['result'])
    def test_regression_vectors(self):
        for vector in self.regression_vectors:
            result = pbkdf2(**vector['args'])
            self.assertEqual(result.hex(), vector['result'])
    def test_default_hmac_alg(self):
        # pbkdf2() must default to SHA-256, matching hashlib.pbkdf2_hmac.
        kwargs = {'password': b'password', 'salt': b'salt', 'iterations': 1, 'dklen': 20}
        self.assertEqual(pbkdf2(**kwargs), hashlib.pbkdf2_hmac(hash_name=hashlib.sha256().name, **kwargs))
| 33.214724 | 106 | 0.476912 | import hashlib
import unittest
from django.utils.crypto import constant_time_compare, pbkdf2, salted_hmac
class TestUtilsCryptoMisc(unittest.TestCase):
    """Tests for constant_time_compare() and salted_hmac()."""
    def test_constant_time_compare(self):
        # Constant-time behavior itself is hard to assert; check results only.
        self.assertTrue(constant_time_compare(b'spam', b'spam'))
        self.assertFalse(constant_time_compare(b'spam', b'eggs'))
        self.assertTrue(constant_time_compare('spam', 'spam'))
        self.assertFalse(constant_time_compare('spam', 'eggs'))
    def test_salted_hmac(self):
        # Each case: (positional args, keyword args, expected hex digest).
        tests = [
            ((b'salt', b'value'), {}, 'b51a2e619c43b1ca4f91d15c57455521d71d61eb'),
            (('salt', 'value'), {}, 'b51a2e619c43b1ca4f91d15c57455521d71d61eb'),
            (
                ('salt', 'value'),
                {'secret': 'abcdefg'},
                '8bbee04ccddfa24772d1423a0ba43bd0c0e24b76',
            ),
            (
                ('salt', 'value'),
                {'secret': 'x' * hashlib.sha1().block_size},
                'bd3749347b412b1b0a9ea65220e55767ac8e96b0',
            ),
        ]
        for args, kwargs, digest in tests:
            with self.subTest(args=args, kwargs=kwargs):
                self.assertEqual(salted_hmac(*args, **kwargs).hexdigest(), digest)
class TestUtilsCryptoPBKDF2(unittest.TestCase):
    """Check pbkdf2() against RFC test vectors and regression cases."""
    # http://tools.ietf.org/html/draft-josefsson-pbkdf2-test-vectors-06
    rfc_vectors = [
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 1,
                "dklen": 20,
                "digest": hashlib.sha1,
            },
            "result": "0c60c80f961f0e71f3a9b524af6012062fe037a6",
        },
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 2,
                "dklen": 20,
                "digest": hashlib.sha1,
            },
            "result": "ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957",
        },
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 4096,
                "dklen": 20,
                "digest": hashlib.sha1,
            },
            "result": "4b007901b765489abead49d926f721d065a429c1",
        },
        # # this takes way too long :(
        # {
        #     "args": {
        #         "password": "password",
        #         "salt": "salt",
        #         "iterations": 16777216,
        #         "dklen": 20,
        #         "digest": hashlib.sha1,
        #     },
        #     "result": "eefe3d61cd4da4e4e9945b3d6ba2158c2634e984",
        # },
        {
            "args": {
                "password": "passwordPASSWORDpassword",
                "salt": "saltSALTsaltSALTsaltSALTsaltSALTsalt",
                "iterations": 4096,
                "dklen": 25,
                "digest": hashlib.sha1,
            },
            "result": "3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038",
        },
        {
            "args": {
                "password": "pass\0word",
                "salt": "sa\0lt",
                "iterations": 4096,
                "dklen": 16,
                "digest": hashlib.sha1,
            },
            "result": "56fa6aa75548099dcc37d7f03425e0c3",
        },
    ]
    # Non-RFC vectors covering other digests and the dklen=0 edge case.
    regression_vectors = [
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 1,
                "dklen": 20,
                "digest": hashlib.sha256,
            },
            "result": "120fb6cffcf8b32c43e7225256c4f837a86548c9",
        },
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 1,
                "dklen": 20,
                "digest": hashlib.sha512,
            },
            "result": "867f70cf1ade02cff3752599a3a53dc4af34c7a6",
        },
        {
            "args": {
                "password": "password",
                "salt": "salt",
                "iterations": 1000,
                "dklen": 0,
                "digest": hashlib.sha512,
            },
            "result": ("afe6c5530785b6cc6b1c6453384731bd5ee432ee"
                       "549fd42fb6695779ad8a1c5bf59de69c48f774ef"
                       "c4007d5298f9033c0241d5ab69305e7b64eceeb8d"
                       "834cfec"),
        },
        # Check leading zeros are not stripped (#17481)
        {
            "args": {
                "password": b'\xba',
                "salt": "salt",
                "iterations": 1,
                "dklen": 20,
                "digest": hashlib.sha1,
            },
            "result": '0053d3b91a7f1e54effebd6d68771e8a6e0b2c5b',
        },
    ]
    def test_public_vectors(self):
        for vector in self.rfc_vectors:
            result = pbkdf2(**vector['args'])
            self.assertEqual(result.hex(), vector['result'])
    def test_regression_vectors(self):
        for vector in self.regression_vectors:
            result = pbkdf2(**vector['args'])
            self.assertEqual(result.hex(), vector['result'])
    def test_default_hmac_alg(self):
        # pbkdf2() must default to SHA-256, matching hashlib.pbkdf2_hmac.
        kwargs = {'password': b'password', 'salt': b'salt', 'iterations': 1, 'dklen': 20}
        self.assertEqual(pbkdf2(**kwargs), hashlib.pbkdf2_hmac(hash_name=hashlib.sha256().name, **kwargs))
| true | true |
1c386af021f88e573d27c874eebd300562f672b0 | 48,057 | py | Python | builder/frameworks/espidf.py | lkaino/platform-espressif32 | 7406e9efb90c4a5c11bb7e08fc4869a38cf42ea8 | [
"Apache-2.0"
] | 7 | 2021-08-13T09:14:35.000Z | 2022-01-31T20:24:41.000Z | builder/frameworks/espidf.py | lkaino/platform-espressif32 | 7406e9efb90c4a5c11bb7e08fc4869a38cf42ea8 | [
"Apache-2.0"
] | 4 | 2022-01-26T07:31:25.000Z | 2022-03-13T10:35:25.000Z | builder/frameworks/espidf.py | lkaino/platform-espressif32 | 7406e9efb90c4a5c11bb7e08fc4869a38cf42ea8 | [
"Apache-2.0"
] | 4 | 2021-11-18T07:10:30.000Z | 2022-03-24T13:44:10.000Z | # Copyright 2020-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Espressif IDF
Espressif IoT Development Framework for ESP32 MCU
https://github.com/espressif/esp-idf
"""
import copy
import json
import subprocess
import sys
import os
import click
import semantic_version
from SCons.Script import (
ARGUMENTS,
COMMAND_LINE_TARGETS,
DefaultEnvironment,
)
from platformio import fs
from platformio.proc import exec_command
from platformio.util import get_systype
from platformio.builder.tools.piolib import ProjectAsLibBuilder
from platformio.package.version import get_original_version, pepver_to_semver
# SCons construction environment and board/platform metadata.
env = DefaultEnvironment()
env.SConscript("_embed_files.py", exports="env")
platform = env.PioPlatform()
board = env.BoardConfig()
# Target MCU selects the IDF variant and toolchain below.
mcu = board.get("build.mcu", "esp32")
idf_variant = mcu.lower()
FRAMEWORK_DIR = platform.get_package_dir("framework-espidf")
# esp32c3 is RISC-V based; esp32/esp32s2 use Xtensa toolchains.
TOOLCHAIN_DIR = platform.get_package_dir(
    "toolchain-%s"
    % (
        "riscv32-esp"
        if mcu == "esp32c3"
        else ("xtensa-esp32s2" if mcu == "esp32s2" else "xtensa-esp32")
    )
)
assert os.path.isdir(FRAMEWORK_DIR)
assert os.path.isdir(TOOLCHAIN_DIR)
if "arduino" in env.subst("$PIOFRAMEWORK"):
    ARDUINO_FRAMEWORK_DIR = platform.get_package_dir("framework-arduinoespressif32")
    # Possible package names in 'package@version' format is not compatible with CMake
    if "@" in os.path.basename(ARDUINO_FRAMEWORK_DIR):
        new_path = os.path.join(
            os.path.dirname(ARDUINO_FRAMEWORK_DIR),
            os.path.basename(ARDUINO_FRAMEWORK_DIR).replace("@", "-"),
        )
        os.rename(ARDUINO_FRAMEWORK_DIR, new_path)
        ARDUINO_FRAMEWORK_DIR = new_path
    assert ARDUINO_FRAMEWORK_DIR and os.path.isdir(ARDUINO_FRAMEWORK_DIR)
# Frequently used project locations.
BUILD_DIR = env.subst("$BUILD_DIR")
PROJECT_DIR = env.subst("$PROJECT_DIR")
PROJECT_SRC_DIR = env.subst("$PROJECT_SRC_DIR")
CMAKE_API_REPLY_PATH = os.path.join(".cmake", "api", "v1", "reply")
# sdkconfig location may be overridden per-board; defaults to
# "sdkconfig.<env name>" in the project root.
SDKCONFIG_PATH = board.get(
    "build.esp-idf.sdkconfig_path",
    os.path.join(PROJECT_DIR, "sdkconfig.%s" % env.subst("$PIOENV")),
)
def get_project_lib_includes(env):
    """Collect include paths from all dependent project library builders.

    Side effects: installs missing library dependencies and invalidates
    the cached list of PIO library builders afterwards.
    """
    project = ProjectAsLibBuilder(env, "$PROJECT_DIR")
    project.install_dependencies()
    project.search_deps_recursive()
    paths = []
    for lb in env.GetLibBuilders():
        # Only libraries the project actually depends on.
        if not lb.dependent:
            continue
        lb.env.PrependUnique(CPPPATH=lb.get_include_dirs())
        paths.extend(lb.env["CPPPATH"])
    # Reset cached builders so later build stages re-scan libraries.
    DefaultEnvironment().Replace(__PIO_LIB_BUILDERS=None)
    return paths
def is_cmake_reconfigure_required(cmake_api_reply_dir):
    """Return True when CMake must be re-run for the project.

    Reconfiguration is required when the API reply or preconfig folders
    are missing/empty, when the CMake cache or Ninja build file is
    absent, or when any watched file (sdkconfig, sdkconfig.defaults,
    project CMakeLists, preconfig dir, framework dir) is newer than the
    CMake cache.
    """
    cache_file = os.path.join(BUILD_DIR, "CMakeCache.txt")
    preconf_dir = os.path.join(BUILD_DIR, "config")
    default_sdk_config = os.path.join(PROJECT_DIR, "sdkconfig.defaults")
    # Both API reply and preconfig folders must exist and be non-empty.
    for required_dir in (cmake_api_reply_dir, preconf_dir):
        if not os.path.isdir(required_dir) or not os.listdir(required_dir):
            return True
    if not os.path.isfile(cache_file):
        return True
    if not os.path.isfile(os.path.join(BUILD_DIR, "build.ninja")):
        return True
    cache_mtime = os.path.getmtime(cache_file)
    # Missing or newer sdkconfig invalidates the cached configuration.
    if not os.path.isfile(SDKCONFIG_PATH) or os.path.getmtime(SDKCONFIG_PATH) > cache_mtime:
        return True
    if (
        os.path.isfile(default_sdk_config)
        and os.path.getmtime(default_sdk_config) > cache_mtime
    ):
        return True
    # Any watched path newer than the cache also triggers a reconfigure.
    watched_paths = [
        os.path.join(PROJECT_DIR, "CMakeLists.txt"),
        os.path.join(PROJECT_SRC_DIR, "CMakeLists.txt"),
        preconf_dir,
        FRAMEWORK_DIR,
    ]
    if any(os.path.getmtime(p) > cache_mtime for p in watched_paths):
        return True
    return False
def is_proper_idf_project():
    """True when both the project root and its src directory already
    contain a CMakeLists.txt file (i.e. the project is CMake-ready)."""
    root_cmake = os.path.join(PROJECT_DIR, "CMakeLists.txt")
    src_cmake = os.path.join(PROJECT_SRC_DIR, "CMakeLists.txt")
    return os.path.isfile(root_cmake) and os.path.isfile(src_cmake)
def collect_src_files():
    """List project source files matching SRC_FILTER, excluding headers."""
    return [
        f
        for f in env.MatchSourceFiles("$PROJECT_SRC_DIR", env.get("SRC_FILTER"))
        if not f.endswith((".h", ".hpp"))
    ]
def normalize_path(path):
    """Make a path CMake-friendly: substitute the project root with the
    ${CMAKE_SOURCE_DIR} variable and convert separators to unix style."""
    relocated = (
        path.replace(PROJECT_DIR, "${CMAKE_SOURCE_DIR}")
        if PROJECT_DIR in path
        else path
    )
    return fs.to_unix_path(relocated)
def create_default_project_files():
    """Generate minimal CMakeLists.txt files (and a stub main.c) so a bare
    PlatformIO project can be configured by the ESP-IDF build system.

    Existing files are never overwritten.
    """
    root_cmake_tpl = """cmake_minimum_required(VERSION 3.16.0)
include($ENV{IDF_PATH}/tools/cmake/project.cmake)
project(%s)
"""
    prj_cmake_tpl = """# This file was automatically generated for projects
# without default 'CMakeLists.txt' file.
FILE(GLOB_RECURSE app_sources %s/*.*)
idf_component_register(SRCS ${app_sources})
"""
    if not os.listdir(PROJECT_SRC_DIR):
        # create a default main file to make CMake happy during first init
        with open(os.path.join(PROJECT_SRC_DIR, "main.c"), "w") as fp:
            fp.write("void app_main() {}")
    project_dir = PROJECT_DIR
    if not os.path.isfile(os.path.join(project_dir, "CMakeLists.txt")):
        with open(os.path.join(project_dir, "CMakeLists.txt"), "w") as fp:
            fp.write(root_cmake_tpl % os.path.basename(project_dir))
    project_src_dir = PROJECT_SRC_DIR
    if not os.path.isfile(os.path.join(project_src_dir, "CMakeLists.txt")):
        with open(os.path.join(project_src_dir, "CMakeLists.txt"), "w") as fp:
            fp.write(prj_cmake_tpl % normalize_path(PROJECT_SRC_DIR))
def get_cmake_code_model(src_dir, build_dir, extra_args=None):
    """Return the CMake file-based API "codemodel-v2" object for the
    project, reconfiguring CMake first when required.

    Exits the build with an error if the API reply directory is missing
    after configuration.
    """
    cmake_api_dir = os.path.join(build_dir, ".cmake", "api", "v1")
    cmake_api_query_dir = os.path.join(cmake_api_dir, "query")
    cmake_api_reply_dir = os.path.join(cmake_api_dir, "reply")
    query_file = os.path.join(cmake_api_query_dir, "codemodel-v2")
    if not os.path.isfile(query_file):
        # The empty query file asks CMake to emit the codemodel reply.
        os.makedirs(os.path.dirname(query_file))
        open(query_file, "a").close()  # create an empty file
    if not is_proper_idf_project():
        create_default_project_files()
    if is_cmake_reconfigure_required(cmake_api_reply_dir):
        run_cmake(src_dir, build_dir, extra_args)
    if not os.path.isdir(cmake_api_reply_dir) or not os.listdir(cmake_api_reply_dir):
        sys.stderr.write("Error: Couldn't find CMake API response file\n")
        env.Exit(1)
    codemodel = {}
    for target in os.listdir(cmake_api_reply_dir):
        # The reply dir contains one codemodel index plus per-target files.
        if target.startswith("codemodel-v2"):
            with open(os.path.join(cmake_api_reply_dir, target), "r") as fp:
                codemodel = json.load(fp)
    assert codemodel["version"]["major"] == 2
    return codemodel
def populate_idf_env_vars(idf_env):
    """Extend an environment dict in place with IDF_PATH and the tool
    locations (toolchain, ninja, cmake, python, ULP toolchain, mconf on
    Windows) needed by the ESP-IDF build scripts."""
    idf_env["IDF_PATH"] = FRAMEWORK_DIR
    additional_packages = [
        os.path.join(TOOLCHAIN_DIR, "bin"),
        platform.get_package_dir("tool-ninja"),
        os.path.join(platform.get_package_dir("tool-cmake"), "bin"),
        os.path.dirname(env.subst("$PYTHONEXE")),
    ]
    if mcu != "esp32c3":
        # esp32c3 has no separate ULP coprocessor toolchain package.
        additional_packages.append(
            os.path.join(platform.get_package_dir("toolchain-%sulp" % mcu), "bin"),
        )
    if "windows" in get_systype():
        additional_packages.append(platform.get_package_dir("tool-mconf"))
    idf_env["PATH"] = os.pathsep.join(additional_packages + [idf_env["PATH"]])
    # Some users reported that the `IDF_TOOLS_PATH` var can seep into the
    # underlying build system. Unsetting it is a safe workaround.
    if "IDF_TOOLS_PATH" in idf_env:
        del idf_env["IDF_TOOLS_PATH"]
def get_target_config(project_configs, target_index, cmake_api_reply_dir):
    """Load the JSON config of a single CMake target from the API reply
    directory; exits the build if the target file is missing."""
    target_json = project_configs.get("targets")[target_index].get("jsonFile", "")
    target_config_file = os.path.join(cmake_api_reply_dir, target_json)
    if not os.path.isfile(target_config_file):
        sys.stderr.write("Error: Couldn't find target config %s\n" % target_json)
        env.Exit(1)
    with open(target_config_file) as fp:
        return json.load(fp)
def load_target_configurations(cmake_codemodel, cmake_api_reply_dir):
    """Map every CMake target name to its full target configuration,
    loaded from the file-based API reply directory.

    Only the first configuration of the codemodel is considered.
    """
    first_configuration = cmake_codemodel.get("configurations")[0]
    targets_by_name = {}
    for project in first_configuration.get("projects", []):
        for index in project.get("targetIndexes", []):
            target_cfg = get_target_config(
                first_configuration, index, cmake_api_reply_dir
            )
            targets_by_name[target_cfg["name"]] = target_cfg
    return targets_by_name
def build_library(default_env, lib_config, project_src_dir, prepend_dir=None):
    """Compile a CMake library target's sources and return a SCons static
    library node placed at the target's build path (optionally prefixed
    with ``prepend_dir``)."""
    lib_name = lib_config["nameOnDisk"]
    lib_path = lib_config["paths"]["build"]
    if prepend_dir:
        lib_path = os.path.join(prepend_dir, lib_path)
    lib_objects = compile_source_files(
        lib_config, default_env, project_src_dir, prepend_dir
    )
    return default_env.Library(
        target=os.path.join("$BUILD_DIR", lib_path, lib_name), source=lib_objects
    )
def get_app_includes(app_config):
    """Split the include directories of the application's first compile
    group into regular and system include paths.

    Returns a dict with "plain_includes" and "sys_includes" lists,
    preserving the original order of the includes.
    """
    includes = app_config["compileGroups"][0].get("includes", [])
    return {
        "plain_includes": [
            entry["path"] for entry in includes if not entry.get("isSystem", False)
        ],
        "sys_includes": [
            entry["path"] for entry in includes if entry.get("isSystem", False)
        ],
    }
def extract_defines(compile_group):
    """Collect preprocessor defines from a CMake compile group.

    Combines the explicit "defines" entries (with double quotes escaped
    and whitespace stripped) with any "-DNAME=VALUE" compiler fragments.
    """
    defines = [
        entry.get("define").replace('"', '\\"').strip()
        for entry in compile_group.get("defines", [])
    ]
    for fragment_info in compile_group.get("compileCommandFragments", []):
        fragment = fragment_info.get("fragment", "")
        # Some defines only appear as raw "-D..." compiler flags.
        if fragment.startswith("-D"):
            defines.append(fragment[2:])
    return defines
def get_app_defines(app_config):
    """Return the preprocessor defines of the app's first compile group."""
    first_group = app_config["compileGroups"][0]
    return extract_defines(first_group)
def extract_link_args(target_config):
    """Extract linker information from a CMake target config.

    Returns a dict with "LINKFLAGS", "LIBS" (library names), "LIBPATH"
    (search paths) and "__LIB_DEPS" (basenames of archives built
    internally by this script, kept only for dependency resolution).
    """

    def _add_to_libpath(lib_path, link_args):
        # Deduplicate library search paths.
        if lib_path not in link_args["LIBPATH"]:
            link_args["LIBPATH"].append(lib_path)

    def _add_archive(archive_path, link_args):
        # Register a prebuilt static archive: its directory goes to
        # LIBPATH and its basename to LIBS (deduplicated).
        archive_name = os.path.basename(archive_path)
        if archive_name not in link_args["LIBS"]:
            _add_to_libpath(os.path.dirname(archive_path), link_args)
            link_args["LIBS"].append(archive_name)

    link_args = {"LINKFLAGS": [], "LIBS": [], "LIBPATH": [], "__LIB_DEPS": []}
    for f in target_config.get("link", {}).get("commandFragments", []):
        fragment = f.get("fragment", "").strip()
        fragment_role = f.get("role", "").strip()
        if not fragment or not fragment_role:
            continue
        args = click.parser.split_arg_string(fragment)
        if fragment_role == "flags":
            link_args["LINKFLAGS"].extend(args)
        elif fragment_role == "libraries":
            if fragment.startswith("-l"):
                link_args["LIBS"].extend(args)
            elif fragment.startswith("-L"):
                lib_path = fragment.replace("-L", "").strip().strip('"')
                _add_to_libpath(lib_path, link_args)
            elif fragment.startswith("-") and not fragment.startswith("-l"):
                # CMake mistakenly marks LINKFLAGS as libraries
                link_args["LINKFLAGS"].extend(args)
            elif fragment.endswith(".a"):
                archive_path = fragment
                # process static archives
                if archive_path.startswith(FRAMEWORK_DIR):
                    # In case of precompiled archives from framework package
                    _add_archive(archive_path, link_args)
                else:
                    # In case of archives within project
                    if archive_path.startswith(".."):
                        # Precompiled archives from project component
                        _add_archive(
                            os.path.normpath(os.path.join(BUILD_DIR, archive_path)),
                            link_args,
                        )
                    else:
                        # Internally built libraries used for dependency resolution
                        link_args["__LIB_DEPS"].append(os.path.basename(archive_path))
    return link_args
def filter_args(args, allowed, ignore=None):
    """Pick out flags whose prefix is in ``allowed`` (skipping ``ignore`` prefixes).

    A matched flag's following token is treated as its value and kept as
    well, unless that token itself starts with "-".
    """
    if not allowed:
        return []
    ignored = ignore or []
    matched = []
    idx = 0
    total = len(args)
    while idx < total:
        current = args[idx]
        is_allowed = any(current.startswith(prefix) for prefix in allowed)
        is_ignored = any(current.startswith(prefix) for prefix in ignored)
        if is_allowed and not is_ignored:
            matched.append(current)
            if idx + 1 < total and not args[idx + 1].startswith("-"):
                idx += 1
                matched.append(args[idx])
        idx += 1
    return matched
def get_app_flags(app_config, default_config):
    """Return sorted ASM/C/CXX compiler flags for the app target.

    Falls back to the flags of the default (dummy) component when the
    app config is missing a language. "-D" fragments are skipped here;
    defines are handled separately via extract_defines().
    """

    def _extract_flags(config):
        # Map language name ("ASM"/"C"/"CXX") -> list of flag tokens.
        flags = {}
        for cg in config["compileGroups"]:
            flags[cg["language"]] = []
            for ccfragment in cg["compileCommandFragments"]:
                fragment = ccfragment.get("fragment", "")
                if not fragment.strip() or fragment.startswith("-D"):
                    continue
                flags[cg["language"]].extend(
                    click.parser.split_arg_string(fragment.strip())
                )
        return flags

    app_flags = _extract_flags(app_config)
    default_flags = _extract_flags(default_config)
    # Flags are sorted because CMake randomly populates build flags in code model
    return {
        "ASFLAGS": sorted(app_flags.get("ASM", default_flags.get("ASM"))),
        "CFLAGS": sorted(app_flags.get("C", default_flags.get("C"))),
        "CXXFLAGS": sorted(app_flags.get("CXX", default_flags.get("CXX"))),
    }
def get_sdk_configuration():
    """Load the generated sdkconfig.json as a dict.

    Returns an empty dict when the file is missing or malformed so
    callers can safely use ``.get()`` with defaults.
    """
    config_path = os.path.join(BUILD_DIR, "config", "sdkconfig.json")
    if not os.path.isfile(config_path):
        print('Warning: Could not find "sdkconfig.json" file\n')
    try:
        with open(config_path, "r") as fp:
            return json.load(fp)
    except (OSError, ValueError):
        # OSError: file missing/unreadable; ValueError covers
        # json.JSONDecodeError for malformed content. The previous bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit.
        return {}
def find_framework_service_files(search_path, sdk_config):
    """Collect linker fragment (.lf) and Kconfig files from framework components.

    Hardware-specific linker fragments (component dirs named esp32*/riscv)
    are skipped during the directory scan and added explicitly afterwards
    depending on the configured MCU and sdkconfig options.
    """
    result = {}
    result["lf_files"] = list()
    result["kconfig_files"] = list()
    result["kconfig_build_files"] = list()
    for d in os.listdir(search_path):
        path = os.path.join(search_path, d)
        if not os.path.isdir(path):
            continue
        for f in os.listdir(path):
            # Skip hardware specific files as they will be added later
            if f == "linker.lf" and not os.path.basename(path).startswith(
                ("esp32", "riscv")
            ):
                result["lf_files"].append(os.path.join(path, f))
            elif f == "Kconfig.projbuild":
                result["kconfig_build_files"].append(os.path.join(path, f))
            elif f == "Kconfig":
                result["kconfig_files"].append(os.path.join(path, f))
    # RISC-V targets additionally need the riscv component's fragment
    if mcu == "esp32c3":
        result["lf_files"].append(
            os.path.join(FRAMEWORK_DIR, "components", "riscv", "linker.lf")
        )
    # Common fragments used by every target
    result["lf_files"].extend(
        [
            os.path.join(
                FRAMEWORK_DIR,
                "components",
                "esp_common",
                "common.lf"),
            os.path.join(
                FRAMEWORK_DIR,
                "components",
                "esp_common",
                "soc.lf"),
            os.path.join(
                FRAMEWORK_DIR,
                "components",
                "esp_system",
                "app.lf"),
            os.path.join(FRAMEWORK_DIR, "components", "newlib", "newlib.lf"),
            os.path.join(FRAMEWORK_DIR, "components", "newlib", "system_libs.lf"),
        ]
    )
    # Extra fragment required by the PSRAM cache workaround option
    if sdk_config.get("SPIRAM_CACHE_WORKAROUND", False):
        result["lf_files"].append(
            os.path.join(
                FRAMEWORK_DIR, "components", "newlib", "esp32-spiram-rom-functions-c.lf"
            )
        )
    return result
def create_custom_libraries_list(ldgen_libraries_file, ignore_targets):
    """Create a filtered copy of the ldgen libraries list.

    Archives belonging to ``ignore_targets`` (built internally by
    PlatformIO) are dropped. The filtered file is cached next to the
    original with a "_pio" suffix and reused on later runs.
    """
    if not os.path.isfile(ldgen_libraries_file):
        sys.stderr.write("Error: Couldn't find the list of framework libraries\n")
        env.Exit(1)

    pio_libraries_file = ldgen_libraries_file + "_pio"
    if os.path.isfile(pio_libraries_file):
        return pio_libraries_file

    ignored_archives = [
        "lib%s.a" % t.replace("__idf_", "") for t in ignore_targets
    ]
    with open(ldgen_libraries_file, "r") as src:
        lib_paths = src.readlines()
    with open(pio_libraries_file, "w") as dst:
        for lib_path in lib_paths:
            if not any(archive in lib_path for archive in ignored_archives):
                dst.write(lib_path)

    return pio_libraries_file
def generate_project_ld_script(sdk_config, ignore_targets=None):
    """Generate the project's linker scripts (memory.ld and sections.ld).

    Preprocesses the memory template (following components/esp_system/
    ld.cmake) and runs ldgen.py over the collected linker fragments.
    Returns the SCons node for sections.ld.
    """
    ignore_targets = ignore_targets or []
    project_files = find_framework_service_files(
        os.path.join(FRAMEWORK_DIR, "components"), sdk_config
    )
    # Create a new file to avoid automatically generated library entry as files from
    # this library are built internally by PlatformIO
    libraries_list = create_custom_libraries_list(
        os.path.join(BUILD_DIR, "ldgen_libraries"), ignore_targets
    )
    # Rework the memory template linker script, following components/esp_system/ld.cmake
    # The C compiler is used only as a preprocessor (-E) on memory.ld.in.
    args = {
        "preprocess" : os.path.join(
            TOOLCHAIN_DIR,
            "bin",
            env.subst("$CC")),
        "ld_output": os.path.join("$BUILD_DIR", "memory.ld"),
        "ld_dir": os.path.join(FRAMEWORK_DIR,
            "components",
            "esp_system",
            "ld"),
        "ld_input": os.path.join(
            FRAMEWORK_DIR,
            "components",
            "esp_system",
            "ld",
            idf_variant,
            "memory.ld.in",
        ),
        "project_output": os.path.join("$BUILD_DIR", "%s.project.ld" % idf_variant),
        "config": os.path.join("$BUILD_DIR", "config"),
        "flags" : '-C -P -x c -E -o '
    }
    cmd = (
        '"{preprocess}" {flags} "{ld_output}" -I "{config}" -I "{ld_dir}" "{ld_input}"'
    ).format(**args)
    env.Command(
        os.path.join("$BUILD_DIR", "memory.ld"),
        os.path.join(
            FRAMEWORK_DIR,
            "components",
            "esp_system",
            "ld",
            idf_variant,
            "memory.ld.in",
        ),
        env.VerboseAction(cmd, "Generating memory linker script $TARGET"),
    )
    # ldgen.py merges the linker fragments into the sections template.
    args = {
        "script": os.path.join(FRAMEWORK_DIR, "tools", "ldgen", "ldgen.py"),
        "config": SDKCONFIG_PATH,
        "fragments": " ".join(['"%s"' % f for f in project_files.get("lf_files")]),
        # "fragments": "".join(['%s;' % f for f in project_files.get("lf_files")]).strip(';'),
        "kconfig": os.path.join(FRAMEWORK_DIR, "Kconfig"),
        "env_file": os.path.join("$BUILD_DIR", "config.env"),
        "libraries_list": libraries_list,
        "section_input": os.path.join(
            FRAMEWORK_DIR,
            "components",
            "esp_system",
            "ld",
            idf_variant,
            "sections.ld.in",
        ),
        "objdump": os.path.join(
            TOOLCHAIN_DIR,
            "bin",
            env.subst("$CC").replace("-gcc", "-objdump"),
        ),
    }
    cmd = (
        '"$PYTHONEXE" "{script}" --input $SOURCE '
        '--config "{config}" --fragments {fragments} --output $TARGET '
        '--kconfig "{kconfig}" --env-file "{env_file}" '
        '--libraries-file "{libraries_list}" '
        '--objdump "{objdump}"'
    ).format(**args)
    return env.Command(
        os.path.join("$BUILD_DIR", "sections.ld"),
        os.path.join(
            FRAMEWORK_DIR,
            "components",
            "esp_system",
            "ld",
            idf_variant,
            "sections.ld.in",
        ),
        env.VerboseAction(cmd, "Generating project linker script $TARGET"),
    )
def prepare_build_envs(config, default_env):
    """Create one SCons environment per compile group of a CMake target.

    Each environment gets the group's include dirs, defines and compile
    flags; system include dirs are passed via -isystem. ASFLAGS mirror
    CCFLAGS so assembly sources use the same options.
    """
    build_envs = []
    target_compile_groups = config.get("compileGroups")
    # Debug flags are enabled for a "debug" build type or when the
    # "debug"/"sizedata" targets were requested on the command line.
    is_build_type_debug = (
        set(["debug", "sizedata"]) & set(COMMAND_LINE_TARGETS)
        or default_env.GetProjectOption("build_type") == "debug"
    )
    for cg in target_compile_groups:
        includes = []
        sys_includes = []
        for inc in cg.get("includes", []):
            inc_path = inc["path"]
            if inc.get("isSystem", False):
                sys_includes.append(inc_path)
            else:
                includes.append(inc_path)
        defines = extract_defines(cg)
        compile_commands = cg.get("compileCommandFragments", [])
        build_env = default_env.Clone()
        for cc in compile_commands:
            build_flags = cc.get("fragment")
            # "-D" fragments are handled via CPPDEFINES below
            if not build_flags.startswith("-D"):
                build_env.AppendUnique(**build_env.ParseFlags(build_flags))
        build_env.AppendUnique(CPPDEFINES=defines, CPPPATH=includes)
        if sys_includes:
            build_env.Append(CCFLAGS=[("-isystem", inc) for inc in sys_includes])
        # Copy (not alias) CCFLAGS into ASFLAGS
        build_env.Append(ASFLAGS=build_env.get("CCFLAGS", [])[:])
        build_env.ProcessUnFlags(default_env.get("BUILD_UNFLAGS"))
        if is_build_type_debug:
            build_env.ConfigureDebugFlags()
        build_envs.append(build_env)
    return build_envs
def compile_source_files(config, default_env, project_src_dir, prepend_dir=None):
    """Compile all sources of a CMake target into SCons static objects.

    Object files are placed under $BUILD_DIR, mirroring either the
    framework "components" tree or the project layout. Sources without a
    compile group index and ".rule" files are skipped.
    """
    build_envs = prepare_build_envs(config, default_env)
    objects = []
    components_dir = fs.to_unix_path(os.path.join(FRAMEWORK_DIR, "components"))
    for source in config.get("sources", []):
        if source["path"].endswith(".rule"):
            continue
        compile_group_idx = source.get("compileGroupIndex")
        if compile_group_idx is not None:
            src_dir = config["paths"]["source"]
            if not os.path.isabs(src_dir):
                src_dir = os.path.join(project_src_dir, config["paths"]["source"])
            src_path = source.get("path")
            if not os.path.isabs(src_path):
                # For cases when sources are located near CMakeLists.txt
                src_path = os.path.join(project_src_dir, src_path)
            obj_path = os.path.join("$BUILD_DIR", prepend_dir or "")
            if src_path.startswith(components_dir):
                # Framework sources keep their path relative to "components"
                obj_path = os.path.join(
                    obj_path, os.path.relpath(src_path, components_dir)
                )
            else:
                # Project sources: keep relative layout when possible,
                # otherwise fall back to the bare file name.
                if not os.path.isabs(source["path"]):
                    obj_path = os.path.join(obj_path, source["path"])
                else:
                    obj_path = os.path.join(obj_path, os.path.basename(src_path))
            objects.append(
                build_envs[compile_group_idx].StaticObject(
                    target=os.path.splitext(obj_path)[0] + ".o",
                    source=os.path.realpath(src_path),
                )
            )
    return objects
def run_tool(cmd):
    """Run an external tool with the IDF environment; abort the build on failure."""
    tool_env = os.environ.copy()
    populate_idf_env_vars(tool_env)

    result = exec_command(cmd, env=tool_env)
    if result["returncode"] != 0:
        sys.stderr.write(result["out"] + "\n")
        sys.stderr.write(result["err"] + "\n")
        env.Exit(1)

    # Echo the tool output only in verbose mode
    if int(ARGUMENTS.get("PIOVERBOSE", 0)):
        print(result["out"])
        print(result["err"])
def RunMenuconfig(target, source, env):
    """SCons action that launches ESP-IDF's interactive menuconfig."""
    menuconfig_env = os.environ.copy()
    populate_idf_env_vars(menuconfig_env)

    cmake_bin = os.path.join(
        platform.get_package_dir("tool-cmake"), "bin", "cmake"
    )
    returncode = subprocess.call(
        [cmake_bin, "--build", BUILD_DIR, "--target", "menuconfig"],
        env=menuconfig_env,
    )
    if returncode != 0:
        sys.stderr.write("Error: Couldn't execute 'menuconfig' target.\n")
        env.Exit(1)
def run_cmake(src_dir, build_dir, extra_args=None):
    """Configure a CMake project with the Ninja generator via run_tool()."""
    cmake_bin = os.path.join(
        platform.get_package_dir("tool-cmake") or "", "bin", "cmake"
    )
    cmd = [cmake_bin, "-S", src_dir, "-B", build_dir, "-G", "Ninja"]
    cmd.extend(extra_args or [])
    run_tool(cmd)
def find_lib_deps(components_map, elf_config, link_args, ignore_components=None):
    """Resolve the component libraries the ELF target links against.

    Explicit CMake dependencies come first, followed by static/object
    libraries that only appear on the linker command line (__LIB_DEPS).
    """
    ignore_components = ignore_components or []
    ignored_prefixes = tuple(ignore_components)

    result = []
    for dep in elf_config.get("dependencies", []):
        dep_id = dep["id"]
        if components_map.get(dep_id, {}) and not dep_id.startswith(ignored_prefixes):
            result.append(components_map[dep_id]["lib"])

    implicit_lib_deps = link_args.get("__LIB_DEPS", [])
    for component in components_map.values():
        cfg = component["config"]
        if cfg["type"] not in ("STATIC_LIBRARY", "OBJECT_LIBRARY"):
            continue
        if cfg["name"] in ignore_components:
            continue
        if cfg["nameOnDisk"] in implicit_lib_deps and component["lib"] not in result:
            result.append(component["lib"])

    return result
def fix_ld_paths(extra_flags):
    """Insert missing "-L" search paths before the peripherals linker script.

    ESP linker scripts changed location in ESP-IDF 4.4+, so the soc,
    esp_rom and bootloader "ld" directories are added to the linker's
    search path right before "<variant>.peripherals.ld".
    Returns the (possibly modified) flag list.
    """
    peripheral_framework_path = os.path.join(FRAMEWORK_DIR, "components", "soc", idf_variant, "ld")
    rom_framework_path = os.path.join(FRAMEWORK_DIR, "components", "esp_rom", idf_variant, "ld")
    bl_framework_path = os.path.join(FRAMEWORK_DIR, "components", "bootloader", "subproject", "main", "ld", idf_variant)
    try:
        ld_index = extra_flags.index("%s.peripherals.ld" % idf_variant)
    except ValueError:
        # Only list.index() can fail here; the previous bare `except:`
        # also masked unrelated errors (including KeyboardInterrupt).
        print("Error while parsing the flags")
    else:
        extra_flags[ld_index - 1 : ld_index - 1] = [
            "-L",
            peripheral_framework_path,
            "-L",
            rom_framework_path,
            "-L",
            bl_framework_path,
        ]
    return extra_flags
def build_bootloader():
    """Configure and build the second-stage bootloader as a separate project.

    Runs CMake on the bootloader subproject, compiles its components,
    links bootloader.elf and returns the node converting it to a binary.
    """
    bootloader_src_dir = os.path.join(
        FRAMEWORK_DIR, "components", "bootloader", "subproject"
    )
    code_model = get_cmake_code_model(
        bootloader_src_dir,
        os.path.join(BUILD_DIR, "bootloader"),
        [
            "-DIDF_TARGET=" + idf_variant,
            "-DPYTHON_DEPS_CHECKED=1",
            "-DPYTHON=" + env.subst("$PYTHONEXE"),
            "-DIDF_PATH=" + FRAMEWORK_DIR,
            "-DSDKCONFIG=" + SDKCONFIG_PATH,
            "-DLEGACY_INCLUDE_COMMON_HEADERS=",
            "-DEXTRA_COMPONENT_DIRS="
            + os.path.join(FRAMEWORK_DIR, "components", "bootloader"),
        ],
    )
    if not code_model:
        sys.stderr.write("Error: Couldn't find code model for bootloader\n")
        env.Exit(1)
    target_configs = load_target_configurations(
        code_model,
        os.path.join(BUILD_DIR, "bootloader", ".cmake", "api", "v1", "reply"),
    )
    elf_config = get_project_elf(target_configs)
    if not elf_config:
        sys.stderr.write(
            "Error: Couldn't load the main firmware target of the project\n"
        )
        env.Exit(1)
    bootloader_env = env.Clone()
    components_map = get_components_map(
        target_configs, ["STATIC_LIBRARY", "OBJECT_LIBRARY"]
    )
    build_components(bootloader_env, components_map, bootloader_src_dir, "bootloader")
    link_args = extract_link_args(elf_config)
    # "-T"/"-u" flags are passed explicitly; the rest go through MergeFlags
    extra_flags = filter_args(link_args["LINKFLAGS"], ["-T", "-u"])
    extra_flags = fix_ld_paths(extra_flags)
    link_args["LINKFLAGS"] = sorted(
        list(set(link_args["LINKFLAGS"]) - set(extra_flags))
    )
    bootloader_env.MergeFlags(link_args)
    bootloader_env.Append(LINKFLAGS=extra_flags)
    bootloader_libs = find_lib_deps(components_map, elf_config, link_args)
    # Wrap the libraries in --start-group/--end-group so the linker
    # resolves circular dependencies between them.
    bootloader_env.Prepend(__RPATH="-Wl,--start-group ")
    bootloader_env.Append(
        CPPDEFINES=["__BOOTLOADER_BUILD"], _LIBDIRFLAGS=" -Wl,--end-group"
    )
    return bootloader_env.ElfToBin(
        os.path.join("$BUILD_DIR", "bootloader"),
        bootloader_env.Program(
            os.path.join("$BUILD_DIR", "bootloader.elf"), bootloader_libs
        ),
    )
def get_targets_by_type(target_configs, target_types, ignore_targets=None):
    """Return target configs whose "type" is in ``target_types``, skipping ignored names."""
    skipped = ignore_targets or []
    return [
        config
        for config in target_configs.values()
        if config["type"] in target_types and config["name"] not in skipped
    ]
def get_components_map(target_configs, target_types, ignore_components=None):
    """Map component target id -> {"config": ...} for the given target types."""
    matching = get_targets_by_type(target_configs, target_types, ignore_components)
    return {config["id"]: {"config": config} for config in matching}
def build_components(env, components_map, project_src_dir, prepend_dir=None):
    """Build every component library and store the node under its "lib" key in-place."""
    for component in components_map.values():
        component["lib"] = build_library(
            env, component["config"], project_src_dir, prepend_dir
        )
def get_project_elf(target_configs):
    """Return the config of the project's ELF (executable) target.

    Returns None when no executable target exists so callers can report
    a proper error (they all check ``if not elf_config:``) instead of
    this function raising IndexError. Warns and uses the first target
    when several executables are found.
    """
    exec_targets = get_targets_by_type(target_configs, ["EXECUTABLE"])
    if not exec_targets:
        # Previously an empty list raised IndexError on exec_targets[0].
        return None
    if len(exec_targets) > 1:
        print(
            "Warning: Multiple elf targets found. The %s will be used!"
            % exec_targets[0]["name"]
        )
    return exec_targets[0]
def generate_default_component():
    """Create the dummy "__pio_env" component and return its directory path."""
    # Used to force CMake generate build environments for all supported languages
    prj_cmake_tpl = """# Warning! Do not delete this auto-generated file.
file(GLOB component_sources *.c* *.S)
idf_component_register(SRCS ${component_sources})
"""
    dummy_component_path = os.path.join(BUILD_DIR, "__pio_env")
    if not os.path.isdir(dummy_component_path):
        os.makedirs(dummy_component_path)
    # One empty source per language (C++, C, ASM) so CMake emits a
    # compile group for each of them.
    for ext in (".cpp", ".c", ".S"):
        dummy_file = os.path.join(dummy_component_path, "__dummy" + ext)
        if not os.path.isfile(dummy_file):
            open(dummy_file, "a").close()
    component_cmake = os.path.join(dummy_component_path, "CMakeLists.txt")
    if not os.path.isfile(component_cmake):
        with open(component_cmake, "w") as fp:
            fp.write(prj_cmake_tpl)
    return dummy_component_path
def find_default_component(target_configs):
    """Return the first target name containing "__pio_env" (the dummy component), or ""."""
    return next(
        (config for config in target_configs if "__pio_env" in config), ""
    )
def create_version_file():
    """Write the framework version into version.txt once (the package has no .git)."""
    version_file = os.path.join(FRAMEWORK_DIR, "version.txt")
    if os.path.isfile(version_file):
        return
    package_version = platform.get_package_version("framework-espidf")
    with open(version_file, "w") as fp:
        fp.write(get_original_version(package_version) or package_version)
def generate_empty_partition_image(binary_path, image_size):
    """Register a build step creating an empty partition image of the given size."""
    gen_script = os.path.join(
        FRAMEWORK_DIR,
        "components",
        "partition_table",
        "gen_empty_partition.py",
    )
    command = '"$PYTHONEXE" "%s" %s $TARGET' % (gen_script, image_size)
    empty_partition = env.Command(
        binary_path,
        None,
        env.VerboseAction(command, "Generating an empty partition $TARGET"),
    )
    env.Depends("$BUILD_DIR/$PROGNAME$PROGSUFFIX", empty_partition)
def get_partition_info(pt_path, pt_offset, pt_params):
    """Query parttool.py for a partition's size and offset.

    pt_params selects the partition: {"name": "boot"} picks the default
    boot partition, otherwise {"type": ..., "subtype": ...} is used.
    Returns {"size": ..., "offset": ...} parsed from the tool output
    (0 for both when the partition is absent).
    """
    if not os.path.isfile(pt_path):
        sys.stderr.write(
            "Missing partition table file `%s`\n" % os.path.basename(pt_path)
        )
        env.Exit(1)
    cmd = [
        env.subst("$PYTHONEXE"),
        os.path.join(FRAMEWORK_DIR, "components", "partition_table", "parttool.py"),
        "-q",
        "--partition-table-offset",
        hex(pt_offset),
        "--partition-table-file",
        pt_path,
        "get_partition_info",
        "--info",
        "size",
        "offset",
    ]
    if pt_params["name"] == "boot":
        cmd.append("--partition-boot-default")
    else:
        cmd.extend(
            [
                "--partition-type",
                pt_params["type"],
                "--partition-subtype",
                pt_params["subtype"],
            ]
        )
    result = exec_command(cmd)
    if result["returncode"] != 0:
        sys.stderr.write(
            "Couldn't extract information for %s/%s from the partition table\n"
            % (pt_params["type"], pt_params["subtype"])
        )
        sys.stderr.write(result["out"] + "\n")
        sys.stderr.write(result["err"] + "\n")
        env.Exit(1)
    size = offset = 0
    if result["out"].strip():
        # parttool prints "<size> <offset>" on a single line
        size, offset = result["out"].strip().split(" ", 1)
    return {"size": size, "offset": offset}
def get_app_partition_offset(pt_table, pt_offset):
    """Return the application (default boot) partition offset, defaulting to 0x10000."""
    boot_params = get_partition_info(pt_table, pt_offset, {"name": "boot"})
    return boot_params.get("offset", "0x10000")
def generate_mbedtls_bundle(sdk_config):
    """Generate the x509 certificate bundle and its assembly wrapper.

    gen_crt_bundle.py builds "x509_crt_bundle" in $BUILD_DIR from the
    certificate sources selected in sdkconfig, then a CMake helper
    script embeds the binary as an .S file.
    """
    bundle_path = os.path.join("$BUILD_DIR", "x509_crt_bundle")
    if os.path.isfile(env.subst(bundle_path)):
        # Bundle already generated on a previous run
        return
    default_crt_dir = os.path.join(
        FRAMEWORK_DIR, "components", "mbedtls", "esp_crt_bundle"
    )
    cmd = [env.subst("$PYTHONEXE"), os.path.join(default_crt_dir, "gen_crt_bundle.py")]
    crt_args = ["--input"]
    if sdk_config.get("MBEDTLS_CERTIFICATE_BUNDLE_DEFAULT_FULL", False):
        crt_args.append(os.path.join(default_crt_dir, "cacrt_all.pem"))
    elif sdk_config.get("MBEDTLS_CERTIFICATE_BUNDLE_DEFAULT_CMN", False):
        # Common set: full PEM list filtered by the authorities CSV
        crt_args.append(os.path.join(default_crt_dir, "cacrt_all.pem"))
        cmd.extend(
            ["--filter", os.path.join(default_crt_dir, "cmn_crt_authorities.csv")]
        )
    if sdk_config.get("MBEDTLS_CUSTOM_CERTIFICATE_BUNDLE", False):
        cert_path = sdk_config.get("MBEDTLS_CUSTOM_CERTIFICATE_BUNDLE_PATH", "")
        if os.path.isfile(cert_path) or os.path.isdir(cert_path):
            crt_args.append(os.path.abspath(cert_path))
        else:
            print("Warning! Couldn't find custom certificate bundle %s" % cert_path)
    crt_args.append("-q")
    # Use exec_command to change working directory
    exec_command(cmd + crt_args, cwd=BUILD_DIR)
    bundle_path = os.path.join("$BUILD_DIR", "x509_crt_bundle")
    env.Execute(
        env.VerboseAction(
            " ".join(
                [
                    os.path.join(
                        env.PioPlatform().get_package_dir("tool-cmake"),
                        "bin",
                        "cmake",
                    ),
                    "-DDATA_FILE=" + bundle_path,
                    "-DSOURCE_FILE=%s.S" % bundle_path,
                    "-DFILE_TYPE=BINARY",
                    "-P",
                    os.path.join(
                        FRAMEWORK_DIR,
                        "tools",
                        "cmake",
                        "scripts",
                        "data_file_embed_asm.cmake",
                    ),
                ]
            ),
            "Generating assembly for certificate bundle...",
        )
    )
def install_python_deps():
    """Install/upgrade the Python packages required by the ESP-IDF tooling.

    Compares installed pip packages against pinned version specs and
    force-reinstalls any that are missing or outside the allowed range.
    On Windows an extra curses package is installed for menuconfig.
    """

    def _get_installed_pip_packages():
        # Map package name -> semantic version for everything pip reports.
        result = {}
        packages = {}
        pip_output = subprocess.check_output(
            [env.subst("$PYTHONEXE"), "-m", "pip", "list", "--format=json"]
        )
        try:
            packages = json.loads(pip_output)
        except:
            print("Warning! Couldn't extract the list of installed Python packages.")
            return {}
        for p in packages:
            result[p["name"]] = pepver_to_semver(p["version"])
        return result

    deps = {
        # https://github.com/platformio/platform-espressif32/issues/635
        "cryptography": ">=2.1.4,<35.0.0",
        "future": ">=0.15.2",
        "pyparsing": ">=2.0.3,<2.4.0",
        "kconfiglib": "==13.7.1",
    }
    installed_packages = _get_installed_pip_packages()
    packages_to_install = []
    for package, spec in deps.items():
        if package not in installed_packages:
            packages_to_install.append(package)
        else:
            version_spec = semantic_version.Spec(spec)
            if not version_spec.match(installed_packages[package]):
                packages_to_install.append(package)
    if packages_to_install:
        env.Execute(
            env.VerboseAction(
                (
                    '"$PYTHONEXE" -m pip install -U --force-reinstall '
                    + " ".join(['"%s%s"' % (p, deps[p]) for p in packages_to_install])
                ),
                "Installing ESP-IDF's Python dependencies",
            )
        )
    # a special "esp-windows-curses" python package is required on Windows for Menuconfig
    if "windows" in get_systype():
        import pkg_resources
        if "esp-windows-curses" not in {pkg.key for pkg in pkg_resources.working_set}:
            env.Execute(
                env.VerboseAction(
                    '$PYTHONEXE -m pip install "file://%s/tools/kconfig_new/esp-windows-curses" windows-curses'
                    % FRAMEWORK_DIR,
                    "Installing windows-curses package",
                )
            )
#
# ESP-IDF requires Python packages with specific versions
#
install_python_deps()
# ESP-IDF package doesn't contain .git folder, instead package version is specified
# in a special file "version.h" in the root folder of the package
create_version_file()
#
# Generate final linker script
#
# Only generate memory.ld when the board doesn't define a custom ldscript;
# the C compiler is used as a preprocessor to expand memory.ld.in.
if not board.get("build.ldscript", ""):
    linker_script = env.Command(
        os.path.join("$BUILD_DIR", "memory.ld"),
        board.get(
            "build.esp-idf.ldscript",
            os.path.join(
                FRAMEWORK_DIR, "components", "esp_system", "ld", idf_variant, "memory.ld.in"
            ),
        ),
        env.VerboseAction(
            '$CC -I"$BUILD_DIR/config" -I"' +
            os.path.join(FRAMEWORK_DIR, "components", "esp_system", "ld") +
            '" -C -P -x c -E $SOURCE -o $TARGET',
            "Generating LD script $TARGET",
        ),
    )
    env.Depends("$BUILD_DIR/$PROGNAME$PROGSUFFIX", linker_script)
    env.Replace(LDSCRIPT_PATH="memory.ld")
#
# Generate partition table
#
fwpartitions_dir = os.path.join(FRAMEWORK_DIR, "components", "partition_table")
partitions_csv = board.get("build.partitions", "partitions_singleapp.csv")
# Prefer the framework-bundled CSV when it exists; otherwise treat the
# configured value as a project-relative path.
env.Replace(
    PARTITIONS_TABLE_CSV=os.path.abspath(
        os.path.join(fwpartitions_dir, partitions_csv)
        if os.path.isfile(os.path.join(fwpartitions_dir, partitions_csv))
        else partitions_csv
    )
)
partition_table = env.Command(
    os.path.join("$BUILD_DIR", "partitions.bin"),
    "$PARTITIONS_TABLE_CSV",
    env.VerboseAction(
        '"$PYTHONEXE" "%s" -q --flash-size "%s" $SOURCE $TARGET'
        % (
            os.path.join(
                FRAMEWORK_DIR, "components", "partition_table", "gen_esp32part.py"
            ),
            board.get("upload.flash_size", "4MB"),
        ),
        "Generating partitions $TARGET",
    ),
)
env.Depends("$BUILD_DIR/$PROGNAME$PROGSUFFIX", partition_table)
#
# Current build script limitations
#
# Whitespace in paths breaks the generated build commands.
if any(" " in p for p in (FRAMEWORK_DIR, BUILD_DIR)):
    sys.stderr.write("Error: Detected a whitespace character in project paths.\n")
    env.Exit(1)
if not os.path.isdir(PROJECT_SRC_DIR):
    sys.stderr.write(
        "Error: Missing the `%s` folder with project sources.\n"
        % os.path.basename(PROJECT_SRC_DIR)
    )
    env.Exit(1)
if env.subst("$SRC_FILTER"):
    print(
        (
            "Warning: the 'src_filter' option cannot be used with ESP-IDF. Select source "
            "files to build in the project CMakeLists.txt file.\n"
        )
    )
if os.path.isfile(os.path.join(PROJECT_SRC_DIR, "sdkconfig.h")):
    print(
        "Warning! Starting with ESP-IDF v4.0, new project structure is required: \n"
        "https://docs.platformio.org/en/latest/frameworks/espidf.html#project-structure"
    )
#
# Initial targets loading
#
# By default 'main' folder is used to store source files. In case when a user has
# default 'src' folder we need to add this as an extra component. If there is no 'main'
# folder CMake won't generate dependencies properly
extra_components = [generate_default_component()]
if PROJECT_SRC_DIR != os.path.join(PROJECT_DIR, "main"):
    extra_components.append(PROJECT_SRC_DIR)
if "arduino" in env.subst("$PIOFRAMEWORK"):
    print(
        "Warning! Arduino framework as an ESP-IDF component doesn't handle "
        "the `variant` field! The default `esp32` variant will be used."
    )
    extra_components.append(ARDUINO_FRAMEWORK_DIR)
print("Reading CMake configuration...")
project_codemodel = get_cmake_code_model(
    PROJECT_DIR,
    BUILD_DIR,
    [
        "-DIDF_TARGET=" + idf_variant,
        "-DPYTHON_DEPS_CHECKED=1",
        "-DEXTRA_COMPONENT_DIRS:PATH=" + ";".join(extra_components),
        "-DPYTHON=" + env.subst("$PYTHONEXE"),
        "-DSDKCONFIG=" + SDKCONFIG_PATH,
    ]
    + click.parser.split_arg_string(board.get("build.cmake_extra_args", "")),
)
# At this point the sdkconfig file should be generated by the underlying build system
assert os.path.isfile(SDKCONFIG_PATH), (
    "Missing auto-generated SDK configuration file `%s`" % SDKCONFIG_PATH
)
if not project_codemodel:
    sys.stderr.write("Error: Couldn't find code model generated by CMake\n")
    env.Exit(1)
target_configs = load_target_configurations(
    project_codemodel, os.path.join(BUILD_DIR, CMAKE_API_REPLY_PATH)
)
sdk_config = get_sdk_configuration()
# The project component target is named after the sources folder.
project_target_name = "__idf_%s" % os.path.basename(PROJECT_SRC_DIR)
if project_target_name not in target_configs:
    sys.stderr.write("Error: Couldn't find the main target of the project!\n")
    env.Exit(1)
if project_target_name != "__idf_main" and "__idf_main" in target_configs:
    sys.stderr.write(
        (
            "Warning! Detected two different targets with project sources. Please use "
            "either %s or specify 'main' folder in 'platformio.ini' file.\n"
            % project_target_name
        )
    )
    env.Exit(1)
project_ld_scipt = generate_project_ld_script(
    sdk_config, [project_target_name, "__pio_env"]
)
env.Depends("$BUILD_DIR/$PROGNAME$PROGSUFFIX", project_ld_scipt)
elf_config = get_project_elf(target_configs)
default_config_name = find_default_component(target_configs)
# Framework components are built separately from the project target
# and the dummy "__pio_env" component.
framework_components_map = get_components_map(
    target_configs,
    ["STATIC_LIBRARY", "OBJECT_LIBRARY"],
    [project_target_name, default_config_name],
)
build_components(env, framework_components_map, PROJECT_DIR)
if not elf_config:
    sys.stderr.write("Error: Couldn't load the main firmware target of the project\n")
    env.Exit(1)
for component_config in framework_components_map.values():
    env.Depends(project_ld_scipt, component_config["lib"])
project_config = target_configs.get(project_target_name, {})
default_config = target_configs.get(default_config_name, {})
project_defines = get_app_defines(project_config)
project_flags = get_app_flags(project_config, default_config)
link_args = extract_link_args(elf_config)
app_includes = get_app_includes(elf_config)
project_lib_includes = get_project_lib_includes(env)
#
# Compile bootloader
#
env.Depends("$BUILD_DIR/$PROGNAME$PROGSUFFIX", build_bootloader())
#
# Target: ESP-IDF menuconfig
#
env.AddPlatformTarget(
    "menuconfig",
    None,
    [env.VerboseAction(RunMenuconfig, "Running menuconfig...")],
    "Run Menuconfig",
)
#
# Process main parts of the framework
#
libs = find_lib_deps(
    framework_components_map, elf_config, link_args, [project_target_name]
)
# Extra flags which need to be explicitly specified in LINKFLAGS section because SCons
# cannot merge them correctly
extra_flags = filter_args(link_args["LINKFLAGS"], ["-T", "-u"])
extra_flags = fix_ld_paths(extra_flags)
link_args["LINKFLAGS"] = sorted(list(set(link_args["LINKFLAGS"]) - set(extra_flags)))
# remove the main linker script flags '-T memory.ld' since it already appears later on
# NOTE(review): the bare `except` below expects only ValueError from
# list.index(); it would also mask unrelated errors — confirm before narrowing.
try:
    ld_index = extra_flags.index("memory.ld")
    extra_flags.pop(ld_index)
    extra_flags.pop(ld_index - 1)
    pass
except:
    print("Warning! Couldn't find the main linker script in the CMake code model.")
#
# Process project sources
#
# Remove project source files from following build stages as they're
# built as part of the framework
def _skip_prj_source_files(node):
    if node.srcnode().get_path().lower().startswith(PROJECT_SRC_DIR.lower()):
        return None
    return node
env.AddBuildMiddleware(_skip_prj_source_files)
# Project files should be compiled only when a special
# option is enabled when running 'test' command
if "__test" not in COMMAND_LINE_TARGETS or env.GetProjectOption(
    "test_build_project_src"
):
    project_env = env.Clone()
    if project_target_name != "__idf_main":
        # Manually add dependencies to CPPPATH since ESP-IDF build system doesn't generate
        # this info if the folder with sources is not named 'main'
        # https://docs.espressif.com/projects/esp-idf/en/latest/api-guides/build-system.html#rename-main
        project_env.AppendUnique(CPPPATH=app_includes["plain_includes"])
    # Add include dirs from PlatformIO build system to project CPPPATH so
    # they're visible to PIOBUILDFILES
    project_env.Append(
        CPPPATH=["$PROJECT_INCLUDE_DIR", "$PROJECT_SRC_DIR"] + project_lib_includes
    )
    env.Append(
        PIOBUILDFILES=compile_source_files(
            target_configs.get(project_target_name),
            project_env,
            project_env.subst("$PROJECT_DIR"),
        )
    )
partition_table_offset = sdk_config.get("PARTITION_TABLE_OFFSET", 0x8000)
# Merge linker args and app flags into the main environment; bootloader
# and partition table binaries are flashed alongside the firmware.
project_flags.update(link_args)
env.MergeFlags(project_flags)
env.Prepend(
    CPPPATH=app_includes["plain_includes"],
    CPPDEFINES=project_defines,
    LINKFLAGS=extra_flags,
    LIBS=libs,
    FLASH_EXTRA_IMAGES=[
        (
            board.get(
                "upload.bootloader_offset", "0x0" if mcu == "esp32c3" else "0x1000"
            ),
            os.path.join("$BUILD_DIR", "bootloader.bin"),
        ),
        (
            board.get("upload.partition_table_offset", hex(partition_table_offset)),
            os.path.join("$BUILD_DIR", "partitions.bin"),
        ),
    ],
)
#
# Generate mbedtls bundle
#
if sdk_config.get("MBEDTLS_CERTIFICATE_BUNDLE", False):
    generate_mbedtls_bundle(sdk_config)
#
# To embed firmware checksum a special argument for esptool.py is required
#
action = copy.deepcopy(env["BUILDERS"]["ElfToBin"].action)
action.cmd_list = env["BUILDERS"]["ElfToBin"].action.cmd_list.replace(
    "-o", "--elf-sha256-offset 0xb0 -o"
)
env["BUILDERS"]["ElfToBin"].action = action
#
# Compile ULP sources in 'ulp' folder
#
ulp_dir = os.path.join(PROJECT_DIR, "ulp")
if os.path.isdir(ulp_dir) and os.listdir(ulp_dir) and mcu != "esp32c3":
    env.SConscript("ulp.py", exports="env sdk_config project_config idf_variant")
#
# Process OTA partition and image
#
ota_partition_params = get_partition_info(
    env.subst("$PARTITIONS_TABLE_CSV"),
    partition_table_offset,
    {"name": "ota", "type": "data", "subtype": "ota"},
)
if ota_partition_params["size"] and ota_partition_params["offset"]:
    # Generate an empty image if OTA is enabled in partition table
    ota_partition_image = os.path.join("$BUILD_DIR", "ota_data_initial.bin")
    generate_empty_partition_image(ota_partition_image, ota_partition_params["size"])
    env.Append(
        FLASH_EXTRA_IMAGES=[
            (
                board.get(
                    "upload.ota_partition_offset", ota_partition_params["offset"]
                ),
                ota_partition_image,
            )
        ]
    )
#
# Configure application partition offset
#
env.Replace(
    ESP32_APP_OFFSET=get_app_partition_offset(
        env.subst("$PARTITIONS_TABLE_CSV"), partition_table_offset
    )
)
# Propagate application offset to debug configurations
env["IDE_EXTRA_DATA"].update({"application_offset": env.subst("$ESP32_APP_OFFSET")})
| 32.781037 | 130 | 0.626652 |
import copy
import json
import subprocess
import sys
import os
import click
import semantic_version
from SCons.Script import (
ARGUMENTS,
COMMAND_LINE_TARGETS,
DefaultEnvironment,
)
from platformio import fs
from platformio.proc import exec_command
from platformio.util import get_systype
from platformio.builder.tools.piolib import ProjectAsLibBuilder
from platformio.package.version import get_original_version, pepver_to_semver
env = DefaultEnvironment()
env.SConscript("_embed_files.py", exports="env")
platform = env.PioPlatform()
board = env.BoardConfig()
# MCU/variant drive toolchain and framework path selection below.
mcu = board.get("build.mcu", "esp32")
idf_variant = mcu.lower()
FRAMEWORK_DIR = platform.get_package_dir("framework-espidf")
# Pick the toolchain package matching the target architecture.
TOOLCHAIN_DIR = platform.get_package_dir(
    "toolchain-%s"
    % (
        "riscv32-esp"
        if mcu == "esp32c3"
        else ("xtensa-esp32s2" if mcu == "esp32s2" else "xtensa-esp32")
    )
)
assert os.path.isdir(FRAMEWORK_DIR)
assert os.path.isdir(TOOLCHAIN_DIR)
if "arduino" in env.subst("$PIOFRAMEWORK"):
    ARDUINO_FRAMEWORK_DIR = platform.get_package_dir("framework-arduinoespressif32")
    # "@" in the package folder name is replaced as it's not accepted further on.
    if "@" in os.path.basename(ARDUINO_FRAMEWORK_DIR):
        new_path = os.path.join(
            os.path.dirname(ARDUINO_FRAMEWORK_DIR),
            os.path.basename(ARDUINO_FRAMEWORK_DIR).replace("@", "-"),
        )
        os.rename(ARDUINO_FRAMEWORK_DIR, new_path)
        ARDUINO_FRAMEWORK_DIR = new_path
    assert ARDUINO_FRAMEWORK_DIR and os.path.isdir(ARDUINO_FRAMEWORK_DIR)
BUILD_DIR = env.subst("$BUILD_DIR")
PROJECT_DIR = env.subst("$PROJECT_DIR")
PROJECT_SRC_DIR = env.subst("$PROJECT_SRC_DIR")
CMAKE_API_REPLY_PATH = os.path.join(".cmake", "api", "v1", "reply")
# Per-environment sdkconfig path unless the board overrides it.
SDKCONFIG_PATH = board.get(
    "build.esp-idf.sdkconfig_path",
    os.path.join(PROJECT_DIR, "sdkconfig.%s" % env.subst("$PIOENV")),
)
def get_project_lib_includes(env):
    """Return include paths contributed by the project's library dependencies."""
    project = ProjectAsLibBuilder(env, "$PROJECT_DIR")
    project.install_dependencies()
    project.search_deps_recursive()
    paths = []
    for lb in env.GetLibBuilders():
        # Only libraries actually used by the project contribute includes
        if not lb.dependent:
            continue
        lb.env.PrependUnique(CPPPATH=lb.get_include_dirs())
        paths.extend(lb.env["CPPPATH"])
    # Drop cached library builders so later build stages rescan dependencies
    DefaultEnvironment().Replace(__PIO_LIB_BUILDERS=None)
    return paths
def is_cmake_reconfigure_required(cmake_api_reply_dir):
    """Decide whether the CMake project must be (re)configured.

    True when any CMake artifact is missing or when a configuration input
    (sdkconfig, its defaults, any CMakeLists.txt, the framework itself) is
    newer than the cached CMake state.
    """
    cache_file = os.path.join(BUILD_DIR, "CMakeCache.txt")
    preconf_dir = os.path.join(BUILD_DIR, "config")
    defaults_file = os.path.join(PROJECT_DIR, "sdkconfig.defaults")
    cmake_lists = [
        os.path.join(PROJECT_DIR, "CMakeLists.txt"),
        os.path.join(PROJECT_SRC_DIR, "CMakeLists.txt"),
    ]
    # Missing or empty CMake service folders mean nothing is cached yet
    for folder in (cmake_api_reply_dir, preconf_dir):
        if not os.path.isdir(folder) or not os.listdir(folder):
            return True
    if not os.path.isfile(cache_file):
        return True
    if not os.path.isfile(os.path.join(BUILD_DIR, "build.ninja")):
        return True
    cache_mtime = os.path.getmtime(cache_file)
    # Stale if the sdkconfig is absent or touched after the last configure
    if not os.path.isfile(SDKCONFIG_PATH) or os.path.getmtime(SDKCONFIG_PATH) > cache_mtime:
        return True
    if os.path.isfile(defaults_file) and os.path.getmtime(defaults_file) > cache_mtime:
        return True
    # Any newer CMakeLists.txt, preconfig folder or framework forces a rerun
    return any(
        os.path.getmtime(path) > cache_mtime
        for path in cmake_lists + [preconf_dir, FRAMEWORK_DIR]
    )
def is_proper_idf_project():
    """Check that both the root and the source CMakeLists.txt files exist."""
    required_files = (
        os.path.join(PROJECT_DIR, "CMakeLists.txt"),
        os.path.join(PROJECT_SRC_DIR, "CMakeLists.txt"),
    )
    for cmake_file in required_files:
        if not os.path.isfile(cmake_file):
            return False
    return True
def collect_src_files():
    """Return project source files, excluding C/C++ header files."""
    matched = env.MatchSourceFiles("$PROJECT_SRC_DIR", env.get("SRC_FILTER"))
    return [path for path in matched if not path.endswith((".h", ".hpp"))]
def normalize_path(path):
    """Rewrite *path* relative to ${CMAKE_SOURCE_DIR} and unix-slash it."""
    rewritten = (
        path.replace(PROJECT_DIR, "${CMAKE_SOURCE_DIR}")
        if PROJECT_DIR in path
        else path
    )
    return fs.to_unix_path(rewritten)
def create_default_project_files():
    """Create stub CMakeLists.txt files (and a stub main.c) for projects
    that don't follow the default ESP-IDF layout."""
    root_cmake_tpl = """cmake_minimum_required(VERSION 3.16.0)
include($ENV{IDF_PATH}/tools/cmake/project.cmake)
project(%s)
"""
    prj_cmake_tpl = """# This file was automatically generated for projects
# without default 'CMakeLists.txt' file.
FILE(GLOB_RECURSE app_sources %s/*.*)
idf_component_register(SRCS ${app_sources})
"""
    if not os.listdir(PROJECT_SRC_DIR):
        # Ensure there is at least one compilable source file
        with open(os.path.join(PROJECT_SRC_DIR, "main.c"), "w") as fp:
            fp.write("void app_main() {}")
    project_dir = PROJECT_DIR
    if not os.path.isfile(os.path.join(project_dir, "CMakeLists.txt")):
        with open(os.path.join(project_dir, "CMakeLists.txt"), "w") as fp:
            fp.write(root_cmake_tpl % os.path.basename(project_dir))
    project_src_dir = PROJECT_SRC_DIR
    if not os.path.isfile(os.path.join(project_src_dir, "CMakeLists.txt")):
        with open(os.path.join(project_src_dir, "CMakeLists.txt"), "w") as fp:
            fp.write(prj_cmake_tpl % normalize_path(PROJECT_SRC_DIR))
def get_cmake_code_model(src_dir, build_dir, extra_args=None):
    """Run CMake (when required) and load its file-based API codemodel.

    Uses the CMake file API: an empty "codemodel-v2" query file is created
    and CMake writes reply JSON files during configuration.
    """
    cmake_api_dir = os.path.join(build_dir, ".cmake", "api", "v1")
    cmake_api_query_dir = os.path.join(cmake_api_dir, "query")
    cmake_api_reply_dir = os.path.join(cmake_api_dir, "reply")
    query_file = os.path.join(cmake_api_query_dir, "codemodel-v2")
    if not os.path.isfile(query_file):
        os.makedirs(os.path.dirname(query_file))
        open(query_file, "a").close()
    if not is_proper_idf_project():
        create_default_project_files()
    if is_cmake_reconfigure_required(cmake_api_reply_dir):
        run_cmake(src_dir, build_dir, extra_args)
    if not os.path.isdir(cmake_api_reply_dir) or not os.listdir(cmake_api_reply_dir):
        sys.stderr.write("Error: Couldn't find CMake API response file\n")
        env.Exit(1)
    codemodel = {}
    # The reply folder contains one codemodel file plus per-target files
    for target in os.listdir(cmake_api_reply_dir):
        if target.startswith("codemodel-v2"):
            with open(os.path.join(cmake_api_reply_dir, target), "r") as fp:
                codemodel = json.load(fp)
    assert codemodel["version"]["major"] == 2
    return codemodel
def populate_idf_env_vars(idf_env):
    """Mutate *idf_env* (an os.environ copy) with IDF_PATH and tool PATHs."""
    idf_env["IDF_PATH"] = FRAMEWORK_DIR
    additional_packages = [
        os.path.join(TOOLCHAIN_DIR, "bin"),
        platform.get_package_dir("tool-ninja"),
        os.path.join(platform.get_package_dir("tool-cmake"), "bin"),
        os.path.dirname(env.subst("$PYTHONEXE")),
    ]
    # The ULP coprocessor toolchain exists only for Xtensa-based MCUs
    if mcu != "esp32c3":
        additional_packages.append(
            os.path.join(platform.get_package_dir("toolchain-%sulp" % mcu), "bin"),
        )
    if "windows" in get_systype():
        additional_packages.append(platform.get_package_dir("tool-mconf"))
    idf_env["PATH"] = os.pathsep.join(additional_packages + [idf_env["PATH"]])
    # Some users reported that the `IDF_TOOLS_PATH` var can seep into the
    # underlying build system. Unsetting it is a safe workaround.
    if "IDF_TOOLS_PATH" in idf_env:
        del idf_env["IDF_TOOLS_PATH"]
def get_target_config(project_configs, target_index, cmake_api_reply_dir):
    """Load the JSON target descriptor referenced by a codemodel index."""
    target_json = project_configs.get("targets")[target_index].get("jsonFile", "")
    config_path = os.path.join(cmake_api_reply_dir, target_json)
    if not os.path.isfile(config_path):
        sys.stderr.write("Error: Couldn't find target config %s\n" % target_json)
        env.Exit(1)
    with open(config_path) as fp:
        return json.load(fp)
def load_target_configurations(cmake_codemodel, cmake_api_reply_dir):
    """Map target names to their full configuration dicts from the codemodel."""
    result = {}
    main_config = cmake_codemodel.get("configurations")[0]
    for project in main_config.get("projects", []):
        for index in project.get("targetIndexes", []):
            target = get_target_config(main_config, index, cmake_api_reply_dir)
            result[target["name"]] = target
    return result
def build_library(default_env, lib_config, project_src_dir, prepend_dir=None):
    """Compile a component's sources and archive them into a static library."""
    objects = compile_source_files(
        lib_config, default_env, project_src_dir, prepend_dir
    )
    out_dir = lib_config["paths"]["build"]
    if prepend_dir:
        out_dir = os.path.join(prepend_dir, out_dir)
    target_path = os.path.join("$BUILD_DIR", out_dir, lib_config["nameOnDisk"])
    return default_env.Library(target=target_path, source=objects)
def get_app_includes(app_config):
    """Split the first compile group's include dirs into plain and system ones."""
    groups = {"plain_includes": [], "sys_includes": []}
    for entry in app_config["compileGroups"][0].get("includes", []):
        bucket = "sys_includes" if entry.get("isSystem", False) else "plain_includes"
        groups[bucket].append(entry["path"])
    return groups
def extract_defines(compile_group):
    """Collect preprocessor defines from a codemodel compile group.

    Handles both explicit "defines" entries (quotes escaped for the shell)
    and "-D" fragments embedded in the compile command.
    """
    defines = [
        entry.get("define").replace('"', '\\"').strip()
        for entry in compile_group.get("defines", [])
    ]
    for fragment_info in compile_group.get("compileCommandFragments", []):
        fragment = fragment_info.get("fragment", "")
        if fragment.startswith("-D"):
            defines.append(fragment[2:])
    return defines
def get_app_defines(app_config):
    """Return the defines of the application's first compile group."""
    first_group = app_config["compileGroups"][0]
    return extract_defines(first_group)
def extract_link_args(target_config):
    """Parse linker command fragments from the CMake codemodel.

    Returns a dict with LINKFLAGS, LIBS ("-l" style names and framework
    archives), LIBPATH, and __LIB_DEPS (basenames of archives built
    internally by this script, used later for dependency resolution).
    """
    def _add_to_libpath(lib_path, link_args):
        if lib_path not in link_args["LIBPATH"]:
            link_args["LIBPATH"].append(lib_path)
    def _add_archive(archive_path, link_args):
        archive_name = os.path.basename(archive_path)
        if archive_name not in link_args["LIBS"]:
            _add_to_libpath(os.path.dirname(archive_path), link_args)
            link_args["LIBS"].append(archive_name)
    link_args = {"LINKFLAGS": [], "LIBS": [], "LIBPATH": [], "__LIB_DEPS": []}
    for f in target_config.get("link", {}).get("commandFragments", []):
        fragment = f.get("fragment", "").strip()
        fragment_role = f.get("role", "").strip()
        if not fragment or not fragment_role:
            continue
        args = click.parser.split_arg_string(fragment)
        if fragment_role == "flags":
            link_args["LINKFLAGS"].extend(args)
        elif fragment_role == "libraries":
            if fragment.startswith("-l"):
                link_args["LIBS"].extend(args)
            elif fragment.startswith("-L"):
                lib_path = fragment.replace("-L", "").strip().strip('"')
                _add_to_libpath(lib_path, link_args)
            elif fragment.startswith("-") and not fragment.startswith("-l"):
                # CMake mistakenly marks LINKFLAGS as libraries
                link_args["LINKFLAGS"].extend(args)
            elif fragment.endswith(".a"):
                archive_path = fragment
                # process static archives
                if archive_path.startswith(FRAMEWORK_DIR):
                    # In case of precompiled archives from framework package
                    _add_archive(archive_path, link_args)
                else:
                    # In case of archives within project
                    if archive_path.startswith(".."):
                        # Precompiled archives from project component
                        _add_archive(
                            os.path.normpath(os.path.join(BUILD_DIR, archive_path)),
                            link_args,
                        )
                    else:
                        # Internally built libraries used for dependency resolution
                        link_args["__LIB_DEPS"].append(os.path.basename(archive_path))
    return link_args
def filter_args(args, allowed, ignore=None):
    """Keep arguments whose prefix is in *allowed* but not in *ignore*.

    A kept flag also keeps the token immediately following it when that
    token does not itself start with "-" (i.e. the flag's value).
    """
    if not allowed:
        return []
    allowed_prefixes = tuple(allowed)
    ignored_prefixes = tuple(ignore or [])
    kept = []
    position = 0
    total = len(args)
    while position < total:
        current = args[position]
        matches = current.startswith(allowed_prefixes)
        ignored = ignored_prefixes and current.startswith(ignored_prefixes)
        if matches and not ignored:
            kept.append(current)
            # Consume the flag's value token as well
            if position + 1 < total and not args[position + 1].startswith("-"):
                position += 1
                kept.append(args[position])
        position += 1
    return kept
def get_app_flags(app_config, default_config):
    """Build sorted ASM/C/CXX flag lists, preferring app-specific flags
    and falling back to the default component's flags per language.
    """

    def _collect(config):
        per_language = {}
        for group in config["compileGroups"]:
            collected = []
            per_language[group["language"]] = collected
            for cc_fragment in group["compileCommandFragments"]:
                fragment = cc_fragment.get("fragment", "")
                # Defines are handled separately via extract_defines()
                if not fragment.strip() or fragment.startswith("-D"):
                    continue
                collected.extend(click.parser.split_arg_string(fragment.strip()))
        return per_language

    app = _collect(app_config)
    fallback = _collect(default_config)
    # Flags are sorted because CMake randomly populates build flags in code model
    return {
        "ASFLAGS": sorted(app.get("ASM", fallback.get("ASM"))),
        "CFLAGS": sorted(app.get("C", fallback.get("C"))),
        "CXXFLAGS": sorted(app.get("CXX", fallback.get("CXX"))),
    }
def get_sdk_configuration():
    """Load the auto-generated "sdkconfig.json" produced by the IDF build.

    Returns an empty dict when the file is missing, unreadable, or malformed
    so callers can fall back to default behavior.
    """
    config_path = os.path.join(BUILD_DIR, "config", "sdkconfig.json")
    # Return early instead of relying on open() raising inside the try block
    if not os.path.isfile(config_path):
        print('Warning: Could not find "sdkconfig.json" file\n')
        return {}
    try:
        with open(config_path, "r") as fp:
            return json.load(fp)
    except (ValueError, OSError):
        # Narrowed from a bare except: malformed JSON or an I/O failure is
        # treated as "no configuration"; anything else should propagate
        return {}
def find_framework_service_files(search_path, sdk_config):
    """Scan framework components for linker fragments and Kconfig files.

    Returns a dict with "lf_files", "kconfig_files" and "kconfig_build_files"
    lists; hardware-specific and always-required fragments are appended
    explicitly after the directory scan.
    """
    result = {}
    result["lf_files"] = list()
    result["kconfig_files"] = list()
    result["kconfig_build_files"] = list()
    for d in os.listdir(search_path):
        path = os.path.join(search_path, d)
        if not os.path.isdir(path):
            continue
        for f in os.listdir(path):
            # Skip hardware specific files as they will be added later
            if f == "linker.lf" and not os.path.basename(path).startswith(
                ("esp32", "riscv")
            ):
                result["lf_files"].append(os.path.join(path, f))
            elif f == "Kconfig.projbuild":
                result["kconfig_build_files"].append(os.path.join(path, f))
            elif f == "Kconfig":
                result["kconfig_files"].append(os.path.join(path, f))
    # RISC-V targets need their architecture-specific linker fragment
    if mcu == "esp32c3":
        result["lf_files"].append(
            os.path.join(FRAMEWORK_DIR, "components", "riscv", "linker.lf")
        )
    # Fragments required for every target
    result["lf_files"].extend(
        [
            os.path.join(
                FRAMEWORK_DIR,
                "components",
                "esp_common",
                "common.lf"),
            os.path.join(
                FRAMEWORK_DIR,
                "components",
                "esp_common",
                "soc.lf"),
            os.path.join(
                FRAMEWORK_DIR,
                "components",
                "esp_system",
                "app.lf"),
            os.path.join(FRAMEWORK_DIR, "components", "newlib", "newlib.lf"),
            os.path.join(FRAMEWORK_DIR, "components", "newlib", "system_libs.lf"),
        ]
    )
    # Extra fragment for the PSRAM cache workaround build option
    if sdk_config.get("SPIRAM_CACHE_WORKAROUND", False):
        result["lf_files"].append(
            os.path.join(
                FRAMEWORK_DIR, "components", "newlib", "esp32-spiram-rom-functions-c.lf"
            )
        )
    return result
def create_custom_libraries_list(ldgen_libraries_file, ignore_targets):
    """Write a filtered copy of the ldgen libraries list.

    Archives that correspond to *ignore_targets* (libraries built internally
    by PlatformIO) are removed so ldgen doesn't process them twice. The
    filtered file path is returned; an existing copy is reused as-is.
    """
    if not os.path.isfile(ldgen_libraries_file):
        sys.stderr.write("Error: Couldn't find the list of framework libraries\n")
        env.Exit(1)
    pio_libraries_file = ldgen_libraries_file + "_pio"
    if os.path.isfile(pio_libraries_file):
        return pio_libraries_file
    excluded_names = [
        "lib%s.a" % target.replace("__idf_", "") for target in ignore_targets
    ]
    with open(ldgen_libraries_file, "r") as src:
        lines = src.readlines()
    with open(pio_libraries_file, "w") as dst:
        for line in lines:
            if not any(name in line for name in excluded_names):
                dst.write(line)
    return pio_libraries_file
def generate_project_ld_script(sdk_config, ignore_targets=None):
    """Register SCons commands that generate the project linker scripts.

    Preprocesses "memory.ld.in" into memory.ld and runs ldgen.py to produce
    sections.ld from the framework's linker fragments. Returns the
    sections.ld command node.
    """
    ignore_targets = ignore_targets or []
    project_files = find_framework_service_files(
        os.path.join(FRAMEWORK_DIR, "components"), sdk_config
    )
    # Create a new file to avoid automatically generated library entry as files from
    # this library are built internally by PlatformIO
    libraries_list = create_custom_libraries_list(
        os.path.join(BUILD_DIR, "ldgen_libraries"), ignore_targets
    )
    # Rework the memory template linker script, following components/esp_system/ld.cmake
    args = {
        "preprocess" : os.path.join(
            TOOLCHAIN_DIR,
            "bin",
            env.subst("$CC")),
        "ld_output": os.path.join("$BUILD_DIR", "memory.ld"),
        "ld_dir": os.path.join(FRAMEWORK_DIR,
            "components",
            "esp_system",
            "ld"),
        "ld_input": os.path.join(
            FRAMEWORK_DIR,
            "components",
            "esp_system",
            "ld",
            idf_variant,
            "memory.ld.in",
        ),
        "project_output": os.path.join("$BUILD_DIR", "%s.project.ld" % idf_variant),
        "config": os.path.join("$BUILD_DIR", "config"),
        "flags" : '-C -P -x c -E -o '
    }
    # Run the C preprocessor over the memory.ld.in template
    cmd = (
        '"{preprocess}" {flags} "{ld_output}" -I "{config}" -I "{ld_dir}" "{ld_input}"'
    ).format(**args)
    env.Command(
        os.path.join("$BUILD_DIR", "memory.ld"),
        os.path.join(
            FRAMEWORK_DIR,
            "components",
            "esp_system",
            "ld",
            idf_variant,
            "sections.ld.in",
        ) if False else os.path.join(
            FRAMEWORK_DIR,
            "components",
            "esp_system",
            "ld",
            idf_variant,
            "memory.ld.in",
        ),
        env.VerboseAction(cmd, "Generating memory linker script $TARGET"),
    ) if False else env.Command(
        os.path.join("$BUILD_DIR", "memory.ld"),
        os.path.join(
            FRAMEWORK_DIR,
            "components",
            "esp_system",
            "ld",
            idf_variant,
            "memory.ld.in",
        ),
        env.VerboseAction(cmd, "Generating memory linker script $TARGET"),
    )
    args = {
        "script": os.path.join(FRAMEWORK_DIR, "tools", "ldgen", "ldgen.py"),
        "config": SDKCONFIG_PATH,
        "fragments": " ".join(['"%s"' % f for f in project_files.get("lf_files")]),
        # "fragments": "".join(['%s;' % f for f in project_files.get("lf_files")]).strip(';'),
        "kconfig": os.path.join(FRAMEWORK_DIR, "Kconfig"),
        "env_file": os.path.join("$BUILD_DIR", "config.env"),
        "libraries_list": libraries_list,
        "section_input": os.path.join(
            FRAMEWORK_DIR,
            "components",
            "esp_system",
            "ld",
            idf_variant,
            "sections.ld.in",
        ),
        "objdump": os.path.join(
            TOOLCHAIN_DIR,
            "bin",
            env.subst("$CC").replace("-gcc", "-objdump"),
        ),
    }
    # ldgen.py turns the sections template plus fragments into sections.ld
    cmd = (
        '"$PYTHONEXE" "{script}" --input $SOURCE '
        '--config "{config}" --fragments {fragments} --output $TARGET '
        '--kconfig "{kconfig}" --env-file "{env_file}" '
        '--libraries-file "{libraries_list}" '
        '--objdump "{objdump}"'
    ).format(**args)
    return env.Command(
        os.path.join("$BUILD_DIR", "sections.ld"),
        os.path.join(
            FRAMEWORK_DIR,
            "components",
            "esp_system",
            "ld",
            idf_variant,
            "sections.ld.in",
        ),
        env.VerboseAction(cmd, "Generating project linker script $TARGET"),
    )
def prepare_build_envs(config, default_env):
    """Clone a build environment per compile group of a codemodel target.

    Each clone carries the group's includes, defines and compile flags;
    debug flags are applied when a debug build is requested.
    """
    build_envs = []
    target_compile_groups = config.get("compileGroups")
    is_build_type_debug = (
        set(["debug", "sizedata"]) & set(COMMAND_LINE_TARGETS)
        or default_env.GetProjectOption("build_type") == "debug"
    )
    for cg in target_compile_groups:
        includes = []
        sys_includes = []
        for inc in cg.get("includes", []):
            inc_path = inc["path"]
            if inc.get("isSystem", False):
                sys_includes.append(inc_path)
            else:
                includes.append(inc_path)
        defines = extract_defines(cg)
        compile_commands = cg.get("compileCommandFragments", [])
        build_env = default_env.Clone()
        for cc in compile_commands:
            build_flags = cc.get("fragment")
            # Defines are added separately via CPPDEFINES below
            if not build_flags.startswith("-D"):
                build_env.AppendUnique(**build_env.ParseFlags(build_flags))
        build_env.AppendUnique(CPPDEFINES=defines, CPPPATH=includes)
        if sys_includes:
            build_env.Append(CCFLAGS=[("-isystem", inc) for inc in sys_includes])
        # Assembler sees the same flags the C compiler does
        build_env.Append(ASFLAGS=build_env.get("CCFLAGS", [])[:])
        build_env.ProcessUnFlags(default_env.get("BUILD_UNFLAGS"))
        if is_build_type_debug:
            build_env.ConfigureDebugFlags()
        build_envs.append(build_env)
    return build_envs
def compile_source_files(config, default_env, project_src_dir, prepend_dir=None):
    """Create SCons StaticObject nodes for every source in a codemodel target.

    Object files are mirrored under $BUILD_DIR, preserving the framework
    component layout; returns the list of object nodes.
    """
    build_envs = prepare_build_envs(config, default_env)
    objects = []
    components_dir = fs.to_unix_path(os.path.join(FRAMEWORK_DIR, "components"))
    for source in config.get("sources", []):
        # ".rule" entries are CMake bookkeeping, not real sources
        if source["path"].endswith(".rule"):
            continue
        compile_group_idx = source.get("compileGroupIndex")
        if compile_group_idx is not None:
            src_dir = config["paths"]["source"]
            if not os.path.isabs(src_dir):
                src_dir = os.path.join(project_src_dir, config["paths"]["source"])
            src_path = source.get("path")
            if not os.path.isabs(src_path):
                # For cases when sources are located near CMakeLists.txt
                src_path = os.path.join(project_src_dir, src_path)
            obj_path = os.path.join("$BUILD_DIR", prepend_dir or "")
            if src_path.startswith(components_dir):
                obj_path = os.path.join(
                    obj_path, os.path.relpath(src_path, components_dir)
                )
            else:
                if not os.path.isabs(source["path"]):
                    obj_path = os.path.join(obj_path, source["path"])
                else:
                    obj_path = os.path.join(obj_path, os.path.basename(src_path))
            objects.append(
                build_envs[compile_group_idx].StaticObject(
                    target=os.path.splitext(obj_path)[0] + ".o",
                    source=os.path.realpath(src_path),
                )
            )
    return objects
def run_tool(cmd):
    """Run an external tool with the IDF environment; exit the build on failure."""
    tool_env = os.environ.copy()
    populate_idf_env_vars(tool_env)
    result = exec_command(cmd, env=tool_env)
    if result["returncode"] != 0:
        sys.stderr.write(result["out"] + "\n")
        sys.stderr.write(result["err"] + "\n")
        env.Exit(1)
    # Echo tool output when PlatformIO runs in verbose mode
    if int(ARGUMENTS.get("PIOVERBOSE", 0)):
        print(result["out"])
        print(result["err"])
def RunMenuconfig(target, source, env):
    """SCons action: run the interactive "menuconfig" CMake target."""
    idf_env = os.environ.copy()
    populate_idf_env_vars(idf_env)
    # Run interactively (not via exec_command) so the TUI gets the terminal
    rc = subprocess.call(
        [
            os.path.join(platform.get_package_dir("tool-cmake"), "bin", "cmake"),
            "--build",
            BUILD_DIR,
            "--target",
            "menuconfig",
        ],
        env=idf_env,
    )
    if rc != 0:
        sys.stderr.write("Error: Couldn't execute 'menuconfig' target.\n")
        env.Exit(1)
def run_cmake(src_dir, build_dir, extra_args=None):
    """Invoke CMake with the Ninja generator for the given source/build dirs."""
    cmake_bin = os.path.join(
        platform.get_package_dir("tool-cmake") or "", "bin", "cmake"
    )
    cmd = [cmake_bin, "-S", src_dir, "-B", build_dir, "-G", "Ninja"]
    cmd.extend(extra_args or [])
    run_tool(cmd)
def find_lib_deps(components_map, elf_config, link_args, ignore_components=None):
    """Resolve the library nodes the firmware ELF must link against.

    Combines explicit codemodel dependencies with archives the linker
    command references implicitly (collected in link_args["__LIB_DEPS"]).
    """
    ignore_components = ignore_components or []
    ignored_prefixes = tuple(ignore_components)
    result = []
    # Libraries declared as direct dependencies of the ELF target
    for dep in elf_config.get("dependencies", []):
        dep_id = dep["id"]
        if components_map.get(dep_id, {}) and not dep_id.startswith(ignored_prefixes):
            result.append(components_map[dep_id]["lib"])
    # Libraries only referenced by the final linker command line
    implicit_lib_deps = link_args.get("__LIB_DEPS", [])
    for component in components_map.values():
        component_config = component["config"]
        if component_config["type"] not in ("STATIC_LIBRARY", "OBJECT_LIBRARY"):
            continue
        if component_config["name"] in ignore_components:
            continue
        if (
            component_config["nameOnDisk"] in implicit_lib_deps
            and component["lib"] not in result
        ):
            result.append(component["lib"])
    return result
def fix_ld_paths(extra_flags):
    """Insert linker search paths for scripts relocated in ESP-IDF 4.4+.

    "-L" entries are spliced in right before the "<variant>.peripherals.ld"
    flag so the linker can find the relocated scripts. When that flag is not
    present, the flags are returned unchanged.
    """
    peripheral_framework_path = os.path.join(FRAMEWORK_DIR, "components", "soc", idf_variant, "ld")
    rom_framework_path = os.path.join(FRAMEWORK_DIR, "components", "esp_rom", idf_variant, "ld")
    bl_framework_path = os.path.join(FRAMEWORK_DIR, "components", "bootloader", "subproject", "main", "ld", idf_variant)
    # ESP linker scripts changed path in ESP-IDF 4.4+, so add missing paths to linker's search path
    try:
        ld_index = extra_flags.index("%s.peripherals.ld" % idf_variant)
    except ValueError:
        # Narrowed from a bare except: only a missing flag is expected here
        print("Error while parsing the flags")
        return extra_flags
    extra_flags[ld_index - 1:ld_index - 1] = [
        "-L", peripheral_framework_path,
        "-L", rom_framework_path,
        "-L", bl_framework_path,
    ]
    return extra_flags
def build_bootloader():
    """Configure and build the second-stage bootloader as a nested CMake project.

    Returns the binary node produced from bootloader.elf so the main build
    can depend on it.
    """
    bootloader_src_dir = os.path.join(
        FRAMEWORK_DIR, "components", "bootloader", "subproject"
    )
    code_model = get_cmake_code_model(
        bootloader_src_dir,
        os.path.join(BUILD_DIR, "bootloader"),
        [
            "-DIDF_TARGET=" + idf_variant,
            "-DPYTHON_DEPS_CHECKED=1",
            "-DPYTHON=" + env.subst("$PYTHONEXE"),
            "-DIDF_PATH=" + FRAMEWORK_DIR,
            "-DSDKCONFIG=" + SDKCONFIG_PATH,
            "-DLEGACY_INCLUDE_COMMON_HEADERS=",
            "-DEXTRA_COMPONENT_DIRS="
            + os.path.join(FRAMEWORK_DIR, "components", "bootloader"),
        ],
    )
    if not code_model:
        sys.stderr.write("Error: Couldn't find code model for bootloader\n")
        env.Exit(1)
    target_configs = load_target_configurations(
        code_model,
        os.path.join(BUILD_DIR, "bootloader", ".cmake", "api", "v1", "reply"),
    )
    elf_config = get_project_elf(target_configs)
    if not elf_config:
        sys.stderr.write(
            "Error: Couldn't load the main firmware target of the project\n"
        )
        env.Exit(1)
    bootloader_env = env.Clone()
    components_map = get_components_map(
        target_configs, ["STATIC_LIBRARY", "OBJECT_LIBRARY"]
    )
    build_components(bootloader_env, components_map, bootloader_src_dir, "bootloader")
    link_args = extract_link_args(elf_config)
    # "-T"/"-u" flags must stay in LINKFLAGS verbatim; see the main build below
    extra_flags = filter_args(link_args["LINKFLAGS"], ["-T", "-u"])
    extra_flags = fix_ld_paths(extra_flags)
    link_args["LINKFLAGS"] = sorted(
        list(set(link_args["LINKFLAGS"]) - set(extra_flags))
    )
    bootloader_env.MergeFlags(link_args)
    bootloader_env.Append(LINKFLAGS=extra_flags)
    bootloader_libs = find_lib_deps(components_map, elf_config, link_args)
    # Wrap libraries in --start-group/--end-group to resolve circular deps
    bootloader_env.Prepend(__RPATH="-Wl,--start-group ")
    bootloader_env.Append(
        CPPDEFINES=["__BOOTLOADER_BUILD"], _LIBDIRFLAGS=" -Wl,--end-group"
    )
    return bootloader_env.ElfToBin(
        os.path.join("$BUILD_DIR", "bootloader"),
        bootloader_env.Program(
            os.path.join("$BUILD_DIR", "bootloader.elf"), bootloader_libs
        ),
    )
def get_targets_by_type(target_configs, target_types, ignore_targets=None):
    """Select target configs whose type matches and whose name is not ignored."""
    skipped_names = ignore_targets or []
    return [
        config
        for config in target_configs.values()
        if config["type"] in target_types and config["name"] not in skipped_names
    ]
def get_components_map(target_configs, target_types, ignore_components=None):
    """Map component target IDs to dicts holding their configuration."""
    selected = get_targets_by_type(target_configs, target_types, ignore_components)
    return {config["id"]: {"config": config} for config in selected}
def build_components(env, components_map, project_src_dir, prepend_dir=None):
    """Build each component library and store its node under the map entry."""
    for entry in components_map.values():
        entry["lib"] = build_library(
            env, entry["config"], project_src_dir, prepend_dir
        )
def get_project_elf(target_configs):
    """Return the first EXECUTABLE target, warning when several exist."""
    candidates = get_targets_by_type(target_configs, ["EXECUTABLE"])
    if len(candidates) > 1:
        print(
            "Warning: Multiple elf targets found. The %s will be used!"
            % candidates[0]["name"]
        )
    return candidates[0]
def generate_default_component():
    # Used to force CMake generate build environments for all supported languages
    prj_cmake_tpl = """# Warning! Do not delete this auto-generated file.
file(GLOB component_sources *.c* *.S)
idf_component_register(SRCS ${component_sources})
"""
    # Dummy component with one empty source per language (C, C++, assembly)
    dummy_component_path = os.path.join(BUILD_DIR, "__pio_env")
    if not os.path.isdir(dummy_component_path):
        os.makedirs(dummy_component_path)
    for ext in (".cpp", ".c", ".S"):
        dummy_file = os.path.join(dummy_component_path, "__dummy" + ext)
        if not os.path.isfile(dummy_file):
            open(dummy_file, "a").close()
    component_cmake = os.path.join(dummy_component_path, "CMakeLists.txt")
    if not os.path.isfile(component_cmake):
        with open(component_cmake, "w") as fp:
            fp.write(prj_cmake_tpl)
    # The folder path doubles as the component's EXTRA_COMPONENT_DIRS entry
    return dummy_component_path
def find_default_component(target_configs):
    """Return the first target name containing "__pio_env", or "" if none."""
    return next(
        (config for config in target_configs if "__pio_env" in config), ""
    )
def create_version_file():
    """Write "version.txt" into the framework folder when it is missing.

    The ESP-IDF package ships without a .git folder, so the version must be
    recorded in a plain text file for the build system to read.
    """
    version_file = os.path.join(FRAMEWORK_DIR, "version.txt")
    if os.path.isfile(version_file):
        return
    package_version = platform.get_package_version("framework-espidf")
    with open(version_file, "w") as fp:
        fp.write(get_original_version(package_version) or package_version)
def generate_empty_partition_image(binary_path, image_size):
    """Register a command producing an empty (0xFF-filled per the IDF tool)
    partition image of *image_size* at *binary_path*; the firmware image
    depends on it. TODO confirm fill value against gen_empty_partition.py."""
    empty_partition = env.Command(
        binary_path,
        None,
        env.VerboseAction(
            '"$PYTHONEXE" "%s" %s $TARGET'
            % (
                os.path.join(
                    FRAMEWORK_DIR,
                    "components",
                    "partition_table",
                    "gen_empty_partition.py",
                ),
                image_size,
            ),
            "Generating an empty partition $TARGET",
        ),
    )
    env.Depends("$BUILD_DIR/$PROGNAME$PROGSUFFIX", empty_partition)
def get_partition_info(pt_path, pt_offset, pt_params):
    """Query parttool.py for a partition's size and offset.

    *pt_params* either names the default boot partition ({"name": "boot"})
    or gives an explicit type/subtype pair. Returns {"size": ..., "offset":
    ...} as strings; exits the build on tool failure.
    """
    if not os.path.isfile(pt_path):
        sys.stderr.write(
            "Missing partition table file `%s`\n" % os.path.basename(pt_path)
        )
        env.Exit(1)
    cmd = [
        env.subst("$PYTHONEXE"),
        os.path.join(FRAMEWORK_DIR, "components", "partition_table", "parttool.py"),
        "-q",
        "--partition-table-offset",
        hex(pt_offset),
        "--partition-table-file",
        pt_path,
        "get_partition_info",
        "--info",
        "size",
        "offset",
    ]
    if pt_params["name"] == "boot":
        cmd.append("--partition-boot-default")
    else:
        cmd.extend(
            [
                "--partition-type",
                pt_params["type"],
                "--partition-subtype",
                pt_params["subtype"],
            ]
        )
    result = exec_command(cmd)
    if result["returncode"] != 0:
        sys.stderr.write(
            "Couldn't extract information for %s/%s from the partition table\n"
            % (pt_params["type"], pt_params["subtype"])
        )
        sys.stderr.write(result["out"] + "\n")
        sys.stderr.write(result["err"] + "\n")
        env.Exit(1)
    size = offset = 0
    # parttool prints "<size> <offset>" on a single line
    if result["out"].strip():
        size, offset = result["out"].strip().split(" ", 1)
    return {"size": size, "offset": offset}
def get_app_partition_offset(pt_table, pt_offset):
    """Return the flash offset of the default boot (application) partition."""
    boot_params = get_partition_info(pt_table, pt_offset, {"name": "boot"})
    # 0x10000 is the conventional fallback application offset
    return boot_params.get("offset", "0x10000")
def generate_mbedtls_bundle(sdk_config):
    """Generate the mbedTLS X.509 certificate bundle and its assembly stub.

    Runs gen_crt_bundle.py according to the sdkconfig bundle options, then
    embeds the resulting binary as assembly via an IDF CMake helper script.
    """
    bundle_path = os.path.join("$BUILD_DIR", "x509_crt_bundle")
    if os.path.isfile(env.subst(bundle_path)):
        return
    default_crt_dir = os.path.join(
        FRAMEWORK_DIR, "components", "mbedtls", "esp_crt_bundle"
    )
    cmd = [env.subst("$PYTHONEXE"), os.path.join(default_crt_dir, "gen_crt_bundle.py")]
    crt_args = ["--input"]
    if sdk_config.get("MBEDTLS_CERTIFICATE_BUNDLE_DEFAULT_FULL", False):
        crt_args.append(os.path.join(default_crt_dir, "cacrt_all.pem"))
    elif sdk_config.get("MBEDTLS_CERTIFICATE_BUNDLE_DEFAULT_CMN", False):
        # Same PEM input, reduced to the common authorities via the filter CSV
        crt_args.append(os.path.join(default_crt_dir, "cacrt_all.pem"))
        cmd.extend(
            ["--filter", os.path.join(default_crt_dir, "cmn_crt_authorities.csv")]
        )
    if sdk_config.get("MBEDTLS_CUSTOM_CERTIFICATE_BUNDLE", False):
        cert_path = sdk_config.get("MBEDTLS_CUSTOM_CERTIFICATE_BUNDLE_PATH", "")
        if os.path.isfile(cert_path) or os.path.isdir(cert_path):
            crt_args.append(os.path.abspath(cert_path))
        else:
            print("Warning! Couldn't find custom certificate bundle %s" % cert_path)
    crt_args.append("-q")
    # Use exec_command to change working directory
    exec_command(cmd + crt_args, cwd=BUILD_DIR)
    bundle_path = os.path.join("$BUILD_DIR", "x509_crt_bundle")
    env.Execute(
        env.VerboseAction(
            " ".join(
                [
                    os.path.join(
                        env.PioPlatform().get_package_dir("tool-cmake"),
                        "bin",
                        "cmake",
                    ),
                    "-DDATA_FILE=" + bundle_path,
                    "-DSOURCE_FILE=%s.S" % bundle_path,
                    "-DFILE_TYPE=BINARY",
                    "-P",
                    os.path.join(
                        FRAMEWORK_DIR,
                        "tools",
                        "cmake",
                        "scripts",
                        "data_file_embed_asm.cmake",
                    ),
                ]
            ),
            "Generating assembly for certificate bundle...",
        )
    )
def install_python_deps():
    """Install or upgrade the Python packages ESP-IDF's tooling requires."""

    def _get_installed_pip_packages():
        # Returns {package_name: semver} for the current interpreter
        result = {}
        packages = {}
        pip_output = subprocess.check_output(
            [env.subst("$PYTHONEXE"), "-m", "pip", "list", "--format=json"]
        )
        try:
            packages = json.loads(pip_output)
        except:
            print("Warning! Couldn't extract the list of installed Python packages.")
            return {}
        for p in packages:
            result[p["name"]] = pepver_to_semver(p["version"])
        return result

    deps = {
        # https://github.com/platformio/platform-espressif32/issues/635
        "cryptography": ">=2.1.4,<35.0.0",
        "future": ">=0.15.2",
        "pyparsing": ">=2.0.3,<2.4.0",
        "kconfiglib": "==13.7.1",
    }

    installed_packages = _get_installed_pip_packages()
    packages_to_install = []
    for package, spec in deps.items():
        if package not in installed_packages:
            packages_to_install.append(package)
        else:
            version_spec = semantic_version.Spec(spec)
            if not version_spec.match(installed_packages[package]):
                packages_to_install.append(package)

    if packages_to_install:
        env.Execute(
            env.VerboseAction(
                (
                    '"$PYTHONEXE" -m pip install -U --force-reinstall '
                    + " ".join(['"%s%s"' % (p, deps[p]) for p in packages_to_install])
                ),
                "Installing ESP-IDF's Python dependencies",
            )
        )

    # a special "esp-windows-curses" python package is required on Windows for Menuconfig
    if "windows" in get_systype():
        import pkg_resources

        if "esp-windows-curses" not in {pkg.key for pkg in pkg_resources.working_set}:
            env.Execute(
                env.VerboseAction(
                    '$PYTHONEXE -m pip install "file://%s/tools/kconfig_new/esp-windows-curses" windows-curses'
                    % FRAMEWORK_DIR,
                    "Installing windows-curses package",
                )
            )
#
# ESP-IDF requires Python packages with specific versions
#
install_python_deps()
# ESP-IDF package doesn't contain .git folder, instead package version is specified
# in a special file "version.txt" in the root folder of the package
create_version_file()
#
# Generate final linker script
#
# Only when the board manifest doesn't supply a custom ldscript
if not board.get("build.ldscript", ""):
    linker_script = env.Command(
        os.path.join("$BUILD_DIR", "memory.ld"),
        board.get(
            "build.esp-idf.ldscript",
            os.path.join(
                FRAMEWORK_DIR, "components", "esp_system", "ld", idf_variant, "memory.ld.in"
            ),
        ),
        # The template is expanded by the C preprocessor against sdkconfig
        env.VerboseAction(
            '$CC -I"$BUILD_DIR/config" -I"' +
            os.path.join(FRAMEWORK_DIR, "components", "esp_system", "ld") +
            '" -C -P -x c -E $SOURCE -o $TARGET',
            "Generating LD script $TARGET",
        ),
    )
    env.Depends("$BUILD_DIR/$PROGNAME$PROGSUFFIX", linker_script)
    env.Replace(LDSCRIPT_PATH="memory.ld")
#
# Generate partition table
#
# "build.partitions" may name a stock CSV inside the framework or a project file
fwpartitions_dir = os.path.join(FRAMEWORK_DIR, "components", "partition_table")
partitions_csv = board.get("build.partitions", "partitions_singleapp.csv")
env.Replace(
    PARTITIONS_TABLE_CSV=os.path.abspath(
        os.path.join(fwpartitions_dir, partitions_csv)
        if os.path.isfile(os.path.join(fwpartitions_dir, partitions_csv))
        else partitions_csv
    )
)
partition_table = env.Command(
    os.path.join("$BUILD_DIR", "partitions.bin"),
    "$PARTITIONS_TABLE_CSV",
    env.VerboseAction(
        '"$PYTHONEXE" "%s" -q --flash-size "%s" $SOURCE $TARGET'
        % (
            os.path.join(
                FRAMEWORK_DIR, "components", "partition_table", "gen_esp32part.py"
            ),
            board.get("upload.flash_size", "4MB"),
        ),
        "Generating partitions $TARGET",
    ),
)
env.Depends("$BUILD_DIR/$PROGNAME$PROGSUFFIX", partition_table)
#
# Current build script limitations
#
# Whitespace in paths breaks the generated CMake/ldgen command lines
if any(" " in p for p in (FRAMEWORK_DIR, BUILD_DIR)):
    sys.stderr.write("Error: Detected a whitespace character in project paths.\n")
    env.Exit(1)
if not os.path.isdir(PROJECT_SRC_DIR):
    sys.stderr.write(
        "Error: Missing the `%s` folder with project sources.\n"
        % os.path.basename(PROJECT_SRC_DIR)
    )
    env.Exit(1)
if env.subst("$SRC_FILTER"):
    print(
        (
            "Warning: the 'src_filter' option cannot be used with ESP-IDF. Select source "
            "files to build in the project CMakeLists.txt file.\n"
        )
    )
# Warn users still carrying a pre-4.0 layout artifact (generated "sdkconfig.h").
# Fix: the URL string literal was missing its closing quote (SyntaxError).
if os.path.isfile(os.path.join(PROJECT_SRC_DIR, "sdkconfig.h")):
    print(
        "Warning! Starting with ESP-IDF v4.0, new project structure is required: \n"
        "https://docs.platformio.org/en/latest/frameworks/espidf.html"
    )
#
# Initial targets loading
#
# By default 'main' folder is used to store source files. In case when a user has
# default 'src' folder we need to add this as an extra component. If there is no 'main'
# folder CMake won't generate dependencies properly
extra_components = [generate_default_component()]
if PROJECT_SRC_DIR != os.path.join(PROJECT_DIR, "main"):
    extra_components.append(PROJECT_SRC_DIR)
if "arduino" in env.subst("$PIOFRAMEWORK"):
    print(
        "Warning! Arduino framework as an ESP-IDF component doesn't handle "
        "the `variant` field! The default `esp32` variant will be used."
    )
    extra_components.append(ARDUINO_FRAMEWORK_DIR)
print("Reading CMake configuration...")
project_codemodel = get_cmake_code_model(
    PROJECT_DIR,
    BUILD_DIR,
    [
        "-DIDF_TARGET=" + idf_variant,
        "-DPYTHON_DEPS_CHECKED=1",
        "-DEXTRA_COMPONENT_DIRS:PATH=" + ";".join(extra_components),
        "-DPYTHON=" + env.subst("$PYTHONEXE"),
        "-DSDKCONFIG=" + SDKCONFIG_PATH,
    ]
    # Board manifests may pass extra CMake arguments verbatim
    + click.parser.split_arg_string(board.get("build.cmake_extra_args", "")),
)
# At this point the sdkconfig file should be generated by the underlying build system
assert os.path.isfile(SDKCONFIG_PATH), (
    "Missing auto-generated SDK configuration file `%s`" % SDKCONFIG_PATH
)
if not project_codemodel:
    sys.stderr.write("Error: Couldn't find code model generated by CMake\n")
    env.Exit(1)
# Resolve all targets, the project's own target, and the firmware ELF config
target_configs = load_target_configurations(
    project_codemodel, os.path.join(BUILD_DIR, CMAKE_API_REPLY_PATH)
)
sdk_config = get_sdk_configuration()
project_target_name = "__idf_%s" % os.path.basename(PROJECT_SRC_DIR)
if project_target_name not in target_configs:
    sys.stderr.write("Error: Couldn't find the main target of the project!\n")
    env.Exit(1)
# Both 'main' and a custom source folder registered as targets is ambiguous
if project_target_name != "__idf_main" and "__idf_main" in target_configs:
    sys.stderr.write(
        (
            "Warning! Detected two different targets with project sources. Please use "
            "either %s or specify 'main' folder in 'platformio.ini' file.\n"
            % project_target_name
        )
    )
    env.Exit(1)
project_ld_scipt = generate_project_ld_script(
    sdk_config, [project_target_name, "__pio_env"]
)
env.Depends("$BUILD_DIR/$PROGNAME$PROGSUFFIX", project_ld_scipt)
elf_config = get_project_elf(target_configs)
default_config_name = find_default_component(target_configs)
framework_components_map = get_components_map(
    target_configs,
    ["STATIC_LIBRARY", "OBJECT_LIBRARY"],
    [project_target_name, default_config_name],
)
build_components(env, framework_components_map, PROJECT_DIR)
if not elf_config:
    sys.stderr.write("Error: Couldn't load the main firmware target of the project\n")
    env.Exit(1)
# The linker script generation depends on every built component archive
for component_config in framework_components_map.values():
    env.Depends(project_ld_scipt, component_config["lib"])
project_config = target_configs.get(project_target_name, {})
default_config = target_configs.get(default_config_name, {})
project_defines = get_app_defines(project_config)
project_flags = get_app_flags(project_config, default_config)
link_args = extract_link_args(elf_config)
app_includes = get_app_includes(elf_config)
project_lib_includes = get_project_lib_includes(env)
#
# Compile bootloader
#
env.Depends("$BUILD_DIR/$PROGNAME$PROGSUFFIX", build_bootloader())
#
# Target: ESP-IDF menuconfig
#
env.AddPlatformTarget(
    "menuconfig",
    None,
    [env.VerboseAction(RunMenuconfig, "Running menuconfig...")],
    "Run Menuconfig",
)
#
# Process main parts of the framework
#
libs = find_lib_deps(
    framework_components_map, elf_config, link_args, [project_target_name]
)
# Extra flags which need to be explicitly specified in LINKFLAGS section because SCons
# cannot merge them correctly
extra_flags = filter_args(link_args["LINKFLAGS"], ["-T", "-u"])
extra_flags = fix_ld_paths(extra_flags)
link_args["LINKFLAGS"] = sorted(list(set(link_args["LINKFLAGS"]) - set(extra_flags)))
# Remove the main linker script flags ("-T memory.ld") since the script is added
# explicitly later via LDSCRIPT_PATH
try:
    ld_index = extra_flags.index("memory.ld")
    extra_flags.pop(ld_index)
    extra_flags.pop(ld_index - 1)
except (ValueError, IndexError):
    # Narrowed from a bare except (which also hid a stray `pass`): only a
    # missing "memory.ld" entry or a malformed flag list is expected here
    print("Warning! Couldn't find the main linker script in the CMake code model.")
#
# Process project sources
#
# Remove project source files from following build stages as they're
# built as part of the framework
def _skip_prj_source_files(node):
    # Files under the project source dir are compiled as part of the ESP-IDF
    # framework build, so drop them from the later SCons build stages
    # (returning None excludes the node; returning it keeps it).
    source_path = node.srcnode().get_path().lower()
    if not source_path.startswith(PROJECT_SRC_DIR.lower()):
        return node
    return None
env.AddBuildMiddleware(_skip_prj_source_files)
# Project files should be compiled only when a special
# option is enabled when running 'test' command
if "__test" not in COMMAND_LINE_TARGETS or env.GetProjectOption(
    "test_build_project_src"
):
    project_env = env.Clone()
    if project_target_name != "__idf_main":
        # Manually add dependencies to CPPPATH since ESP-IDF build system doesn't generate
        # this info if the folder with sources is not named 'main'
        # https://docs.espressif.com/projects/esp-idf/en/latest/api-guides/build-system.html#rename-main
        project_env.AppendUnique(CPPPATH=app_includes["plain_includes"])
    # Add include dirs from PlatformIO build system to project CPPPATH so
    # they're visible to PIOBUILDFILES
    project_env.Append(
        CPPPATH=["$PROJECT_INCLUDE_DIR", "$PROJECT_SRC_DIR"] + project_lib_includes
    )
    env.Append(
        PIOBUILDFILES=compile_source_files(
            target_configs.get(project_target_name),
            project_env,
            project_env.subst("$PROJECT_DIR"),
        )
    )
# Default partition table offset per ESP-IDF convention when the sdkconfig
# does not override it.
partition_table_offset = sdk_config.get("PARTITION_TABLE_OFFSET", 0x8000)
# Merge the flags extracted from the CMake code model into the environment.
project_flags.update(link_args)
env.MergeFlags(project_flags)
env.Prepend(
    CPPPATH=app_includes["plain_includes"],
    CPPDEFINES=project_defines,
    LINKFLAGS=extra_flags,
    LIBS=libs,
    FLASH_EXTRA_IMAGES=[
        (
            board.get(
                "upload.bootloader_offset", "0x0" if mcu == "esp32c3" else "0x1000"
            ),
            os.path.join("$BUILD_DIR", "bootloader.bin"),
        ),
        (
            board.get("upload.partition_table_offset", hex(partition_table_offset)),
            os.path.join("$BUILD_DIR", "partitions.bin"),
        ),
    ],
)
#
# Generate mbedtls bundle
#
if sdk_config.get("MBEDTLS_CERTIFICATE_BUNDLE", False):
    generate_mbedtls_bundle(sdk_config)
#
# To embed firmware checksum a special argument for esptool.py is required
#
# The builder action is deep-copied before patching so the original action
# object shared by SCons is not mutated in place.
action = copy.deepcopy(env["BUILDERS"]["ElfToBin"].action)
action.cmd_list = env["BUILDERS"]["ElfToBin"].action.cmd_list.replace(
    "-o", "--elf-sha256-offset 0xb0 -o"
)
env["BUILDERS"]["ElfToBin"].action = action
#
# Compile ULP sources in 'ulp' folder
#
ulp_dir = os.path.join(PROJECT_DIR, "ulp")
if os.path.isdir(ulp_dir) and os.listdir(ulp_dir) and mcu != "esp32c3":
    env.SConscript("ulp.py", exports="env sdk_config project_config idf_variant")
#
# Process OTA partition and image
#
ota_partition_params = get_partition_info(
    env.subst("$PARTITIONS_TABLE_CSV"),
    partition_table_offset,
    {"name": "ota", "type": "data", "subtype": "ota"},
)
if ota_partition_params["size"] and ota_partition_params["offset"]:
    # Generate an empty image if OTA is enabled in partition table
    ota_partition_image = os.path.join("$BUILD_DIR", "ota_data_initial.bin")
    generate_empty_partition_image(ota_partition_image, ota_partition_params["size"])
    env.Append(
        FLASH_EXTRA_IMAGES=[
            (
                board.get(
                    "upload.ota_partition_offset", ota_partition_params["offset"]
                ),
                ota_partition_image,
            )
        ]
    )
#
# Configure application partition offset
#
env.Replace(
    ESP32_APP_OFFSET=get_app_partition_offset(
        env.subst("$PARTITIONS_TABLE_CSV"), partition_table_offset
    )
)
# Propagate application offset to debug configurations
env["IDE_EXTRA_DATA"].update({"application_offset": env.subst("$ESP32_APP_OFFSET")})
| true | true |
1c386b015eef9e74ea213d83168770d90c057f18 | 83,694 | py | Python | build/android/gyp/write_build_config.py | gengleilei/wee8 | a7bff18685ddfc7f16de825c9d3a12432d4138d5 | [
"BSD-3-Clause"
] | null | null | null | build/android/gyp/write_build_config.py | gengleilei/wee8 | a7bff18685ddfc7f16de825c9d3a12432d4138d5 | [
"BSD-3-Clause"
] | null | null | null | build/android/gyp/write_build_config.py | gengleilei/wee8 | a7bff18685ddfc7f16de825c9d3a12432d4138d5 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Writes a build_config file.
The build_config file for a target is a json file containing information about
how to build that target based on the target's dependencies. This includes
things like: the javac classpath, the list of android resources dependencies,
etc. It also includes the information needed to create the build_config for
other targets that depend on that one.
Android build scripts should not refer to the build_config directly, and the
build specification should instead pass information in using the special
file-arg syntax (see build_utils.py:ExpandFileArgs). That syntax allows passing
of values in a json dict in a file and looks like this:
--python-arg=@FileArg(build_config_path:javac:classpath)
Note: If paths to input files are passed in this way, it is important that:
1. inputs/deps of the action ensure that the files are available the first
time the action runs.
2. Either (a) or (b)
a. inputs/deps ensure that the action runs whenever one of the files changes
b. the files are added to the action's depfile
NOTE: All paths within .build_config files are relative to $OUTPUT_CHROMIUM_DIR.
This is a technical note describing the format of .build_config files.
Please keep it updated when changing this script. For extraction and
visualization instructions, see build/android/docs/build_config.md
------------- BEGIN_MARKDOWN ---------------------------------------------------
The .build_config file format
===
# Introduction
This document tries to explain the format of `.build_config` generated during
the Android build of Chromium. For a higher-level explanation of these files,
please read
[build/android/docs/build_config.md](build/android/docs/build_config.md).
# The `deps_info` top-level dictionary:
All `.build_config` files have a required `'deps_info'` key, whose value is a
dictionary describing the target and its dependencies. The latter has the
following required keys:
## Required keys in `deps_info`:
* `deps_info['type']`: The target type as a string.
The following types are known by the internal GN build rules and the
build scripts altogether:
* [java_binary](#target_java_binary)
* [java_annotation_processor](#target_java_annotation_processor)
* [junit_binary](#target_junit_binary)
* [java_library](#target_java_library)
* [android_assets](#target_android_assets)
* [android_resources](#target_android_resources)
* [android_apk](#target_android_apk)
* [android_app_bundle_module](#target_android_app_bundle_module)
* [android_app_bundle](#target_android_app_bundle)
* [dist_jar](#target_dist_jar)
* [dist_aar](#target_dist_aar)
* [group](#target_group)
See later sections for more details of some of these.
* `deps_info['path']`: Path to the target's `.build_config` file.
* `deps_info['name']`: Nothing more than the basename of `deps_info['path']`
at the moment.
* `deps_info['deps_configs']`: List of paths to the `.build_config` files of
all *direct* dependencies of the current target.
NOTE: Because the `.build_config` of a given target is always generated
after the `.build_config` of its dependencies, the `write_build_config.py`
script can use chains of `deps_configs` to compute transitive dependencies
for each target when needed.
## Optional keys in `deps_info`:
The following keys will only appear in the `.build_config` files of certain
target types:
* `deps_info['requires_android']`: True to indicate that the corresponding
code uses Android-specific APIs, and thus cannot run on the host within a
regular JVM. May only appear in Java-related targets.
* `deps_info['supports_android']`:
May appear in Java-related targets, and indicates that
the corresponding code doesn't use Java APIs that are not available on
Android. As such it may run either on the host or on an Android device.
* `deps_info['assets']`:
Only seen for the [`android_assets`](#target_android_assets) type. See below.
* `deps_info['package_name']`: Java package name associated with this target.
NOTE: For `android_resources` targets,
this is the package name for the corresponding R class. For `android_apk`
targets, this is the corresponding package name. This does *not* appear for
other target types.
* `deps_info['android_manifest']`:
Path to an AndroidManifest.xml file related to the current target.
* `deps_info['base_module_config']`:
Only seen for the [`android_app_bundle`](#target_android_app_bundle) type.
Path to the base module for the bundle.
* `deps_info['is_base_module']`:
Only seen for the
[`android_app_bundle_module`](#target_android_app_bundle_module)
type. Whether or not this module is the base module for some bundle.
* `deps_info['dependency_zips']`:
List of `deps_info['resources_zip']` entries for all `android_resources`
dependencies for the current target.
* `deps_info['extra_package_names']`:
Always empty for `android_resources` types. Otherwise,
the list of `deps_info['package_name']` entries for all `android_resources`
dependencies for the current target. Computed automatically by
`write_build_config.py`.
* `deps_info['dependency_r_txt_files']`:
Exists only on dist_aar. It is the list of deps_info['r_text_path'] from
transitive dependencies. Computed automatically.
# `.build_config` target types description:
## <a name="target_group">Target type `group`</a>:
This type corresponds to a simple target that is only used to group
dependencies. It matches the `java_group()` GN template. Its only top-level
`deps_info` keys are `supports_android` (always True), and `deps_configs`.
## <a name="target_android_resources">Target type `android_resources`</a>:
This type corresponds to targets that are used to group Android resource files.
For example, all `android_resources` dependencies of an `android_apk` will
end up packaged into the final APK by the build system.
It uses the following keys:
* `deps_info['res_sources_path']`:
Path to file containing a list of resource source files used by the
android_resources target. This replaces `deps_info['resource_dirs']` which is
now no longer used.
* `deps_info['resources_zip']`:
*Required*. Path to the `.resources.zip` file that contains all raw/uncompiled
resource files for this target (and also no `R.txt`, `R.java` or `R.class`).
If `deps_info['resource_dirs']` is missing, this must point to a prebuilt
`.aar` archive containing resources. Otherwise, this will point to a
zip archive generated at build time, wrapping the content of
`deps_info['resource_dirs']` into a single zip file.
* `deps_info['package_name']`:
Java package name that the R class for this target belongs to.
* `deps_info['android_manifest']`:
Optional. Path to the top-level Android manifest file associated with these
resources (if not provided, an empty manifest will be used to generate R.txt).
* `deps_info['resource_overlay']`:
Optional. Whether the resources in resources_zip should override resources with
the same name. Does not affect the behaviour of any android_resources()
dependencies of this target. If a target with resource_overlay=true depends
on another target with resource_overlay=true the target with the dependency
overrides the other.
* `deps_info['r_text_path']`:
Provide the path to the `R.txt` file that describes the resources wrapped by
this target. Normally this file is generated from the content of the resource
directories or zip file, but some targets can provide their own `R.txt` file
if they want.
* `deps_info['srcjar_path']`:
Path to the `.srcjar` file that contains the auto-generated `R.java` source
file corresponding to the content of `deps_info['r_text_path']`. This is
*always* generated from the content of `deps_info['r_text_path']` by the
`build/android/gyp/process_resources.py` script.
* `deps_info['static_library_dependent_classpath_configs']`:
Sub dictionary mapping .build_config paths to lists of jar files. For static
library APKs, this defines which input jars belong to each
static_library_dependent_target.
* `deps_info['static_library_proguard_mapping_output_paths']`:
Additional paths to copy the ProGuard mapping file to for static library
APKs.
## <a name="target_android_assets">Target type `android_assets`</a>:
This type corresponds to targets used to group Android assets, i.e. arbitrary
files that will be placed under `//assets/` within the final APK.
These use an `deps_info['assets']` key to hold a dictionary of values related
to assets covered by this target.
* `assets['sources']`:
The list of all asset source paths for this target. Each source path can
use an optional `:<zipPath>` suffix, where `<zipPath>` is the final location
of the assets (relative to `//assets/`) within the APK.
* `assets['outputs']`:
Optional. Some of the sources might be renamed before being stored in the
final //assets/ sub-directory. When this happens, this contains a list of
all renamed output file paths
NOTE: When not empty, the first items of `assets['sources']` must match
every item in this list. Extra sources correspond to non-renamed sources.
NOTE: This comes from the `asset_renaming_destinations` parameter for the
`android_assets()` GN template.
* `assets['disable_compression']`:
Optional. Will be True to indicate that these assets should be stored
uncompressed in the final APK. For example, this is necessary for locale
.pak files used by the System WebView feature.
* `assets['treat_as_locale_paks']`:
Optional. Will be True to indicate that these assets are locale `.pak` files
(containing localized strings for C++). These are later processed to generate
a special `.build_config.java` source file, listing all supported Locales in
the current build.
## <a name="target_java_library">Target type `java_library`</a>:
This type is used to describe target that wrap Java bytecode, either created
by compiling sources, or providing them with a prebuilt jar.
* `deps_info['unprocessed_jar_path']`:
Path to the original .jar file for this target, before any kind of processing
through Proguard or other tools. For most targets this is generated
from sources, with a name like `$target_name.javac.jar`. However, when using
a prebuilt jar, this will point to the source archive directly.
* `deps_info['device_jar_path']`:
Path to a file that is the result of processing
`deps_info['unprocessed_jar_path']` with various tools (ready to be dexed).
* `deps_info['host_jar_path']`:
Path to a file that is the result of processing
`deps_info['unprocessed_jar_path']` with various tools (use by java_binary).
* `deps_info['interface_jar_path']`:
Path to the interface jar generated for this library. This corresponds to
a jar file that only contains declarations. Generated by running the `ijar` on
`deps_info['unprocessed_jar_path']` or the `turbine` tool on source files.
* `deps_info['dex_path']`:
Path to the `.dex` file generated for this target, from
`deps_info['device_jar_path']` unless this comes from a prebuilt `.aar` archive.
* `deps_info['is_prebuilt']`:
True to indicate that this target corresponds to a prebuilt `.jar` file.
In this case, `deps_info['unprocessed_jar_path']` will point to the source
`.jar` file. Otherwise, it will be point to a build-generated file.
* `deps_info['java_sources_file']`:
Path to a single `.sources` file listing all the Java sources that were used
to generate the library (simple text format, one `.jar` path per line).
* `deps_info['lint_android_manifest']`:
Path to an AndroidManifest.xml file to use for this lint target.
* `deps_info['lint_java_sources']`:
The list of all `deps_info['java_sources_file']` entries for all library
dependencies that are chromium code. Note: this is a list of files, where each
file contains a list of Java source files. This is used for lint.
* `deps_info['lint_srcjars']`:
List of all bundled srcjars of all transitive java library targets. Excludes
non-chromium java libraries.
* `deps_info['lint_resource_sources']`:
List of all resource sources files belonging to all transitive resource
dependencies of this target. Excludes resources owned by non-chromium code.
* `deps_info['lint_resource_zips']`:
List of all resource zip files belonging to all transitive resource dependencies
of this target. Excludes resources owned by non-chromium code.
* `deps_info['owned_resource_srcjars']`:
List of all .srcjar files belonging to all *direct* resource dependencies (i.e.
without another java_library in the dependency path) for this target.
* `deps_info['javac']`:
A dictionary containing information about the way the sources in this library
are compiled. Appears also on other Java-related targets. See the [dedicated
section about this](#dict_javac) below for details.
* `deps_info['javac_full_classpath']`:
The classpath used when performing bytecode processing. Essentially the
collection of all `deps_info['unprocessed_jar_path']` entries for the target
and all its dependencies.
* `deps_info['javac_full_interface_classpath']`:
The classpath used when using the errorprone compiler.
* `deps_info['proguard_enabled']`:
True to indicate that ProGuard processing is enabled for this target.
* `deps_info['proguard_configs']`:
A list of paths to ProGuard configuration files related to this library.
* `deps_info['extra_classpath_jars']`:
For some Java related types, a list of extra `.jar` files to use at build time
but not at runtime.
## <a name="target_java_binary">Target type `java_binary`</a>:
This type corresponds to a Java binary, which is nothing more than a
`java_library` target that also provides a main class name. It thus inherits
all entries from the `java_library` type, and adds:
* `deps_info['main_class']`:
Name of the main Java class that serves as an entry point for the binary.
* `deps_info['device_classpath']`:
The classpath used when running a Java or Android binary. Essentially the
collection of all `deps_info['device_jar_path']` entries for the target and all
its dependencies.
## <a name="target_junit_binary">Target type `junit_binary`</a>:
A target type for JUnit-specific binaries. Identical to
[`java_binary`](#target_java_binary) in the context of `.build_config` files,
except the name.
## <a name="target_java_annotation_processor">Target type \
`java_annotation_processor`</a>:
A target type for Java annotation processors. Identical to
[`java_binary`](#target_java_binary) in the context of `.build_config` files,
except the name, except that it requires a `deps_info['main_class']` entry.
## <a name="target_android_apk">Target type `android_apk`</a>:
Corresponds to an Android APK. Inherits from the
[`java_binary`](#target_java_binary) type and adds:
* `deps_info['apk_path']`:
Path to the raw, unsigned, APK generated by this target.
* `deps_info['incremental_apk_path']`:
Path to the raw, unsigned, incremental APK generated by this target.
* `deps_info['incremental_install_json_path']`:
Path to the JSON file with per-apk details for incremental install.
See `build/android/gyp/incremental/write_installer_json.py` for more
details about its content.
* `deps_info['dist_jar']['all_interface_jars']`:
For `android_apk` and `dist_jar` targets, a list of all interface jar files
that will be merged into the final `.jar` file for distribution.
* `deps_info['final_dex']['path']`:
Path to the final classes.dex file (or classes.zip in case of multi-dex)
for this APK.
* `deps_info['final_dex']['all_dex_files']`:
The list of paths to all `deps_info['dex_path']` entries for all libraries
that comprise this APK. Valid only for debug builds.
* `native['libraries']`
List of native libraries for the primary ABI to be embedded in this APK.
E.g. [ "libchrome.so" ] (i.e. this doesn't include any ABI sub-directory
prefix).
* `native['java_libraries_list']`
The same list as `native['libraries']` as a string holding a Java source
fragment, e.g. `"{\"chrome\"}"`, without any `lib` prefix, and `.so`
suffix (as expected by `System.loadLibrary()`).
* `native['second_abi_libraries']`
List of native libraries for the secondary ABI to be embedded in this APK.
Empty if only a single ABI is supported.
* `native['uncompress_shared_libraries']`
A boolean indicating whether native libraries are stored uncompressed in the
APK.
* `native['loadable_modules']`
A list of native libraries to store within the APK, in addition to those from
`native['libraries']`. These correspond to things like the Chromium linker
or instrumentation libraries.
* `native['secondary_abi_loadable_modules']`
Secondary ABI version of loadable_modules
* `native['library_always_compress']`
A list of library files that we always compress.
* `native['library_renames']`
A list of library files that we prepend "crazy." to their file names.
* `assets`
A list of assets stored compressed in the APK. Each entry has the format
`<source-path>:<destination-path>`, where `<source-path>` is relative to
`$CHROMIUM_OUTPUT_DIR`, and `<destination-path>` is relative to `//assets/`
within the APK.
NOTE: Not to be confused with the `deps_info['assets']` dictionary that
belongs to `android_assets` targets only.
* `uncompressed_assets`
A list of uncompressed assets stored in the APK. Each entry has the format
`<source-path>:<destination-path>` too.
* `compressed_locales_java_list`
A string holding a Java source fragment that gives the list of locales stored
compressed in the `//assets/` directory. E.g. `"{\"am\","\ar\",\"en-US\"}"`.
Note that the files will be stored with the `.pak` extension (e.g.
`//assets/en-US.pak`).
* `uncompressed_locales_java_list`
A string holding a Java source fragment that gives the list of locales stored
uncompressed in the `//assets/stored-locales/` directory. These are used for
the System WebView feature only. Note that the files will be stored with the
`.pak` extension (e.g. `//assets/stored-locales/en-US.pak`).
* `extra_android_manifests`
A list of `deps_configs['android_manifest']` entries, for all resource
dependencies for this target. I.e. a list of paths to manifest files for
all the resources in this APK. These will be merged with the root manifest
file to generate the final one used to build the APK.
* `java_resources_jars`
This is a list of `.jar` files whose *Java* resources should be included in
the final APK. For example, this is used to copy the `.res` files from the
EMMA Coverage tool. The copy will omit any `.class` file and the top-level
`//meta-inf/` directory from the input jars. Everything else will be copied
into the final APK as-is.
NOTE: This has nothing to do with *Android* resources.
* `jni['all_source']`
The list of all `deps_info['java_sources_file']` entries for all library
dependencies for this APK. Note: this is a list of files, where each file
contains a list of Java source files. This is used for JNI registration.
* `deps_info['proguard_all_configs']`:
The collection of all 'deps_info['proguard_configs']` values from this target
and all its dependencies.
* `deps_info['proguard_classpath_jars']`:
The collection of all 'deps_info['extra_classpath_jars']` values from all
dependencies.
* `deps_info['proguard_under_test_mapping']`:
Applicable to apks with proguard enabled that have an apk_under_test. This is
the path to the apk_under_test's output proguard .mapping file.
## <a name="target_android_app_bundle_module">Target type \
`android_app_bundle_module`</a>:
Corresponds to an Android app bundle module. Very similar to an APK and
inherits the same fields, except that this does not generate an installable
file (see `android_app_bundle`), and for the following omitted fields:
* `deps_info['apk_path']`, `deps_info['incremental_apk_path']` and
`deps_info['incremental_install_json_path']` are omitted.
* top-level `dist_jar` is omitted as well.
In addition to `android_apk` targets though come these new fields:
* `deps_info['proto_resources_path']`:
The path of an zip archive containing the APK's resources compiled to the
protocol buffer format (instead of regular binary xml + resources.arsc).
* `deps_info['module_rtxt_path']`:
The path of the R.txt file generated when compiling the resources for the bundle
module.
* `deps_info['module_pathmap_path']`:
The path of the pathmap file generated when compiling the resources for the
bundle module, if resource path shortening is enabled.
* `deps_info['base_allowlist_rtxt_path']`:
Optional path to an R.txt file used as a allowlist for base string resources.
This means that any string resource listed in this file *and* in
`deps_info['module_rtxt_path']` will end up in the base split APK of any
`android_app_bundle` target that uses this target as its base module.
This ensures that such localized strings are available to all bundle installs,
even when language based splits are enabled (e.g. required for WebView strings
inside the Monochrome bundle).
## <a name="target_android_app_bundle">Target type `android_app_bundle`</a>
This target type corresponds to an Android app bundle, and is built from one
or more `android_app_bundle_module` targets listed as dependencies.
## <a name="target_dist_aar">Target type `dist_aar`</a>:
This type corresponds to a target used to generate an `.aar` archive for
distribution. The archive's content is determined by the target's dependencies.
This always has the following entries:
* `deps_info['supports_android']` (always True).
* `deps_info['requires_android']` (always True).
* `deps_info['proguard_configs']` (optional).
## <a name="target_dist_jar">Target type `dist_jar`</a>:
This type is similar to [`dist_aar`](#target_dist_aar) but is not
Android-specific, and used to create a `.jar` file that can be later
redistributed.
This always has the following entries:
* `deps_info['proguard_enabled']` (False by default).
* `deps_info['proguard_configs']` (optional).
* `deps_info['supports_android']` (True by default).
* `deps_info['requires_android']` (False by default).
## <a name="dict_javac">The `deps_info['javac']` dictionary</a>:
This dictionary appears in Java-related targets (e.g. `java_library`,
`android_apk` and others), and contains information related to the compilation
of Java sources, class files, and jars.
* `javac['resource_packages']`
For `java_library` targets, this is the list of package names for all resource
dependencies for the current target. Order must match the one from
`javac['srcjars']`. For other target types, this key does not exist.
* `javac['classpath']`
The classpath used to compile this target when annotation processors are
present.
* `javac['interface_classpath']`
The classpath used to compile this target when annotation processors are
not present. These are also always used to know when a target needs to be
rebuilt.
* `javac['processor_classpath']`
The classpath listing the jars used for annotation processors. I.e. sent as
`-processorpath` when invoking `javac`.
* `javac['processor_classes']`
The list of annotation processor main classes. I.e. sent as `-processor` when
invoking `javac`.
## <a name="android_app_bundle">Target type `android_app_bundle`</a>:
This type corresponds to an Android app bundle (`.aab` file).
--------------- END_MARKDOWN ---------------------------------------------------
"""
from __future__ import print_function
import collections
import itertools
import json
import optparse
import os
import sys
import xml.dom.minidom
from util import build_utils
from util import resource_utils
# Types that should never be used as a dependency of another build config.
# These are "root" targets (installable binaries/bundles and their hosts).
_ROOT_TYPES = ('android_apk', 'java_binary', 'java_annotation_processor',
               'junit_binary', 'android_app_bundle')
# Types that should not allow code deps to pass through.
_RESOURCE_TYPES = ('android_assets', 'android_resources', 'system_java_library')
def _ExtractMarkdownDocumentation(input_text):
  """Extract Markdown documentation lines from |input_text|.

  Collects every line found between a '-- BEGIN_MARKDOWN --' marker line and
  the matching '-- END_MARKDOWN --' marker line; the marker lines themselves
  are excluded. Returns the collected lines as a list of strings."""
  extracted = []
  inside_markdown = False
  for current_line in input_text.splitlines():
    if not inside_markdown:
      # Start collecting after a BEGIN marker (marker line not collected).
      inside_markdown = '-- BEGIN_MARKDOWN --' in current_line
    elif '-- END_MARKDOWN --' in current_line:
      inside_markdown = False
    else:
      extracted.append(current_line)
  return extracted
class AndroidManifest(object):
  """Thin wrapper around a parsed AndroidManifest.xml file."""

  def __init__(self, path):
    self.path = path
    document = xml.dom.minidom.parse(path)
    manifest_nodes = document.getElementsByTagName('manifest')
    # A valid manifest has exactly one top-level <manifest> element.
    assert len(manifest_nodes) == 1
    self.manifest = manifest_nodes[0]

  def GetInstrumentationElements(self):
    """Return the list of <instrumentation> elements, or None if absent."""
    elements = self.manifest.getElementsByTagName('instrumentation')
    return elements if elements else None

  def CheckInstrumentationElements(self, expected_package):
    """Raise unless every <instrumentation> element targets |expected_package|."""
    elements = self.GetInstrumentationElements()
    if not elements:
      raise Exception('No <instrumentation> elements found in %s' % self.path)
    android_ns = 'http://schemas.android.com/apk/res/android'
    for element in elements:
      target = element.getAttributeNS(android_ns, 'targetPackage')
      if target != expected_package:
        raise Exception(
            'Wrong instrumented package. Expected %s, got %s'
            % (expected_package, target))

  def GetPackageName(self):
    """Return the manifest's 'package' attribute value."""
    return self.manifest.getAttribute('package')
# Memoization cache: .build_config path -> parsed 'deps_info' dictionary.
dep_config_cache = {}


def GetDepConfig(path):
  """Load (and cache) the 'deps_info' section of a .build_config file."""
  cached = dep_config_cache.get(path)
  if cached is None:
    with open(path) as config_file:
      cached = json.load(config_file)['deps_info']
    dep_config_cache[path] = cached
  return cached
def DepsOfType(wanted_type, configs):
  """Return the members of |configs| whose 'type' entry equals |wanted_type|."""
  return list(filter(lambda config: config['type'] == wanted_type, configs))
def GetAllDepsConfigsInOrder(deps_config_paths):
  """Return all transitive .build_config paths in sorted dependency order."""
  return build_utils.GetSortedTransitiveDependencies(
      deps_config_paths, lambda path: GetDepConfig(path)['deps_configs'])
def GetObjectByPath(obj, key_path):
  """Follow the chain of keys in |key_path| through |obj| and return the
  resulting child object (|obj| itself for an empty path)."""
  current = obj
  for key in key_path:
    current = current[key]
  return current


def RemoveObjDups(obj, base, *key_path):
  """Remove, in place, the items of the array at obj[*key_path] that also
  appear in the array at base[*key_path] (i.e. drop duplicates)."""
  duplicates = set(GetObjectByPath(base, key_path))
  target_list = GetObjectByPath(obj, key_path)
  target_list[:] = [item for item in target_list if item not in duplicates]
class Deps(object):
  """Query helper over a target's direct and transitive .build_config deps."""

  def __init__(self, direct_deps_config_paths):
    # Transitive closure of dependency config paths, in sorted dependency
    # order (dependencies before dependents).
    self._all_deps_config_paths = GetAllDepsConfigsInOrder(
        direct_deps_config_paths)
    self._direct_deps_configs = [
        GetDepConfig(p) for p in direct_deps_config_paths
    ]
    self._all_deps_configs = [
        GetDepConfig(p) for p in self._all_deps_config_paths
    ]
    self._direct_deps_config_paths = direct_deps_config_paths

  def All(self, wanted_type=None):
    """Return all transitive dep configs, optionally filtered by type."""
    if wanted_type is None:
      return self._all_deps_configs
    return DepsOfType(wanted_type, self._all_deps_configs)

  def Direct(self, wanted_type=None):
    """Return only direct dep configs, optionally filtered by type."""
    if wanted_type is None:
      return self._direct_deps_configs
    return DepsOfType(wanted_type, self._direct_deps_configs)

  def AllConfigPaths(self):
    """Return the list of all transitive .build_config paths."""
    return self._all_deps_config_paths

  def RemoveNonDirectDep(self, path):
    """Drop |path| from the transitive sets; refuses to drop a direct dep."""
    if path in self._direct_deps_config_paths:
      raise Exception('Cannot remove direct dep.')
    self._all_deps_config_paths.remove(path)
    self._all_deps_configs.remove(GetDepConfig(path))

  def GradlePrebuiltJarPaths(self):
    """Return unprocessed jar paths of direct java_library deps that Gradle
    should treat as prebuilts (deduplicated, discovery order)."""
    ret = []

    def helper(cur):
      for config in cur.Direct('java_library'):
        if config['is_prebuilt'] or config['gradle_treat_as_prebuilt']:
          if config['unprocessed_jar_path'] not in ret:
            ret.append(config['unprocessed_jar_path'])

    helper(self)
    return ret

  def GradleLibraryProjectDeps(self):
    """Return configs of java_library deps that Gradle should treat as
    library projects, recursing through 'gradle_treat_as_prebuilt' deps."""
    ret = []

    def helper(cur):
      for config in cur.Direct('java_library'):
        if config['is_prebuilt']:
          pass
        elif config['gradle_treat_as_prebuilt']:
          # Recurse: its own deps may contain non-prebuilt library projects.
          helper(Deps(config['deps_configs']))
        elif config not in ret:
          ret.append(config)

    helper(self)
    return ret
def _MergeAssets(all_assets):
  """Merges all assets from the given deps.

  Returns:
    A tuple of: (compressed, uncompressed, locale_paks)
    |compressed| and |uncompressed| are lists of "srcPath:zipPath". srcPath is
    the path of the asset to add, and zipPath is the location within the zip
    (excluding assets/ prefix).
    |locale_paks| is a set of all zipPaths that have been marked as
    treat_as_locale_paks=true.
  """
  # itertools.izip_longest was renamed to zip_longest in Python 3; resolve
  # whichever exists so this helper works under both interpreters.
  zip_longest = getattr(itertools, 'izip_longest', None) or itertools.zip_longest
  compressed = {}
  uncompressed = {}
  locale_paks = set()
  for asset_dep in all_assets:
    entry = asset_dep['assets']
    disable_compression = entry.get('disable_compression')
    treat_as_locale_paks = entry.get('treat_as_locale_paks')
    dest_map = uncompressed if disable_compression else compressed
    other_map = compressed if disable_compression else uncompressed
    outputs = entry.get('outputs', [])
    # Extra (non-renamed) sources are padded with dest=None by zip_longest.
    for src, dest in zip_longest(entry['sources'], outputs):
      if not dest:
        dest = os.path.basename(src)
      # Merge so that each path shows up in only one of the lists, and that
      # deps of the same target override previous ones.
      other_map.pop(dest, 0)
      dest_map[dest] = src
      if treat_as_locale_paks:
        locale_paks.add(dest)

  def create_list(asset_map):
    # Format as "srcPath:zipPath" pairs.
    ret = ['%s:%s' % (src, dest) for dest, src in asset_map.items()]
    # Sort to ensure deterministic ordering.
    ret.sort()
    return ret

  return create_list(compressed), create_list(uncompressed), locale_paks
def _ResolveGroups(configs):
  """Returns a copy of |configs| with every 'group' config replaced in place
  by the configs of its constituent deps (applied repeatedly, so nested
  groups are fully flattened)."""
  resolved = list(configs)
  while True:
    group_configs = DepsOfType('group', resolved)
    if not group_configs:
      return resolved
    for group in group_configs:
      idx = resolved.index(group)
      # Splice the group's members in where the group itself was.
      resolved[idx:idx + 1] = [GetDepConfig(p) for p in group['deps_configs']]
def _DepsFromPaths(dep_paths,
                   target_type,
                   filter_root_targets=True,
                   recursive_resource_deps=False):
  """Resolves all groups and trims dependency branches that we never want.

  E.g. when a resource or asset depends on an apk target, the intent is to
  include the .apk as a resource/asset, not to pull in the apk's classpath.
  This returns the top-level nodes (closest to the current target); filtering
  an element here prunes its whole dependency branch. Groups are expanded to
  their constituents, so depending on a group equals depending on each member.
  """
  blocklist = list(_ROOT_TYPES) if filter_root_targets else []
  # Java libraries must not be reachable through assets/resources targets.
  allowlist = list(_RESOURCE_TYPES) if target_type in _RESOURCE_TYPES else []
  if recursive_resource_deps:
    # Pretend this target directly depends on all transitive deps.
    dep_paths = GetAllDepsConfigsInOrder(dep_paths)
  return _DepsFromPathsWithFilters(dep_paths, blocklist, allowlist)
def _DepsFromPathsWithFilters(dep_paths, blocklist=None, allowlist=None):
  """Resolves all groups and trims dependency branches that we never want.

  See _DepsFromPaths.

  |blocklist|: types of direct deps to drop (tips of branches to prune).
  |allowlist|: if given, the only types of direct deps to keep (all other
    branches are pruned).
  """
  configs = [GetDepConfig(p) for p in dep_paths]
  group_configs = DepsOfType('group', configs)
  # Expand groups, but keep the group configs themselves in the list as well.
  configs = _ResolveGroups(configs) + group_configs
  if blocklist:
    blocked_types = set(blocklist)
    configs = [c for c in configs if c['type'] not in blocked_types]
  if allowlist:
    allowed_types = set(allowlist)
    configs = [c for c in configs if c['type'] in allowed_types]
  return Deps([c['path'] for c in configs])
def _ExtractSharedLibsFromRuntimeDeps(runtime_deps_file):
ret = []
with open(runtime_deps_file) as f:
for line in f:
line = line.rstrip()
if not line.endswith('.so'):
continue
# Only unstripped .so files are listed in runtime deps.
# Convert to the stripped .so by going up one directory.
ret.append(os.path.normpath(line.replace('lib.unstripped/', '')))
ret.reverse()
return ret
def _CreateJavaLibrariesList(library_paths):
"""Returns a java literal array with the "base" library names:
e.g. libfoo.so -> foo
"""
names = ['"%s"' % os.path.basename(s)[3:-3] for s in library_paths]
return ('{%s}' % ','.join(sorted(set(names))))
def _CreateJavaLocaleListFromAssets(assets, locale_paks):
"""Returns a java literal array from a list of locale assets.
Args:
assets: A list of all APK asset paths in the form 'src:dst'
locale_paks: A list of asset paths that correponds to the locale pak
files of interest. Each |assets| entry will have its 'dst' part matched
against it to determine if they are part of the result.
Returns:
A string that is a Java source literal array listing the locale names
of the corresponding asset files, without directory or .pak suffix.
E.g. '{"en-GB", "en-US", "es-ES", "fr", ... }'
"""
assets_paths = [a.split(':')[1] for a in assets]
locales = [os.path.basename(a)[:-4] for a in assets_paths if a in locale_paks]
return '{%s}' % ','.join(['"%s"' % l for l in sorted(locales)])
def _AddJarMapping(jar_to_target, configs):
for config in configs:
jar = config.get('unprocessed_jar_path')
if jar:
jar_to_target[jar] = config['gn_target']
for jar in config.get('extra_classpath_jars', []):
jar_to_target[jar] = config['gn_target']
def main(argv):
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--build-config', help='Path to build_config output.')
parser.add_option(
'--type',
help='Type of this target (e.g. android_library).')
parser.add_option('--gn-target', help='GN label for this target')
parser.add_option(
'--deps-configs',
help='GN-list of dependent build_config files.')
parser.add_option(
'--annotation-processor-configs',
help='GN-list of build_config files for annotation processors.')
# android_resources options
parser.add_option('--srcjar', help='Path to target\'s resources srcjar.')
parser.add_option('--resources-zip', help='Path to target\'s resources zip.')
parser.add_option('--package-name',
help='Java package name for these resources.')
parser.add_option('--android-manifest', help='Path to android manifest.')
parser.add_option('--resource-dirs', action='append', default=[],
help='GYP-list of resource dirs')
parser.add_option(
'--res-sources-path',
help='Path to file containing a list of paths to resources.')
parser.add_option(
'--resource-overlay',
action='store_true',
help='Whether resources passed in via --resources-zip should override '
'resources with the same name')
parser.add_option(
'--recursive-resource-deps',
action='store_true',
help='Whether deps should be walked recursively to find resource deps.')
# android_assets options
parser.add_option('--asset-sources', help='List of asset sources.')
parser.add_option('--asset-renaming-sources',
help='List of asset sources with custom destinations.')
parser.add_option('--asset-renaming-destinations',
help='List of asset custom destinations.')
parser.add_option('--disable-asset-compression', action='store_true',
help='Whether to disable asset compression.')
parser.add_option('--treat-as-locale-paks', action='store_true',
help='Consider the assets as locale paks in BuildConfig.java')
# java library options
parser.add_option('--device-jar-path', help='Path to .jar for dexing.')
parser.add_option('--host-jar-path', help='Path to .jar for java_binary.')
parser.add_option('--unprocessed-jar-path',
help='Path to the .jar to use for javac classpath purposes.')
parser.add_option(
'--interface-jar-path',
help='Path to the interface .jar to use for javac classpath purposes.')
parser.add_option(
'--jetified-jar-path',
help='Path to the jetified.jar to use for javac classpath purposes.')
parser.add_option('--is-prebuilt', action='store_true',
help='Whether the jar was compiled or pre-compiled.')
parser.add_option('--java-sources-file', help='Path to .sources file')
parser.add_option('--bundled-srcjars',
help='GYP-list of .srcjars that have been included in this java_library.')
parser.add_option('--supports-android', action='store_true',
help='Whether this library supports running on the Android platform.')
parser.add_option('--requires-android', action='store_true',
help='Whether this library requires running on the Android platform.')
parser.add_option('--bypass-platform-checks', action='store_true',
help='Bypass checks for support/require Android platform.')
parser.add_option('--extra-classpath-jars',
help='GYP-list of .jar files to include on the classpath when compiling, '
'but not to include in the final binary.')
parser.add_option(
'--mergeable-android-manifests',
help='GN-list of AndroidManifest.xml to include in manifest merging.')
parser.add_option('--gradle-treat-as-prebuilt', action='store_true',
help='Whether this library should be treated as a prebuilt library by '
'generate_gradle.py.')
parser.add_option('--main-class',
help='Main class for java_binary or java_annotation_processor targets.')
parser.add_option('--java-resources-jar-path',
help='Path to JAR that contains java resources. Everything '
'from this JAR except meta-inf/ content and .class files '
'will be added to the final APK.')
parser.add_option(
'--non-chromium-code',
action='store_true',
help='True if a java library is not chromium code, used for lint.')
# android library options
parser.add_option('--dex-path', help='Path to target\'s dex output.')
# native library options
parser.add_option('--shared-libraries-runtime-deps',
help='Path to file containing runtime deps for shared '
'libraries.')
parser.add_option(
'--loadable-modules',
action='append',
help='GN-list of native libraries for primary '
'android-abi. Can be specified multiple times.',
default=[])
parser.add_option('--secondary-abi-shared-libraries-runtime-deps',
help='Path to file containing runtime deps for secondary '
'abi shared libraries.')
parser.add_option(
'--secondary-abi-loadable-modules',
action='append',
help='GN-list of native libraries for secondary '
'android-abi. Can be specified multiple times.',
default=[])
parser.add_option(
'--native-lib-placeholders',
action='append',
help='GN-list of native library placeholders to add.',
default=[])
parser.add_option(
'--secondary-native-lib-placeholders',
action='append',
help='GN-list of native library placeholders to add '
'for the secondary android-abi.',
default=[])
parser.add_option('--uncompress-shared-libraries', default=False,
action='store_true',
help='Whether to store native libraries uncompressed')
parser.add_option(
'--library-always-compress',
help='The list of library files that we always compress.')
parser.add_option(
'--library-renames',
default=[],
help='The list of library files that we prepend crazy. to their names.')
# apk options
parser.add_option('--apk-path', help='Path to the target\'s apk output.')
parser.add_option('--incremental-apk-path',
help="Path to the target's incremental apk output.")
parser.add_option('--incremental-install-json-path',
help="Path to the target's generated incremental install "
"json.")
parser.add_option(
'--tested-apk-config',
help='Path to the build config of the tested apk (for an instrumentation '
'test apk).')
parser.add_option(
'--proguard-enabled',
action='store_true',
help='Whether proguard is enabled for this apk or bundle module.')
parser.add_option(
'--proguard-configs',
help='GN-list of proguard flag files to use in final apk.')
parser.add_option(
'--proguard-mapping-path', help='Path to jar created by ProGuard step')
# apk options that are static library specific
parser.add_option(
'--static-library-dependent-configs',
help='GN list of .build_configs of targets that use this target as a '
'static library.')
# options shared between android_resources and apk targets
parser.add_option('--r-text-path', help='Path to target\'s R.txt file.')
parser.add_option('--fail',
help='GN-list of error message lines to fail with.')
parser.add_option('--final-dex-path',
help='Path to final input classes.dex (or classes.zip) to '
'use in final apk.')
parser.add_option('--res-size-info', help='Path to .ap_.info')
parser.add_option('--apk-proto-resources',
help='Path to resources compiled in protocol buffer format '
' for this apk.')
parser.add_option(
'--module-pathmap-path',
help='Path to pathmap file for resource paths in a bundle module.')
parser.add_option(
'--base-allowlist-rtxt-path',
help='Path to R.txt file for the base resources allowlist.')
parser.add_option(
'--is-base-module',
action='store_true',
help='Specifies that this module is a base module for some app bundle.')
parser.add_option('--generate-markdown-format-doc', action='store_true',
help='Dump the Markdown .build_config format documentation '
'then exit immediately.')
parser.add_option(
'--base-module-build-config',
help='Path to the base module\'s build config '
'if this is a feature module.')
parser.add_option(
'--module-build-configs',
help='For bundles, the paths of all non-async module .build_configs '
'for modules that are part of the bundle.')
parser.add_option('--version-name', help='Version name for this APK.')
parser.add_option('--version-code', help='Version code for this APK.')
options, args = parser.parse_args(argv)
if args:
parser.error('No positional arguments should be given.')
if options.generate_markdown_format_doc:
doc_lines = _ExtractMarkdownDocumentation(__doc__)
for line in doc_lines:
print(line)
return 0
if options.fail:
parser.error('\n'.join(build_utils.ParseGnList(options.fail)))
lib_options = ['unprocessed_jar_path', 'interface_jar_path']
device_lib_options = ['device_jar_path', 'dex_path']
required_options_map = {
'android_apk': ['build_config'] + lib_options + device_lib_options,
'android_app_bundle_module':
['build_config', 'final_dex_path', 'res_size_info'] + lib_options +
device_lib_options,
'android_assets': ['build_config'],
'android_resources': ['build_config', 'resources_zip'],
'dist_aar': ['build_config'],
'dist_jar': ['build_config'],
'group': ['build_config'],
'java_annotation_processor': ['build_config', 'main_class'],
'java_binary': ['build_config'],
'java_library': ['build_config', 'host_jar_path'] + lib_options,
'junit_binary': ['build_config'],
'system_java_library': ['build_config', 'unprocessed_jar_path'],
'android_app_bundle': ['build_config', 'module_build_configs'],
}
required_options = required_options_map.get(options.type)
if not required_options:
raise Exception('Unknown type: <%s>' % options.type)
build_utils.CheckOptions(options, parser, required_options)
if options.type != 'android_app_bundle_module':
if options.apk_proto_resources:
raise Exception('--apk-proto-resources can only be used with '
'--type=android_app_bundle_module')
if options.module_pathmap_path:
raise Exception('--module-pathmap-path can only be used with '
'--type=android_app_bundle_module')
if options.base_allowlist_rtxt_path:
raise Exception('--base-allowlist-rtxt-path can only be used with '
'--type=android_app_bundle_module')
if options.is_base_module:
raise Exception('--is-base-module can only be used with '
'--type=android_app_bundle_module')
is_apk_or_module_target = options.type in ('android_apk',
'android_app_bundle_module')
if not is_apk_or_module_target:
if options.uncompress_shared_libraries:
raise Exception('--uncompressed-shared-libraries can only be used '
'with --type=android_apk or '
'--type=android_app_bundle_module')
if options.library_always_compress:
raise Exception(
'--library-always-compress can only be used with --type=android_apk '
'or --type=android_app_bundle_module')
if options.library_renames:
raise Exception(
'--library-renames can only be used with --type=android_apk or '
'--type=android_app_bundle_module')
if options.device_jar_path and not options.dex_path:
raise Exception('java_library that supports Android requires a dex path.')
if any(getattr(options, x) for x in lib_options):
for attr in lib_options:
if not getattr(options, attr):
raise('Expected %s to be set.' % attr)
if options.requires_android and not options.supports_android:
raise Exception(
'--supports-android is required when using --requires-android')
is_java_target = options.type in (
'java_binary', 'junit_binary', 'java_annotation_processor',
'java_library', 'android_apk', 'dist_aar', 'dist_jar',
'system_java_library', 'android_app_bundle_module')
is_static_library_dex_provider_target = (
options.static_library_dependent_configs and options.proguard_enabled)
if is_static_library_dex_provider_target:
if options.type != 'android_apk':
raise Exception(
'--static-library-dependent-configs only supports --type=android_apk')
options.static_library_dependent_configs = build_utils.ParseGnList(
options.static_library_dependent_configs)
static_library_dependent_configs_by_path = {
p: GetDepConfig(p)
for p in options.static_library_dependent_configs
}
deps_configs_paths = build_utils.ParseGnList(options.deps_configs)
deps = _DepsFromPaths(deps_configs_paths,
options.type,
recursive_resource_deps=options.recursive_resource_deps)
processor_deps = _DepsFromPaths(
build_utils.ParseGnList(options.annotation_processor_configs or ''),
options.type, filter_root_targets=False)
all_inputs = sorted(
set(deps.AllConfigPaths() + processor_deps.AllConfigPaths() +
list(static_library_dependent_configs_by_path)))
direct_deps = deps.Direct()
system_library_deps = deps.Direct('system_java_library')
direct_library_deps = deps.Direct('java_library')
all_deps = deps.All()
all_library_deps = deps.All('java_library')
all_resources_deps = deps.All('android_resources')
if options.type == 'java_library':
java_library_deps = _DepsFromPathsWithFilters(
deps_configs_paths, allowlist=['android_resources'])
# for java libraries, we only care about resources that are directly
# reachable without going through another java_library.
all_resources_deps = java_library_deps.All('android_resources')
base_module_build_config = None
if options.base_module_build_config:
with open(options.base_module_build_config, 'r') as f:
base_module_build_config = json.load(f)
all_inputs.append(options.base_module_build_config)
# Initialize some common config.
# Any value that needs to be queryable by dependents must go within deps_info.
config = {
'deps_info': {
'name': os.path.basename(options.build_config),
'path': options.build_config,
'type': options.type,
'gn_target': options.gn_target,
'deps_configs': [d['path'] for d in direct_deps],
'chromium_code': not options.non_chromium_code,
},
# Info needed only by generate_gradle.py.
'gradle': {}
}
deps_info = config['deps_info']
gradle = config['gradle']
if options.type == 'android_apk' and options.tested_apk_config:
tested_apk_deps = Deps([options.tested_apk_config])
tested_apk_config = tested_apk_deps.Direct()[0]
gradle['apk_under_test'] = tested_apk_config['name']
if options.type == 'android_app_bundle_module':
deps_info['is_base_module'] = bool(options.is_base_module)
# Required for generating gradle files.
if options.type == 'java_library':
deps_info['is_prebuilt'] = bool(options.is_prebuilt)
deps_info['gradle_treat_as_prebuilt'] = options.gradle_treat_as_prebuilt
if options.android_manifest:
deps_info['android_manifest'] = options.android_manifest
if options.bundled_srcjars:
deps_info['bundled_srcjars'] = build_utils.ParseGnList(
options.bundled_srcjars)
if options.java_sources_file:
deps_info['java_sources_file'] = options.java_sources_file
if is_java_target:
if options.bundled_srcjars:
gradle['bundled_srcjars'] = deps_info['bundled_srcjars']
gradle['dependent_android_projects'] = []
gradle['dependent_java_projects'] = []
gradle['dependent_prebuilt_jars'] = deps.GradlePrebuiltJarPaths()
if options.main_class:
deps_info['main_class'] = options.main_class
for c in deps.GradleLibraryProjectDeps():
if c['requires_android']:
gradle['dependent_android_projects'].append(c['path'])
else:
gradle['dependent_java_projects'].append(c['path'])
if options.r_text_path:
deps_info['r_text_path'] = options.r_text_path
# TODO(tiborg): Remove creation of JNI info for type group and java_library
# once we can generate the JNI registration based on APK / module targets as
# opposed to groups and libraries.
if is_apk_or_module_target or options.type in (
'group', 'java_library', 'junit_binary'):
deps_info['jni'] = {}
all_java_sources = [c['java_sources_file'] for c in all_library_deps
if 'java_sources_file' in c]
if options.java_sources_file:
all_java_sources.append(options.java_sources_file)
if options.apk_proto_resources:
deps_info['proto_resources_path'] = options.apk_proto_resources
deps_info['version_name'] = options.version_name
deps_info['version_code'] = options.version_code
if options.module_pathmap_path:
deps_info['module_pathmap_path'] = options.module_pathmap_path
else:
# Ensure there is an entry, even if it is empty, for modules
# that have not enabled resource path shortening. Otherwise
# build_utils.ExpandFileArgs fails.
deps_info['module_pathmap_path'] = ''
if options.base_allowlist_rtxt_path:
deps_info['base_allowlist_rtxt_path'] = options.base_allowlist_rtxt_path
else:
# Ensure there is an entry, even if it is empty, for modules
# that don't need such a allowlist.
deps_info['base_allowlist_rtxt_path'] = ''
if is_java_target:
deps_info['requires_android'] = bool(options.requires_android)
deps_info['supports_android'] = bool(options.supports_android)
if not options.bypass_platform_checks:
deps_require_android = (all_resources_deps +
[d['name'] for d in all_library_deps if d['requires_android']])
deps_not_support_android = (
[d['name'] for d in all_library_deps if not d['supports_android']])
if deps_require_android and not options.requires_android:
raise Exception('Some deps require building for the Android platform: '
+ str(deps_require_android))
if deps_not_support_android and options.supports_android:
raise Exception('Not all deps support the Android platform: '
+ str(deps_not_support_android))
if is_apk_or_module_target or options.type == 'dist_jar':
all_dex_files = [c['dex_path'] for c in all_library_deps]
if is_java_target:
# Classpath values filled in below (after applying tested_apk_config).
config['javac'] = {}
if options.unprocessed_jar_path:
deps_info['unprocessed_jar_path'] = options.unprocessed_jar_path
deps_info['interface_jar_path'] = options.interface_jar_path
if options.device_jar_path:
deps_info['device_jar_path'] = options.device_jar_path
if options.host_jar_path:
deps_info['host_jar_path'] = options.host_jar_path
deps_info['jetified_jar_path'] = (options.jetified_jar_path
or options.interface_jar_path)
if options.dex_path:
deps_info['dex_path'] = options.dex_path
if is_apk_or_module_target:
all_dex_files.append(options.dex_path)
if options.type == 'android_apk':
deps_info['apk_path'] = options.apk_path
deps_info['incremental_apk_path'] = options.incremental_apk_path
deps_info['incremental_install_json_path'] = (
options.incremental_install_json_path)
if options.type == 'android_assets':
all_asset_sources = []
if options.asset_renaming_sources:
all_asset_sources.extend(
build_utils.ParseGnList(options.asset_renaming_sources))
if options.asset_sources:
all_asset_sources.extend(build_utils.ParseGnList(options.asset_sources))
deps_info['assets'] = {
'sources': all_asset_sources
}
if options.asset_renaming_destinations:
deps_info['assets']['outputs'] = (
build_utils.ParseGnList(options.asset_renaming_destinations))
if options.disable_asset_compression:
deps_info['assets']['disable_compression'] = True
if options.treat_as_locale_paks:
deps_info['assets']['treat_as_locale_paks'] = True
if options.type == 'android_resources':
deps_info['resources_zip'] = options.resources_zip
if options.resource_overlay:
deps_info['resource_overlay'] = True
if options.srcjar:
deps_info['srcjar'] = options.srcjar
if options.android_manifest:
manifest = AndroidManifest(options.android_manifest)
deps_info['package_name'] = manifest.GetPackageName()
if options.package_name:
deps_info['package_name'] = options.package_name
deps_info['res_sources_path'] = ''
if options.res_sources_path:
deps_info['res_sources_path'] = options.res_sources_path
if options.requires_android and is_java_target:
owned_resource_srcjars = set()
for c in all_resources_deps:
srcjar = c.get('srcjar')
if srcjar:
owned_resource_srcjars.add(srcjar)
for c in all_library_deps:
if c['requires_android'] and not c['is_prebuilt']:
# Many .aar files include R.class files in them, as it makes it easier
# for IDEs to resolve symbols. However, including them is not required
# and not all prebuilts do. Rather than try to detect their presense,
# just assume they are not there. The only consequence is redundant
# compilation of the R.class.
owned_resource_srcjars.difference_update(c['owned_resource_srcjars'])
deps_info['owned_resource_srcjars'] = sorted(owned_resource_srcjars)
if options.type == 'java_library':
# Used to strip out R.class for android_prebuilt()s.
config['javac']['resource_packages'] = [
c['package_name'] for c in all_resources_deps if 'package_name' in c
]
if options.package_name:
deps_info['package_name'] = options.package_name
if options.type in ('android_resources', 'android_apk', 'junit_binary',
'dist_aar', 'android_app_bundle_module', 'java_library'):
dependency_zips = []
dependency_zip_overlays = []
for c in all_resources_deps:
if not c['resources_zip']:
continue
dependency_zips.append(c['resources_zip'])
if c.get('resource_overlay'):
dependency_zip_overlays.append(c['resources_zip'])
extra_package_names = []
if options.type != 'android_resources':
extra_package_names = [
c['package_name'] for c in all_resources_deps if 'package_name' in c
]
# In final types (i.e. apks and modules) that create real R.java files,
# they must collect package names from java_libraries as well.
# https://crbug.com/1073476
if options.type != 'java_library':
extra_package_names.extend([
c['package_name'] for c in all_library_deps if 'package_name' in c
])
# For feature modules, remove any resources that already exist in the base
# module.
if base_module_build_config:
dependency_zips = [
c for c in dependency_zips
if c not in base_module_build_config['deps_info']['dependency_zips']
]
dependency_zip_overlays = [
c for c in dependency_zip_overlays if c not in
base_module_build_config['deps_info']['dependency_zip_overlays']
]
extra_package_names = [
c for c in extra_package_names if c not in
base_module_build_config['deps_info']['extra_package_names']
]
if options.type == 'android_apk' and options.tested_apk_config:
config['deps_info']['arsc_package_name'] = (
tested_apk_config['package_name'])
# We should not shadow the actual R.java files of the apk_under_test by
# creating new R.java files with the same package names in the tested apk.
extra_package_names = [
package for package in extra_package_names
if package not in tested_apk_config['extra_package_names']
]
if options.res_size_info:
config['deps_info']['res_size_info'] = options.res_size_info
config['deps_info']['dependency_zips'] = dependency_zips
config['deps_info']['dependency_zip_overlays'] = dependency_zip_overlays
config['deps_info']['extra_package_names'] = extra_package_names
# These are .jars to add to javac classpath but not to runtime classpath.
extra_classpath_jars = build_utils.ParseGnList(options.extra_classpath_jars)
if extra_classpath_jars:
deps_info['extra_classpath_jars'] = extra_classpath_jars
mergeable_android_manifests = build_utils.ParseGnList(
options.mergeable_android_manifests)
if mergeable_android_manifests:
deps_info['mergeable_android_manifests'] = mergeable_android_manifests
extra_proguard_classpath_jars = []
proguard_configs = build_utils.ParseGnList(options.proguard_configs)
if proguard_configs:
# Make a copy of |proguard_configs| since it's mutated below.
deps_info['proguard_configs'] = list(proguard_configs)
if options.type == 'dist_aar':
# dist_aar combines all dependency R.txt files into one for the aar.
r_text_files = [
c['r_text_path'] for c in all_resources_deps + all_library_deps
if 'r_text_path' in c
]
deps_info['dependency_r_txt_files'] = r_text_files
if is_java_target:
# The classpath used to compile this target when annotation processors are
# present.
javac_classpath = set(c['unprocessed_jar_path']
for c in direct_library_deps)
# The classpath used to compile this target when annotation processors are
# not present. These are also always used to know when a target needs to be
# rebuilt.
javac_interface_classpath = set(c['interface_jar_path']
for c in direct_library_deps)
# The classpath used for bytecode-rewriting.
javac_full_classpath = set(c['unprocessed_jar_path']
for c in all_library_deps)
# The classpath used for error prone.
javac_full_interface_classpath = set(c['interface_jar_path']
for c in all_library_deps)
# The path of the jetified jars.
jetified_full_jar_classpath = set(c['jetified_jar_path']
for c in all_library_deps)
# Adding base module to classpath to compile against its R.java file
if base_module_build_config:
javac_full_classpath.add(
base_module_build_config['deps_info']['unprocessed_jar_path'])
javac_full_interface_classpath.add(
base_module_build_config['deps_info']['interface_jar_path'])
jetified_full_jar_classpath.add(
base_module_build_config['deps_info']['jetified_jar_path'])
# Turbine now compiles headers against only the direct classpath, so the
# base module's interface jar must be on the direct interface classpath.
javac_interface_classpath.add(
base_module_build_config['deps_info']['interface_jar_path'])
for dep in direct_deps:
if 'extra_classpath_jars' in dep:
javac_classpath.update(dep['extra_classpath_jars'])
javac_interface_classpath.update(dep['extra_classpath_jars'])
for dep in all_deps:
if 'extra_classpath_jars' in dep:
javac_full_classpath.update(dep['extra_classpath_jars'])
javac_full_interface_classpath.update(dep['extra_classpath_jars'])
jetified_full_jar_classpath.update(dep['extra_classpath_jars'])
# TODO(agrieve): Might be less confusing to fold these into bootclasspath.
# Deps to add to the compile-time classpath (but not the runtime classpath).
# These are jars specified by input_jars_paths that almost never change.
# Just add them directly to all the classpaths.
if options.extra_classpath_jars:
javac_classpath.update(extra_classpath_jars)
javac_interface_classpath.update(extra_classpath_jars)
javac_full_classpath.update(extra_classpath_jars)
javac_full_interface_classpath.update(extra_classpath_jars)
jetified_full_jar_classpath.update(extra_classpath_jars)
if is_java_target or options.type == 'android_app_bundle':
# The classpath to use to run this target (or as an input to ProGuard).
device_classpath = []
if is_java_target and options.device_jar_path:
device_classpath.append(options.device_jar_path)
device_classpath.extend(
c.get('device_jar_path') for c in all_library_deps
if c.get('device_jar_path'))
if options.type == 'android_app_bundle':
for d in deps.Direct('android_app_bundle_module'):
device_classpath.extend(c for c in d.get('device_classpath', [])
if c not in device_classpath)
if options.type in ('dist_jar', 'java_binary', 'junit_binary'):
# The classpath to use to run this target.
host_classpath = []
if options.host_jar_path:
host_classpath.append(options.host_jar_path)
host_classpath.extend(c['host_jar_path'] for c in all_library_deps)
deps_info['host_classpath'] = host_classpath
# We allow lint to be run on android_apk targets, so we collect lint
# artifacts for them.
# We allow lint to be run on android_app_bundle targets, so we need to
# collect lint artifacts for the android_app_bundle_module targets that the
# bundle includes. Different android_app_bundle targets may include different
# android_app_bundle_module targets, so the bundle needs to be able to
# de-duplicate these lint artifacts.
if options.type in ('android_app_bundle_module', 'android_apk'):
# Collect all sources and resources at the apk/bundle_module level.
lint_srcjars = set()
lint_java_sources = set()
lint_resource_sources = set()
lint_resource_zips = set()
if options.java_sources_file:
lint_java_sources.add(options.java_sources_file)
if options.bundled_srcjars:
lint_srcjars.update(deps_info['bundled_srcjars'])
for c in all_library_deps:
if c['chromium_code'] and c['requires_android']:
if 'java_sources_file' in c:
lint_java_sources.add(c['java_sources_file'])
lint_srcjars.update(c['bundled_srcjars'])
if options.res_sources_path:
lint_resource_sources.add(options.res_sources_path)
if options.resources_zip:
lint_resource_zips.add(options.resources_zip)
for c in all_resources_deps:
if c['chromium_code']:
# Prefer res_sources_path to resources_zips so that lint errors have
# real paths and to avoid needing to extract during lint.
if c['res_sources_path']:
lint_resource_sources.add(c['res_sources_path'])
else:
lint_resource_zips.add(c['resources_zip'])
deps_info['lint_srcjars'] = sorted(lint_srcjars)
deps_info['lint_java_sources'] = sorted(lint_java_sources)
deps_info['lint_resource_sources'] = sorted(lint_resource_sources)
deps_info['lint_resource_zips'] = sorted(lint_resource_zips)
deps_info['lint_extra_android_manifests'] = []
if options.type == 'android_apk':
assert options.android_manifest, 'Android APKs must define a manifest'
deps_info['lint_android_manifest'] = options.android_manifest
if options.type == 'android_app_bundle':
module_configs = [
GetDepConfig(c)
for c in build_utils.ParseGnList(options.module_build_configs)
]
jni_all_source = set()
lint_srcjars = set()
lint_java_sources = set()
lint_resource_sources = set()
lint_resource_zips = set()
lint_extra_android_manifests = set()
for c in module_configs:
if c['is_base_module']:
assert 'base_module_config' not in deps_info, (
'Must have exactly 1 base module!')
deps_info['base_module_config'] = c['path']
# Use the base module's android manifest for linting.
deps_info['lint_android_manifest'] = c['android_manifest']
else:
lint_extra_android_manifests.add(c['android_manifest'])
jni_all_source.update(c['jni']['all_source'])
lint_srcjars.update(c['lint_srcjars'])
lint_java_sources.update(c['lint_java_sources'])
lint_resource_sources.update(c['lint_resource_sources'])
lint_resource_zips.update(c['lint_resource_zips'])
deps_info['jni'] = {'all_source': sorted(jni_all_source)}
deps_info['lint_srcjars'] = sorted(lint_srcjars)
deps_info['lint_java_sources'] = sorted(lint_java_sources)
deps_info['lint_resource_sources'] = sorted(lint_resource_sources)
deps_info['lint_resource_zips'] = sorted(lint_resource_zips)
deps_info['lint_extra_android_manifests'] = sorted(
lint_extra_android_manifests)
# Map configs to classpath entries that should be included in their final dex.
classpath_entries_by_owning_config = collections.defaultdict(list)
extra_main_r_text_files = []
if is_static_library_dex_provider_target:
# Map classpath entries to configs that include them in their classpath.
configs_by_classpath_entry = collections.defaultdict(list)
static_lib_jar_paths = {}
for config_path, dep_config in (sorted(
static_library_dependent_configs_by_path.iteritems())):
# For bundles, only the jar path and jni sources of the base module
# are relevant for proguard. Should be updated when bundle feature
# modules support JNI.
base_config = dep_config
if dep_config['type'] == 'android_app_bundle':
base_config = GetDepConfig(dep_config['base_module_config'])
extra_main_r_text_files.append(base_config['r_text_path'])
static_lib_jar_paths[config_path] = base_config['device_jar_path']
proguard_configs.extend(dep_config['proguard_all_configs'])
extra_proguard_classpath_jars.extend(
dep_config['proguard_classpath_jars'])
all_java_sources.extend(base_config['jni']['all_source'])
# The srcjars containing the generated R.java files are excluded for APK
# targets that use static libraries, so we add them here to ensure the
# union of resource IDs are available in the static library APK.
for package in base_config['extra_package_names']:
if package not in extra_package_names:
extra_package_names.append(package)
for cp_entry in dep_config['device_classpath']:
configs_by_classpath_entry[cp_entry].append(config_path)
for cp_entry in device_classpath:
configs_by_classpath_entry[cp_entry].append(options.build_config)
for cp_entry, candidate_configs in configs_by_classpath_entry.iteritems():
config_path = (candidate_configs[0]
if len(candidate_configs) == 1 else options.build_config)
classpath_entries_by_owning_config[config_path].append(cp_entry)
device_classpath.append(cp_entry)
device_classpath = sorted(set(device_classpath))
deps_info['static_library_proguard_mapping_output_paths'] = sorted([
d['proguard_mapping_path']
for d in static_library_dependent_configs_by_path.itervalues()
])
deps_info['static_library_dependent_classpath_configs'] = {
path: sorted(set(classpath))
for path, classpath in classpath_entries_by_owning_config.iteritems()
}
deps_info['extra_main_r_text_files'] = sorted(extra_main_r_text_files)
if is_apk_or_module_target or options.type in ('group', 'java_library',
'junit_binary'):
deps_info['jni']['all_source'] = sorted(set(all_java_sources))
system_jars = [c['unprocessed_jar_path'] for c in system_library_deps]
system_interface_jars = [c['interface_jar_path'] for c in system_library_deps]
if system_library_deps:
config['android'] = {}
config['android']['sdk_interface_jars'] = system_interface_jars
config['android']['sdk_jars'] = system_jars
if options.type in ('android_apk', 'dist_aar',
'dist_jar', 'android_app_bundle_module', 'android_app_bundle'):
for c in all_deps:
proguard_configs.extend(c.get('proguard_configs', []))
extra_proguard_classpath_jars.extend(c.get('extra_classpath_jars', []))
if options.type == 'android_app_bundle':
for c in deps.Direct('android_app_bundle_module'):
proguard_configs.extend(p for p in c.get('proguard_configs', []))
if options.type == 'android_app_bundle':
for d in deps.Direct('android_app_bundle_module'):
extra_proguard_classpath_jars.extend(
c for c in d.get('proguard_classpath_jars', [])
if c not in extra_proguard_classpath_jars)
if options.type == 'android_app_bundle':
deps_proguard_enabled = []
deps_proguard_disabled = []
for d in deps.Direct('android_app_bundle_module'):
if not d['device_classpath']:
# We don't care about modules that have no Java code for proguarding.
continue
if d['proguard_enabled']:
deps_proguard_enabled.append(d['name'])
else:
deps_proguard_disabled.append(d['name'])
if deps_proguard_enabled and deps_proguard_disabled:
raise Exception('Deps %s have proguard enabled while deps %s have '
'proguard disabled' % (deps_proguard_enabled,
deps_proguard_disabled))
else:
deps_info['proguard_enabled'] = bool(options.proguard_enabled)
if options.proguard_mapping_path:
deps_info['proguard_mapping_path'] = options.proguard_mapping_path
# The java code for an instrumentation test apk is assembled differently for
# ProGuard vs. non-ProGuard.
#
# Without ProGuard: Each library's jar is dexed separately and then combined
# into a single classes.dex. A test apk will include all dex files not already
# present in the apk-under-test. At runtime all test code lives in the test
# apk, and the program code lives in the apk-under-test.
#
# With ProGuard: Each library's .jar file is fed into ProGuard, which outputs
# a single .jar, which is then dexed into a classes.dex. A test apk includes
# all jar files from the program and the tests because having them separate
# doesn't work with ProGuard's whole-program optimizations. Although the
# apk-under-test still has all of its code in its classes.dex, none of it is
# used at runtime because the copy of it within the test apk takes precedence.
if options.type == 'android_apk' and options.tested_apk_config:
if tested_apk_config['proguard_enabled']:
assert options.proguard_enabled, ('proguard must be enabled for '
'instrumentation apks if it\'s enabled for the tested apk.')
# Mutating lists, so no need to explicitly re-assign to dict.
proguard_configs.extend(
p for p in tested_apk_config['proguard_all_configs'])
extra_proguard_classpath_jars.extend(
p for p in tested_apk_config['proguard_classpath_jars'])
tested_apk_config = GetDepConfig(options.tested_apk_config)
deps_info['proguard_under_test_mapping'] = (
tested_apk_config['proguard_mapping_path'])
elif options.proguard_enabled:
# Not sure why you'd want to proguard the test apk when the under-test apk
# is not proguarded, but it's easy enough to support.
deps_info['proguard_under_test_mapping'] = ''
# Add all tested classes to the test's classpath to ensure that the test's
# java code is a superset of the tested apk's java code
device_classpath_extended = list(device_classpath)
device_classpath_extended.extend(
p for p in tested_apk_config['device_classpath']
if p not in device_classpath)
# Include in the classpath classes that are added directly to the apk under
# test (those that are not a part of a java_library).
javac_classpath.add(tested_apk_config['unprocessed_jar_path'])
javac_interface_classpath.add(tested_apk_config['interface_jar_path'])
javac_full_classpath.add(tested_apk_config['unprocessed_jar_path'])
javac_full_interface_classpath.add(tested_apk_config['interface_jar_path'])
jetified_full_jar_classpath.add(tested_apk_config['interface_jar_path'])
javac_full_classpath.update(tested_apk_config['javac_full_classpath'])
javac_full_interface_classpath.update(
tested_apk_config['javac_full_interface_classpath'])
jetified_full_jar_classpath.update(
tested_apk_config['jetified_full_jar_classpath'])
# Exclude .jar files from the test apk that exist within the apk under test.
tested_apk_library_deps = tested_apk_deps.All('java_library')
tested_apk_dex_files = {c['dex_path'] for c in tested_apk_library_deps}
all_dex_files = [p for p in all_dex_files if p not in tested_apk_dex_files]
tested_apk_jar_files = set(tested_apk_config['device_classpath'])
device_classpath = [
p for p in device_classpath if p not in tested_apk_jar_files
]
if options.type in ('android_apk', 'dist_aar', 'dist_jar',
'android_app_bundle_module', 'android_app_bundle'):
deps_info['proguard_all_configs'] = sorted(set(proguard_configs))
deps_info['proguard_classpath_jars'] = sorted(
set(extra_proguard_classpath_jars))
# Dependencies for the final dex file of an apk.
if (is_apk_or_module_target or options.final_dex_path
or options.type == 'dist_jar'):
config['final_dex'] = {}
dex_config = config['final_dex']
dex_config['path'] = options.final_dex_path
if is_apk_or_module_target or options.type == 'dist_jar':
dex_config['all_dex_files'] = all_dex_files
if is_java_target:
config['javac']['classpath'] = sorted(javac_classpath)
config['javac']['interface_classpath'] = sorted(javac_interface_classpath)
# Direct() will be of type 'java_annotation_processor', and so not included
# in All('java_library').
# Annotation processors run as part of the build, so need host_jar_path.
config['javac']['processor_classpath'] = [
c['host_jar_path'] for c in processor_deps.Direct()
if c.get('host_jar_path')
]
config['javac']['processor_classpath'] += [
c['host_jar_path'] for c in processor_deps.All('java_library')
]
config['javac']['processor_classes'] = [
c['main_class'] for c in processor_deps.Direct()]
deps_info['javac_full_classpath'] = sorted(javac_full_classpath)
deps_info['javac_full_interface_classpath'] = sorted(
javac_full_interface_classpath)
deps_info['jetified_full_jar_classpath'] = sorted(
jetified_full_jar_classpath)
elif options.type == 'android_app_bundle':
# bundles require javac_full_classpath to create .aab.jar.info and require
# javac_full_interface_classpath for lint.
javac_full_classpath = set()
javac_full_interface_classpath = set()
for d in deps.Direct('android_app_bundle_module'):
javac_full_classpath.update(d['javac_full_classpath'])
javac_full_interface_classpath.update(d['javac_full_interface_classpath'])
javac_full_classpath.add(d['unprocessed_jar_path'])
javac_full_interface_classpath.add(d['interface_jar_path'])
deps_info['javac_full_classpath'] = sorted(javac_full_classpath)
deps_info['javac_full_interface_classpath'] = sorted(
javac_full_interface_classpath)
if options.type in ('android_apk', 'dist_jar', 'android_app_bundle_module',
'android_app_bundle'):
deps_info['device_classpath'] = device_classpath
if options.tested_apk_config:
deps_info['device_classpath_extended'] = device_classpath_extended
if options.type in ('android_apk', 'dist_jar'):
all_interface_jars = []
if options.interface_jar_path:
all_interface_jars.append(options.interface_jar_path)
all_interface_jars.extend(c['interface_jar_path'] for c in all_library_deps)
config['dist_jar'] = {
'all_interface_jars': all_interface_jars,
}
if is_apk_or_module_target:
manifest = AndroidManifest(options.android_manifest)
deps_info['package_name'] = manifest.GetPackageName()
if not options.tested_apk_config and manifest.GetInstrumentationElements():
# This must then have instrumentation only for itself.
manifest.CheckInstrumentationElements(manifest.GetPackageName())
library_paths = []
java_libraries_list = None
if options.shared_libraries_runtime_deps:
library_paths = _ExtractSharedLibsFromRuntimeDeps(
options.shared_libraries_runtime_deps)
java_libraries_list = _CreateJavaLibrariesList(library_paths)
all_inputs.append(options.shared_libraries_runtime_deps)
secondary_abi_library_paths = []
if options.secondary_abi_shared_libraries_runtime_deps:
secondary_abi_library_paths = _ExtractSharedLibsFromRuntimeDeps(
options.secondary_abi_shared_libraries_runtime_deps)
all_inputs.append(options.secondary_abi_shared_libraries_runtime_deps)
native_library_placeholder_paths = build_utils.ParseGnList(
options.native_lib_placeholders)
secondary_native_library_placeholder_paths = build_utils.ParseGnList(
options.secondary_native_lib_placeholders)
loadable_modules = build_utils.ParseGnList(options.loadable_modules)
secondary_abi_loadable_modules = build_utils.ParseGnList(
options.secondary_abi_loadable_modules)
config['native'] = {
'libraries':
library_paths,
'native_library_placeholders':
native_library_placeholder_paths,
'secondary_abi_libraries':
secondary_abi_library_paths,
'secondary_native_library_placeholders':
secondary_native_library_placeholder_paths,
'java_libraries_list':
java_libraries_list,
'uncompress_shared_libraries':
options.uncompress_shared_libraries,
'library_always_compress':
options.library_always_compress,
'library_renames':
options.library_renames,
'loadable_modules':
loadable_modules,
'secondary_abi_loadable_modules':
secondary_abi_loadable_modules,
}
config['assets'], config['uncompressed_assets'], locale_paks = (
_MergeAssets(deps.All('android_assets')))
deps_info['compressed_locales_java_list'] = _CreateJavaLocaleListFromAssets(
config['assets'], locale_paks)
deps_info[
'uncompressed_locales_java_list'] = _CreateJavaLocaleListFromAssets(
config['uncompressed_assets'], locale_paks)
config['extra_android_manifests'] = []
for c in all_deps:
config['extra_android_manifests'].extend(
c.get('mergeable_android_manifests', []))
# Collect java resources
java_resources_jars = [d['java_resources_jar'] for d in all_library_deps
if 'java_resources_jar' in d]
if options.tested_apk_config:
tested_apk_resource_jars = [d['java_resources_jar']
for d in tested_apk_library_deps
if 'java_resources_jar' in d]
java_resources_jars = [jar for jar in java_resources_jars
if jar not in tested_apk_resource_jars]
config['java_resources_jars'] = java_resources_jars
if options.java_resources_jar_path:
deps_info['java_resources_jar'] = options.java_resources_jar_path
# DYNAMIC FEATURE MODULES:
# Make sure that dependencies that exist on the base module
# are not duplicated on the feature module.
if base_module_build_config:
base = base_module_build_config
RemoveObjDups(config, base, 'deps_info', 'device_classpath')
RemoveObjDups(config, base, 'deps_info', 'javac_full_classpath')
RemoveObjDups(config, base, 'deps_info', 'javac_full_interface_classpath')
RemoveObjDups(config, base, 'deps_info', 'jetified_full_jar_classpath')
RemoveObjDups(config, base, 'deps_info', 'jni', 'all_source')
RemoveObjDups(config, base, 'final_dex', 'all_dex_files')
RemoveObjDups(config, base, 'extra_android_manifests')
if is_java_target:
jar_to_target = {}
_AddJarMapping(jar_to_target, [deps_info])
_AddJarMapping(jar_to_target, all_deps)
if base_module_build_config:
_AddJarMapping(jar_to_target, [base_module_build_config['deps_info']])
if options.tested_apk_config:
_AddJarMapping(jar_to_target, [tested_apk_config])
for jar, target in itertools.izip(
tested_apk_config['javac_full_classpath'],
tested_apk_config['javac_full_classpath_targets']):
jar_to_target[jar] = target
# Used by bytecode_processor to give better error message when missing
# deps are found.
config['deps_info']['javac_full_classpath_targets'] = [
jar_to_target[x] for x in deps_info['javac_full_classpath']
]
build_utils.WriteJson(config, options.build_config, only_if_changed=True)
if options.depfile:
build_utils.WriteDepfile(options.depfile, options.build_config, all_inputs)
# Script entry point: forward the CLI arguments (minus argv[0]) to main() and
# exit with its return code.
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| 41.784324 | 80 | 0.722824 |
from __future__ import print_function
import collections
import itertools
import json
import optparse
import os
import sys
import xml.dom.minidom
from util import build_utils
from util import resource_utils
# Target types that act as roots of the dependency graph; they are added to
# the blocklist of deps walks by default (see _DepsFromPaths).
_ROOT_TYPES = ('android_apk', 'java_binary', 'java_annotation_processor',
               'junit_binary', 'android_app_bundle')
# Target types used as the allowlist when walking deps of a resource-ish
# target, so that java libraries cannot be reached through assets/resources
# (see _DepsFromPaths).
_RESOURCE_TYPES = ('android_assets', 'android_resources', 'system_java_library')
def _ExtractMarkdownDocumentation(input_text):
in_markdown = False
result = []
for line in input_text.splitlines():
if in_markdown:
if '-- END_MARKDOWN --' in line:
in_markdown = False
else:
result.append(line)
else:
if '-- BEGIN_MARKDOWN --' in line:
in_markdown = True
return result
class AndroidManifest(object):
  """Thin wrapper around an AndroidManifest.xml file.

  The XML is parsed once at construction time; helpers expose the pieces of
  the manifest this script needs (package name and <instrumentation>
  elements).
  """

  def __init__(self, path):
    # Keep the path for use in error messages.
    self.path = path
    document = xml.dom.minidom.parse(path)
    manifest_nodes = document.getElementsByTagName('manifest')
    # A well-formed manifest has exactly one top-level <manifest> element.
    assert len(manifest_nodes) == 1
    self.manifest = manifest_nodes[0]

  def GetInstrumentationElements(self):
    """Returns the <instrumentation> nodes, or None when there are none."""
    elements = self.manifest.getElementsByTagName('instrumentation')
    return elements if elements else None

  def CheckInstrumentationElements(self, expected_package):
    """Raises unless every <instrumentation> element targets
    |expected_package|."""
    elements = self.GetInstrumentationElements()
    if not elements:
      raise Exception('No <instrumentation> elements found in %s' % self.path)
    android_ns = 'http://schemas.android.com/apk/res/android'
    for element in elements:
      target_package = element.getAttributeNS(android_ns, 'targetPackage')
      if target_package != expected_package:
        raise Exception(
            'Wrong instrumented package. Expected %s, got %s'
            % (expected_package, target_package))

  def GetPackageName(self):
    """Returns the value of the manifest's 'package' attribute."""
    return self.manifest.getAttribute('package')
# Memoization cache for GetDepConfig(), keyed by .build_config path.
dep_config_cache = {}


def GetDepConfig(path):
  """Loads the 'deps_info' dict from the .build_config JSON file at |path|.

  Results are cached, so each file is read and parsed at most once per run.
  """
  try:
    return dep_config_cache[path]
  except KeyError:
    with open(path) as json_file:
      config = json.load(json_file)['deps_info']
    dep_config_cache[path] = config
    return config
def DepsOfType(wanted_type, configs):
  """Returns the entries of |configs| whose 'type' field is |wanted_type|."""
  matching = [cfg for cfg in configs if cfg['type'] == wanted_type]
  return matching
def GetAllDepsConfigsInOrder(deps_config_paths):
  """Returns the transitive closure of |deps_config_paths| in the order
  produced by build_utils.GetSortedTransitiveDependencies."""

  def deps_of(path):
    # Each .build_config lists its own direct deps under 'deps_configs'.
    return GetDepConfig(path)['deps_configs']

  return build_utils.GetSortedTransitiveDependencies(deps_config_paths,
                                                     deps_of)
def GetObjectByPath(obj, key_path):
  """Drills into |obj| by indexing with each key of |key_path| in turn.

  An empty |key_path| returns |obj| itself.
  """
  current = obj
  for key in key_path:
    current = current[key]
  return current
def RemoveObjDups(obj, base, *key_path):
  """Removes, in place, entries of the list at |key_path| within |obj| that
  also appear in the list at the same |key_path| within |base|."""
  duplicates = set(GetObjectByPath(base, key_path))
  target = GetObjectByPath(obj, key_path)
  # Slice-assign so the filtering happens in place on the original list.
  target[:] = [entry for entry in target if entry not in duplicates]
class Deps(object):
  """Helper for processing the build_config files of a target's dependencies.

  Loads the direct deps and their transitive closure once at construction
  time, and offers ordered, optionally type-filtered views over both sets.
  """

  def __init__(self, direct_deps_config_paths):
    # Transitive closure of dep config paths, in sorted dependency order.
    self._all_deps_config_paths = GetAllDepsConfigsInOrder(
        direct_deps_config_paths)
    self._direct_deps_configs = [
        GetDepConfig(p) for p in direct_deps_config_paths
    ]
    self._all_deps_configs = [
        GetDepConfig(p) for p in self._all_deps_config_paths
    ]
    self._direct_deps_config_paths = direct_deps_config_paths

  def All(self, wanted_type=None):
    """Returns all transitive dep configs, optionally filtered by type."""
    if wanted_type is None:
      return self._all_deps_configs
    return DepsOfType(wanted_type, self._all_deps_configs)

  def Direct(self, wanted_type=None):
    """Returns only the direct dep configs, optionally filtered by type."""
    if wanted_type is None:
      return self._direct_deps_configs
    return DepsOfType(wanted_type, self._direct_deps_configs)

  def AllConfigPaths(self):
    """Returns the paths of all transitive dep .build_config files."""
    return self._all_deps_config_paths

  def RemoveNonDirectDep(self, path):
    """Removes |path| from the transitive dep lists; refuses direct deps."""
    if path in self._direct_deps_config_paths:
      raise Exception('Cannot remove direct dep.')
    self._all_deps_config_paths.remove(path)
    self._all_deps_configs.remove(GetDepConfig(path))

  def GradlePrebuiltJarPaths(self):
    # Unprocessed jar paths of direct java_library deps that gradle should
    # treat as prebuilts, deduplicated in first-seen order.
    ret = []

    def helper(cur):
      for config in cur.Direct('java_library'):
        if config['is_prebuilt'] or config['gradle_treat_as_prebuilt']:
          if config['unprocessed_jar_path'] not in ret:
            ret.append(config['unprocessed_jar_path'])

    helper(self)
    return ret

  def GradleLibraryProjectDeps(self):
    # Configs of direct non-prebuilt java_library deps. Deps marked
    # gradle_treat_as_prebuilt are recursed through (their own deps are
    # considered instead); actual prebuilts are skipped.
    ret = []

    def helper(cur):
      for config in cur.Direct('java_library'):
        if config['is_prebuilt']:
          pass
        elif config['gradle_treat_as_prebuilt']:
          helper(Deps(config['deps_configs']))
        elif config not in ret:
          ret.append(config)

    helper(self)
    return ret
def _MergeAssets(all_assets):
compressed = {}
uncompressed = {}
locale_paks = set()
for asset_dep in all_assets:
entry = asset_dep['assets']
disable_compression = entry.get('disable_compression')
treat_as_locale_paks = entry.get('treat_as_locale_paks')
dest_map = uncompressed if disable_compression else compressed
other_map = compressed if disable_compression else uncompressed
outputs = entry.get('outputs', [])
for src, dest in itertools.izip_longest(entry['sources'], outputs):
if not dest:
dest = os.path.basename(src)
other_map.pop(dest, 0)
dest_map[dest] = src
if treat_as_locale_paks:
locale_paks.add(dest)
def create_list(asset_map):
ret = ['%s:%s' % (src, dest) for dest, src in asset_map.iteritems()]
ret.sort()
return ret
return create_list(compressed), create_list(uncompressed), locale_paks
def _ResolveGroups(configs):
  """Returns a new list of configs with every 'group' type replaced by its
  constituent dep configs, expanding recursively until none remain."""
  resolved = list(configs)
  while True:
    group_configs = DepsOfType('group', resolved)
    if not group_configs:
      return resolved
    for group in group_configs:
      # Splice the group's members in at the group's position.
      position = resolved.index(group)
      members = [GetDepConfig(p) for p in group['deps_configs']]
      resolved[position:position + 1] = members
def _DepsFromPaths(dep_paths,
                   target_type,
                   filter_root_targets=True,
                   recursive_resource_deps=False):
  """Creates a Deps instance for |dep_paths| with standard type filters.

  Root target types (apks, binaries, bundles, ...) are blocked from being
  deps unless |filter_root_targets| is False. When |target_type| is itself
  a resource-ish type, only resource-ish deps are allowed, so java libraries
  cannot be reached through assets/resources. If |recursive_resource_deps|
  is set, |dep_paths| is first expanded to its full transitive closure.
  """
  blocked_types = list(_ROOT_TYPES) if filter_root_targets else []
  allowed_types = (
      list(_RESOURCE_TYPES) if target_type in _RESOURCE_TYPES else [])
  if recursive_resource_deps:
    dep_paths = GetAllDepsConfigsInOrder(dep_paths)
  return _DepsFromPathsWithFilters(dep_paths, blocked_types, allowed_types)
def _DepsFromPathsWithFilters(dep_paths, blocklist=None, allowlist=None):
  """Creates a Deps instance for |dep_paths|, filtered by config type.

  'group' configs are expanded into their members, but the group configs
  themselves are kept as well so they remain visible to the type filters.
  |blocklist| removes matching types; a non-empty |allowlist| keeps only
  matching types.
  """
  configs = [GetDepConfig(path) for path in dep_paths]
  group_configs = DepsOfType('group', configs)
  # Expand groups to their constituents, then re-append the group configs.
  configs = _ResolveGroups(configs) + group_configs
  if blocklist:
    blocked = set(blocklist)
    configs = [c for c in configs if c['type'] not in blocked]
  if allowlist:
    allowed = set(allowlist)
    configs = [c for c in configs if c['type'] in allowed]
  return Deps([c['path'] for c in configs])
def _ExtractSharedLibsFromRuntimeDeps(runtime_deps_file):
ret = []
with open(runtime_deps_file) as f:
for line in f:
line = line.rstrip()
if not line.endswith('.so'):
continue
ret.append(os.path.normpath(line.replace('lib.unstripped/', '')))
ret.reverse()
return ret
def _CreateJavaLibrariesList(library_paths):
names = ['"%s"' % os.path.basename(s)[3:-3] for s in library_paths]
return ('{%s}' % ','.join(sorted(set(names))))
def _CreateJavaLocaleListFromAssets(assets, locale_paks):
assets_paths = [a.split(':')[1] for a in assets]
locales = [os.path.basename(a)[:-4] for a in assets_paths if a in locale_paks]
return '{%s}' % ','.join(['"%s"' % l for l in sorted(locales)])
def _AddJarMapping(jar_to_target, configs):
for config in configs:
jar = config.get('unprocessed_jar_path')
if jar:
jar_to_target[jar] = config['gn_target']
for jar in config.get('extra_classpath_jars', []):
jar_to_target[jar] = config['gn_target']
def main(argv):
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--build-config', help='Path to build_config output.')
parser.add_option(
'--type',
help='Type of this target (e.g. android_library).')
parser.add_option('--gn-target', help='GN label for this target')
parser.add_option(
'--deps-configs',
help='GN-list of dependent build_config files.')
parser.add_option(
'--annotation-processor-configs',
help='GN-list of build_config files for annotation processors.')
parser.add_option('--srcjar', help='Path to target\'s resources srcjar.')
parser.add_option('--resources-zip', help='Path to target\'s resources zip.')
parser.add_option('--package-name',
help='Java package name for these resources.')
parser.add_option('--android-manifest', help='Path to android manifest.')
parser.add_option('--resource-dirs', action='append', default=[],
help='GYP-list of resource dirs')
parser.add_option(
'--res-sources-path',
help='Path to file containing a list of paths to resources.')
parser.add_option(
'--resource-overlay',
action='store_true',
help='Whether resources passed in via --resources-zip should override '
'resources with the same name')
parser.add_option(
'--recursive-resource-deps',
action='store_true',
help='Whether deps should be walked recursively to find resource deps.')
parser.add_option('--asset-sources', help='List of asset sources.')
parser.add_option('--asset-renaming-sources',
help='List of asset sources with custom destinations.')
parser.add_option('--asset-renaming-destinations',
help='List of asset custom destinations.')
parser.add_option('--disable-asset-compression', action='store_true',
help='Whether to disable asset compression.')
parser.add_option('--treat-as-locale-paks', action='store_true',
help='Consider the assets as locale paks in BuildConfig.java')
parser.add_option('--device-jar-path', help='Path to .jar for dexing.')
parser.add_option('--host-jar-path', help='Path to .jar for java_binary.')
parser.add_option('--unprocessed-jar-path',
help='Path to the .jar to use for javac classpath purposes.')
parser.add_option(
'--interface-jar-path',
help='Path to the interface .jar to use for javac classpath purposes.')
parser.add_option(
'--jetified-jar-path',
help='Path to the jetified.jar to use for javac classpath purposes.')
parser.add_option('--is-prebuilt', action='store_true',
help='Whether the jar was compiled or pre-compiled.')
parser.add_option('--java-sources-file', help='Path to .sources file')
parser.add_option('--bundled-srcjars',
help='GYP-list of .srcjars that have been included in this java_library.')
parser.add_option('--supports-android', action='store_true',
help='Whether this library supports running on the Android platform.')
parser.add_option('--requires-android', action='store_true',
help='Whether this library requires running on the Android platform.')
parser.add_option('--bypass-platform-checks', action='store_true',
help='Bypass checks for support/require Android platform.')
parser.add_option('--extra-classpath-jars',
help='GYP-list of .jar files to include on the classpath when compiling, '
'but not to include in the final binary.')
parser.add_option(
'--mergeable-android-manifests',
help='GN-list of AndroidManifest.xml to include in manifest merging.')
parser.add_option('--gradle-treat-as-prebuilt', action='store_true',
help='Whether this library should be treated as a prebuilt library by '
'generate_gradle.py.')
parser.add_option('--main-class',
help='Main class for java_binary or java_annotation_processor targets.')
parser.add_option('--java-resources-jar-path',
help='Path to JAR that contains java resources. Everything '
'from this JAR except meta-inf/ content and .class files '
'will be added to the final APK.')
parser.add_option(
'--non-chromium-code',
action='store_true',
help='True if a java library is not chromium code, used for lint.')
parser.add_option('--dex-path', help='Path to target\'s dex output.')
# native library options
parser.add_option('--shared-libraries-runtime-deps',
help='Path to file containing runtime deps for shared '
'libraries.')
parser.add_option(
'--loadable-modules',
action='append',
help='GN-list of native libraries for primary '
'android-abi. Can be specified multiple times.',
default=[])
parser.add_option('--secondary-abi-shared-libraries-runtime-deps',
help='Path to file containing runtime deps for secondary '
'abi shared libraries.')
parser.add_option(
'--secondary-abi-loadable-modules',
action='append',
help='GN-list of native libraries for secondary '
'android-abi. Can be specified multiple times.',
default=[])
parser.add_option(
'--native-lib-placeholders',
action='append',
help='GN-list of native library placeholders to add.',
default=[])
parser.add_option(
'--secondary-native-lib-placeholders',
action='append',
help='GN-list of native library placeholders to add '
'for the secondary android-abi.',
default=[])
parser.add_option('--uncompress-shared-libraries', default=False,
action='store_true',
help='Whether to store native libraries uncompressed')
parser.add_option(
'--library-always-compress',
help='The list of library files that we always compress.')
parser.add_option(
'--library-renames',
default=[],
help='The list of library files that we prepend crazy. to their names.')
# apk options
parser.add_option('--apk-path', help='Path to the target\'s apk output.')
parser.add_option('--incremental-apk-path',
help="Path to the target's incremental apk output.")
parser.add_option('--incremental-install-json-path',
help="Path to the target's generated incremental install "
"json.")
parser.add_option(
'--tested-apk-config',
help='Path to the build config of the tested apk (for an instrumentation '
'test apk).')
parser.add_option(
'--proguard-enabled',
action='store_true',
help='Whether proguard is enabled for this apk or bundle module.')
parser.add_option(
'--proguard-configs',
help='GN-list of proguard flag files to use in final apk.')
parser.add_option(
'--proguard-mapping-path', help='Path to jar created by ProGuard step')
parser.add_option(
'--static-library-dependent-configs',
help='GN list of .build_configs of targets that use this target as a '
'static library.')
parser.add_option('--r-text-path', help='Path to target\'s R.txt file.')
parser.add_option('--fail',
help='GN-list of error message lines to fail with.')
parser.add_option('--final-dex-path',
help='Path to final input classes.dex (or classes.zip) to '
'use in final apk.')
parser.add_option('--res-size-info', help='Path to .ap_.info')
parser.add_option('--apk-proto-resources',
help='Path to resources compiled in protocol buffer format '
' for this apk.')
parser.add_option(
'--module-pathmap-path',
help='Path to pathmap file for resource paths in a bundle module.')
parser.add_option(
'--base-allowlist-rtxt-path',
help='Path to R.txt file for the base resources allowlist.')
parser.add_option(
'--is-base-module',
action='store_true',
help='Specifies that this module is a base module for some app bundle.')
parser.add_option('--generate-markdown-format-doc', action='store_true',
help='Dump the Markdown .build_config format documentation '
'then exit immediately.')
parser.add_option(
'--base-module-build-config',
help='Path to the base module\'s build config '
'if this is a feature module.')
parser.add_option(
'--module-build-configs',
help='For bundles, the paths of all non-async module .build_configs '
'for modules that are part of the bundle.')
parser.add_option('--version-name', help='Version name for this APK.')
parser.add_option('--version-code', help='Version code for this APK.')
options, args = parser.parse_args(argv)
if args:
parser.error('No positional arguments should be given.')
if options.generate_markdown_format_doc:
doc_lines = _ExtractMarkdownDocumentation(__doc__)
for line in doc_lines:
print(line)
return 0
if options.fail:
parser.error('\n'.join(build_utils.ParseGnList(options.fail)))
lib_options = ['unprocessed_jar_path', 'interface_jar_path']
device_lib_options = ['device_jar_path', 'dex_path']
required_options_map = {
'android_apk': ['build_config'] + lib_options + device_lib_options,
'android_app_bundle_module':
['build_config', 'final_dex_path', 'res_size_info'] + lib_options +
device_lib_options,
'android_assets': ['build_config'],
'android_resources': ['build_config', 'resources_zip'],
'dist_aar': ['build_config'],
'dist_jar': ['build_config'],
'group': ['build_config'],
'java_annotation_processor': ['build_config', 'main_class'],
'java_binary': ['build_config'],
'java_library': ['build_config', 'host_jar_path'] + lib_options,
'junit_binary': ['build_config'],
'system_java_library': ['build_config', 'unprocessed_jar_path'],
'android_app_bundle': ['build_config', 'module_build_configs'],
}
required_options = required_options_map.get(options.type)
if not required_options:
raise Exception('Unknown type: <%s>' % options.type)
build_utils.CheckOptions(options, parser, required_options)
if options.type != 'android_app_bundle_module':
if options.apk_proto_resources:
raise Exception('--apk-proto-resources can only be used with '
'--type=android_app_bundle_module')
if options.module_pathmap_path:
raise Exception('--module-pathmap-path can only be used with '
'--type=android_app_bundle_module')
if options.base_allowlist_rtxt_path:
raise Exception('--base-allowlist-rtxt-path can only be used with '
'--type=android_app_bundle_module')
if options.is_base_module:
raise Exception('--is-base-module can only be used with '
'--type=android_app_bundle_module')
is_apk_or_module_target = options.type in ('android_apk',
'android_app_bundle_module')
if not is_apk_or_module_target:
if options.uncompress_shared_libraries:
raise Exception('--uncompressed-shared-libraries can only be used '
'with --type=android_apk or '
'--type=android_app_bundle_module')
if options.library_always_compress:
raise Exception(
'--library-always-compress can only be used with --type=android_apk '
'or --type=android_app_bundle_module')
if options.library_renames:
raise Exception(
'--library-renames can only be used with --type=android_apk or '
'--type=android_app_bundle_module')
if options.device_jar_path and not options.dex_path:
raise Exception('java_library that supports Android requires a dex path.')
if any(getattr(options, x) for x in lib_options):
for attr in lib_options:
if not getattr(options, attr):
raise('Expected %s to be set.' % attr)
if options.requires_android and not options.supports_android:
raise Exception(
'--supports-android is required when using --requires-android')
is_java_target = options.type in (
'java_binary', 'junit_binary', 'java_annotation_processor',
'java_library', 'android_apk', 'dist_aar', 'dist_jar',
'system_java_library', 'android_app_bundle_module')
is_static_library_dex_provider_target = (
options.static_library_dependent_configs and options.proguard_enabled)
if is_static_library_dex_provider_target:
if options.type != 'android_apk':
raise Exception(
'--static-library-dependent-configs only supports --type=android_apk')
options.static_library_dependent_configs = build_utils.ParseGnList(
options.static_library_dependent_configs)
static_library_dependent_configs_by_path = {
p: GetDepConfig(p)
for p in options.static_library_dependent_configs
}
deps_configs_paths = build_utils.ParseGnList(options.deps_configs)
deps = _DepsFromPaths(deps_configs_paths,
options.type,
recursive_resource_deps=options.recursive_resource_deps)
processor_deps = _DepsFromPaths(
build_utils.ParseGnList(options.annotation_processor_configs or ''),
options.type, filter_root_targets=False)
all_inputs = sorted(
set(deps.AllConfigPaths() + processor_deps.AllConfigPaths() +
list(static_library_dependent_configs_by_path)))
direct_deps = deps.Direct()
system_library_deps = deps.Direct('system_java_library')
direct_library_deps = deps.Direct('java_library')
all_deps = deps.All()
all_library_deps = deps.All('java_library')
all_resources_deps = deps.All('android_resources')
if options.type == 'java_library':
java_library_deps = _DepsFromPathsWithFilters(
deps_configs_paths, allowlist=['android_resources'])
all_resources_deps = java_library_deps.All('android_resources')
base_module_build_config = None
if options.base_module_build_config:
with open(options.base_module_build_config, 'r') as f:
base_module_build_config = json.load(f)
all_inputs.append(options.base_module_build_config)
config = {
'deps_info': {
'name': os.path.basename(options.build_config),
'path': options.build_config,
'type': options.type,
'gn_target': options.gn_target,
'deps_configs': [d['path'] for d in direct_deps],
'chromium_code': not options.non_chromium_code,
},
'gradle': {}
}
deps_info = config['deps_info']
gradle = config['gradle']
if options.type == 'android_apk' and options.tested_apk_config:
tested_apk_deps = Deps([options.tested_apk_config])
tested_apk_config = tested_apk_deps.Direct()[0]
gradle['apk_under_test'] = tested_apk_config['name']
if options.type == 'android_app_bundle_module':
deps_info['is_base_module'] = bool(options.is_base_module)
if options.type == 'java_library':
deps_info['is_prebuilt'] = bool(options.is_prebuilt)
deps_info['gradle_treat_as_prebuilt'] = options.gradle_treat_as_prebuilt
if options.android_manifest:
deps_info['android_manifest'] = options.android_manifest
if options.bundled_srcjars:
deps_info['bundled_srcjars'] = build_utils.ParseGnList(
options.bundled_srcjars)
if options.java_sources_file:
deps_info['java_sources_file'] = options.java_sources_file
if is_java_target:
if options.bundled_srcjars:
gradle['bundled_srcjars'] = deps_info['bundled_srcjars']
gradle['dependent_android_projects'] = []
gradle['dependent_java_projects'] = []
gradle['dependent_prebuilt_jars'] = deps.GradlePrebuiltJarPaths()
if options.main_class:
deps_info['main_class'] = options.main_class
for c in deps.GradleLibraryProjectDeps():
if c['requires_android']:
gradle['dependent_android_projects'].append(c['path'])
else:
gradle['dependent_java_projects'].append(c['path'])
if options.r_text_path:
deps_info['r_text_path'] = options.r_text_path
if is_apk_or_module_target or options.type in (
'group', 'java_library', 'junit_binary'):
deps_info['jni'] = {}
all_java_sources = [c['java_sources_file'] for c in all_library_deps
if 'java_sources_file' in c]
if options.java_sources_file:
all_java_sources.append(options.java_sources_file)
if options.apk_proto_resources:
deps_info['proto_resources_path'] = options.apk_proto_resources
deps_info['version_name'] = options.version_name
deps_info['version_code'] = options.version_code
if options.module_pathmap_path:
deps_info['module_pathmap_path'] = options.module_pathmap_path
else:
deps_info['module_pathmap_path'] = ''
if options.base_allowlist_rtxt_path:
deps_info['base_allowlist_rtxt_path'] = options.base_allowlist_rtxt_path
else:
deps_info['base_allowlist_rtxt_path'] = ''
if is_java_target:
deps_info['requires_android'] = bool(options.requires_android)
deps_info['supports_android'] = bool(options.supports_android)
if not options.bypass_platform_checks:
deps_require_android = (all_resources_deps +
[d['name'] for d in all_library_deps if d['requires_android']])
deps_not_support_android = (
[d['name'] for d in all_library_deps if not d['supports_android']])
if deps_require_android and not options.requires_android:
raise Exception('Some deps require building for the Android platform: '
+ str(deps_require_android))
if deps_not_support_android and options.supports_android:
raise Exception('Not all deps support the Android platform: '
+ str(deps_not_support_android))
if is_apk_or_module_target or options.type == 'dist_jar':
all_dex_files = [c['dex_path'] for c in all_library_deps]
if is_java_target:
# Classpath values filled in below (after applying tested_apk_config).
config['javac'] = {}
if options.unprocessed_jar_path:
deps_info['unprocessed_jar_path'] = options.unprocessed_jar_path
deps_info['interface_jar_path'] = options.interface_jar_path
if options.device_jar_path:
deps_info['device_jar_path'] = options.device_jar_path
if options.host_jar_path:
deps_info['host_jar_path'] = options.host_jar_path
deps_info['jetified_jar_path'] = (options.jetified_jar_path
or options.interface_jar_path)
if options.dex_path:
deps_info['dex_path'] = options.dex_path
if is_apk_or_module_target:
all_dex_files.append(options.dex_path)
if options.type == 'android_apk':
deps_info['apk_path'] = options.apk_path
deps_info['incremental_apk_path'] = options.incremental_apk_path
deps_info['incremental_install_json_path'] = (
options.incremental_install_json_path)
if options.type == 'android_assets':
all_asset_sources = []
if options.asset_renaming_sources:
all_asset_sources.extend(
build_utils.ParseGnList(options.asset_renaming_sources))
if options.asset_sources:
all_asset_sources.extend(build_utils.ParseGnList(options.asset_sources))
deps_info['assets'] = {
'sources': all_asset_sources
}
if options.asset_renaming_destinations:
deps_info['assets']['outputs'] = (
build_utils.ParseGnList(options.asset_renaming_destinations))
if options.disable_asset_compression:
deps_info['assets']['disable_compression'] = True
if options.treat_as_locale_paks:
deps_info['assets']['treat_as_locale_paks'] = True
if options.type == 'android_resources':
deps_info['resources_zip'] = options.resources_zip
if options.resource_overlay:
deps_info['resource_overlay'] = True
if options.srcjar:
deps_info['srcjar'] = options.srcjar
if options.android_manifest:
manifest = AndroidManifest(options.android_manifest)
deps_info['package_name'] = manifest.GetPackageName()
if options.package_name:
deps_info['package_name'] = options.package_name
deps_info['res_sources_path'] = ''
if options.res_sources_path:
deps_info['res_sources_path'] = options.res_sources_path
if options.requires_android and is_java_target:
owned_resource_srcjars = set()
for c in all_resources_deps:
srcjar = c.get('srcjar')
if srcjar:
owned_resource_srcjars.add(srcjar)
for c in all_library_deps:
if c['requires_android'] and not c['is_prebuilt']:
# Many .aar files include R.class files in them, as it makes it easier
# for IDEs to resolve symbols. However, including them is not required
# and not all prebuilts do. Rather than try to detect their presense,
# just assume they are not there. The only consequence is redundant
# compilation of the R.class.
owned_resource_srcjars.difference_update(c['owned_resource_srcjars'])
deps_info['owned_resource_srcjars'] = sorted(owned_resource_srcjars)
if options.type == 'java_library':
# Used to strip out R.class for android_prebuilt()s.
config['javac']['resource_packages'] = [
c['package_name'] for c in all_resources_deps if 'package_name' in c
]
if options.package_name:
deps_info['package_name'] = options.package_name
if options.type in ('android_resources', 'android_apk', 'junit_binary',
'dist_aar', 'android_app_bundle_module', 'java_library'):
dependency_zips = []
dependency_zip_overlays = []
for c in all_resources_deps:
if not c['resources_zip']:
continue
dependency_zips.append(c['resources_zip'])
if c.get('resource_overlay'):
dependency_zip_overlays.append(c['resources_zip'])
extra_package_names = []
if options.type != 'android_resources':
extra_package_names = [
c['package_name'] for c in all_resources_deps if 'package_name' in c
]
# In final types (i.e. apks and modules) that create real R.java files,
# they must collect package names from java_libraries as well.
# https://crbug.com/1073476
if options.type != 'java_library':
extra_package_names.extend([
c['package_name'] for c in all_library_deps if 'package_name' in c
])
# For feature modules, remove any resources that already exist in the base
# module.
if base_module_build_config:
dependency_zips = [
c for c in dependency_zips
if c not in base_module_build_config['deps_info']['dependency_zips']
]
dependency_zip_overlays = [
c for c in dependency_zip_overlays if c not in
base_module_build_config['deps_info']['dependency_zip_overlays']
]
extra_package_names = [
c for c in extra_package_names if c not in
base_module_build_config['deps_info']['extra_package_names']
]
if options.type == 'android_apk' and options.tested_apk_config:
config['deps_info']['arsc_package_name'] = (
tested_apk_config['package_name'])
# We should not shadow the actual R.java files of the apk_under_test by
# creating new R.java files with the same package names in the tested apk.
extra_package_names = [
package for package in extra_package_names
if package not in tested_apk_config['extra_package_names']
]
if options.res_size_info:
config['deps_info']['res_size_info'] = options.res_size_info
config['deps_info']['dependency_zips'] = dependency_zips
config['deps_info']['dependency_zip_overlays'] = dependency_zip_overlays
config['deps_info']['extra_package_names'] = extra_package_names
# These are .jars to add to javac classpath but not to runtime classpath.
extra_classpath_jars = build_utils.ParseGnList(options.extra_classpath_jars)
if extra_classpath_jars:
deps_info['extra_classpath_jars'] = extra_classpath_jars
mergeable_android_manifests = build_utils.ParseGnList(
options.mergeable_android_manifests)
if mergeable_android_manifests:
deps_info['mergeable_android_manifests'] = mergeable_android_manifests
extra_proguard_classpath_jars = []
proguard_configs = build_utils.ParseGnList(options.proguard_configs)
if proguard_configs:
# Make a copy of |proguard_configs| since it's mutated below.
deps_info['proguard_configs'] = list(proguard_configs)
if options.type == 'dist_aar':
r_text_files = [
c['r_text_path'] for c in all_resources_deps + all_library_deps
if 'r_text_path' in c
]
deps_info['dependency_r_txt_files'] = r_text_files
if is_java_target:
javac_classpath = set(c['unprocessed_jar_path']
for c in direct_library_deps)
javac_interface_classpath = set(c['interface_jar_path']
for c in direct_library_deps)
javac_full_classpath = set(c['unprocessed_jar_path']
for c in all_library_deps)
javac_full_interface_classpath = set(c['interface_jar_path']
for c in all_library_deps)
jetified_full_jar_classpath = set(c['jetified_jar_path']
for c in all_library_deps)
if base_module_build_config:
javac_full_classpath.add(
base_module_build_config['deps_info']['unprocessed_jar_path'])
javac_full_interface_classpath.add(
base_module_build_config['deps_info']['interface_jar_path'])
jetified_full_jar_classpath.add(
base_module_build_config['deps_info']['jetified_jar_path'])
javac_interface_classpath.add(
base_module_build_config['deps_info']['interface_jar_path'])
for dep in direct_deps:
if 'extra_classpath_jars' in dep:
javac_classpath.update(dep['extra_classpath_jars'])
javac_interface_classpath.update(dep['extra_classpath_jars'])
for dep in all_deps:
if 'extra_classpath_jars' in dep:
javac_full_classpath.update(dep['extra_classpath_jars'])
javac_full_interface_classpath.update(dep['extra_classpath_jars'])
jetified_full_jar_classpath.update(dep['extra_classpath_jars'])
# TODO(agrieve): Might be less confusing to fold these into bootclasspath.
# Deps to add to the compile-time classpath (but not the runtime classpath).
# These are jars specified by input_jars_paths that almost never change.
# Just add them directly to all the classpaths.
if options.extra_classpath_jars:
javac_classpath.update(extra_classpath_jars)
javac_interface_classpath.update(extra_classpath_jars)
javac_full_classpath.update(extra_classpath_jars)
javac_full_interface_classpath.update(extra_classpath_jars)
jetified_full_jar_classpath.update(extra_classpath_jars)
if is_java_target or options.type == 'android_app_bundle':
# The classpath to use to run this target (or as an input to ProGuard).
device_classpath = []
if is_java_target and options.device_jar_path:
device_classpath.append(options.device_jar_path)
device_classpath.extend(
c.get('device_jar_path') for c in all_library_deps
if c.get('device_jar_path'))
if options.type == 'android_app_bundle':
for d in deps.Direct('android_app_bundle_module'):
device_classpath.extend(c for c in d.get('device_classpath', [])
if c not in device_classpath)
if options.type in ('dist_jar', 'java_binary', 'junit_binary'):
# The classpath to use to run this target.
host_classpath = []
if options.host_jar_path:
host_classpath.append(options.host_jar_path)
host_classpath.extend(c['host_jar_path'] for c in all_library_deps)
deps_info['host_classpath'] = host_classpath
# We allow lint to be run on android_apk targets, so we collect lint
# artifacts for them.
# We allow lint to be run on android_app_bundle targets, so we need to
# collect lint artifacts for the android_app_bundle_module targets that the
# bundle includes. Different android_app_bundle targets may include different
# android_app_bundle_module targets, so the bundle needs to be able to
# de-duplicate these lint artifacts.
if options.type in ('android_app_bundle_module', 'android_apk'):
# Collect all sources and resources at the apk/bundle_module level.
lint_srcjars = set()
lint_java_sources = set()
lint_resource_sources = set()
lint_resource_zips = set()
if options.java_sources_file:
lint_java_sources.add(options.java_sources_file)
if options.bundled_srcjars:
lint_srcjars.update(deps_info['bundled_srcjars'])
for c in all_library_deps:
if c['chromium_code'] and c['requires_android']:
if 'java_sources_file' in c:
lint_java_sources.add(c['java_sources_file'])
lint_srcjars.update(c['bundled_srcjars'])
if options.res_sources_path:
lint_resource_sources.add(options.res_sources_path)
if options.resources_zip:
lint_resource_zips.add(options.resources_zip)
for c in all_resources_deps:
if c['chromium_code']:
# Prefer res_sources_path to resources_zips so that lint errors have
# real paths and to avoid needing to extract during lint.
if c['res_sources_path']:
lint_resource_sources.add(c['res_sources_path'])
else:
lint_resource_zips.add(c['resources_zip'])
deps_info['lint_srcjars'] = sorted(lint_srcjars)
deps_info['lint_java_sources'] = sorted(lint_java_sources)
deps_info['lint_resource_sources'] = sorted(lint_resource_sources)
deps_info['lint_resource_zips'] = sorted(lint_resource_zips)
deps_info['lint_extra_android_manifests'] = []
if options.type == 'android_apk':
assert options.android_manifest, 'Android APKs must define a manifest'
deps_info['lint_android_manifest'] = options.android_manifest
if options.type == 'android_app_bundle':
module_configs = [
GetDepConfig(c)
for c in build_utils.ParseGnList(options.module_build_configs)
]
jni_all_source = set()
lint_srcjars = set()
lint_java_sources = set()
lint_resource_sources = set()
lint_resource_zips = set()
lint_extra_android_manifests = set()
for c in module_configs:
if c['is_base_module']:
assert 'base_module_config' not in deps_info, (
'Must have exactly 1 base module!')
deps_info['base_module_config'] = c['path']
# Use the base module's android manifest for linting.
deps_info['lint_android_manifest'] = c['android_manifest']
else:
lint_extra_android_manifests.add(c['android_manifest'])
jni_all_source.update(c['jni']['all_source'])
lint_srcjars.update(c['lint_srcjars'])
lint_java_sources.update(c['lint_java_sources'])
lint_resource_sources.update(c['lint_resource_sources'])
lint_resource_zips.update(c['lint_resource_zips'])
deps_info['jni'] = {'all_source': sorted(jni_all_source)}
deps_info['lint_srcjars'] = sorted(lint_srcjars)
deps_info['lint_java_sources'] = sorted(lint_java_sources)
deps_info['lint_resource_sources'] = sorted(lint_resource_sources)
deps_info['lint_resource_zips'] = sorted(lint_resource_zips)
deps_info['lint_extra_android_manifests'] = sorted(
lint_extra_android_manifests)
classpath_entries_by_owning_config = collections.defaultdict(list)
extra_main_r_text_files = []
if is_static_library_dex_provider_target:
configs_by_classpath_entry = collections.defaultdict(list)
static_lib_jar_paths = {}
for config_path, dep_config in (sorted(
static_library_dependent_configs_by_path.iteritems())):
base_config = dep_config
if dep_config['type'] == 'android_app_bundle':
base_config = GetDepConfig(dep_config['base_module_config'])
extra_main_r_text_files.append(base_config['r_text_path'])
static_lib_jar_paths[config_path] = base_config['device_jar_path']
proguard_configs.extend(dep_config['proguard_all_configs'])
extra_proguard_classpath_jars.extend(
dep_config['proguard_classpath_jars'])
all_java_sources.extend(base_config['jni']['all_source'])
for package in base_config['extra_package_names']:
if package not in extra_package_names:
extra_package_names.append(package)
for cp_entry in dep_config['device_classpath']:
configs_by_classpath_entry[cp_entry].append(config_path)
for cp_entry in device_classpath:
configs_by_classpath_entry[cp_entry].append(options.build_config)
for cp_entry, candidate_configs in configs_by_classpath_entry.iteritems():
config_path = (candidate_configs[0]
if len(candidate_configs) == 1 else options.build_config)
classpath_entries_by_owning_config[config_path].append(cp_entry)
device_classpath.append(cp_entry)
device_classpath = sorted(set(device_classpath))
deps_info['static_library_proguard_mapping_output_paths'] = sorted([
d['proguard_mapping_path']
for d in static_library_dependent_configs_by_path.itervalues()
])
deps_info['static_library_dependent_classpath_configs'] = {
path: sorted(set(classpath))
for path, classpath in classpath_entries_by_owning_config.iteritems()
}
deps_info['extra_main_r_text_files'] = sorted(extra_main_r_text_files)
if is_apk_or_module_target or options.type in ('group', 'java_library',
'junit_binary'):
deps_info['jni']['all_source'] = sorted(set(all_java_sources))
system_jars = [c['unprocessed_jar_path'] for c in system_library_deps]
system_interface_jars = [c['interface_jar_path'] for c in system_library_deps]
if system_library_deps:
config['android'] = {}
config['android']['sdk_interface_jars'] = system_interface_jars
config['android']['sdk_jars'] = system_jars
if options.type in ('android_apk', 'dist_aar',
'dist_jar', 'android_app_bundle_module', 'android_app_bundle'):
for c in all_deps:
proguard_configs.extend(c.get('proguard_configs', []))
extra_proguard_classpath_jars.extend(c.get('extra_classpath_jars', []))
if options.type == 'android_app_bundle':
for c in deps.Direct('android_app_bundle_module'):
proguard_configs.extend(p for p in c.get('proguard_configs', []))
if options.type == 'android_app_bundle':
for d in deps.Direct('android_app_bundle_module'):
extra_proguard_classpath_jars.extend(
c for c in d.get('proguard_classpath_jars', [])
if c not in extra_proguard_classpath_jars)
if options.type == 'android_app_bundle':
deps_proguard_enabled = []
deps_proguard_disabled = []
for d in deps.Direct('android_app_bundle_module'):
if not d['device_classpath']:
continue
if d['proguard_enabled']:
deps_proguard_enabled.append(d['name'])
else:
deps_proguard_disabled.append(d['name'])
if deps_proguard_enabled and deps_proguard_disabled:
raise Exception('Deps %s have proguard enabled while deps %s have '
'proguard disabled' % (deps_proguard_enabled,
deps_proguard_disabled))
else:
deps_info['proguard_enabled'] = bool(options.proguard_enabled)
if options.proguard_mapping_path:
deps_info['proguard_mapping_path'] = options.proguard_mapping_path
# The java code for an instrumentation test apk is assembled differently for
# ProGuard vs. non-ProGuard.
#
# Without ProGuard: Each library's jar is dexed separately and then combined
# a single .jar, which is then dexed into a classes.dex. A test apk includes
# all jar files from the program and the tests because having them separate
# doesn't work with ProGuard's whole-program optimizations. Although the
# apk-under-test still has all of its code in its classes.dex, none of it is
# used at runtime because the copy of it within the test apk takes precidence.
if options.type == 'android_apk' and options.tested_apk_config:
if tested_apk_config['proguard_enabled']:
assert options.proguard_enabled, ('proguard must be enabled for '
'instrumentation apks if it\'s enabled for the tested apk.')
proguard_configs.extend(
p for p in tested_apk_config['proguard_all_configs'])
extra_proguard_classpath_jars.extend(
p for p in tested_apk_config['proguard_classpath_jars'])
tested_apk_config = GetDepConfig(options.tested_apk_config)
deps_info['proguard_under_test_mapping'] = (
tested_apk_config['proguard_mapping_path'])
elif options.proguard_enabled:
# is not proguarded, but it's easy enough to support.
deps_info['proguard_under_test_mapping'] = ''
device_classpath_extended = list(device_classpath)
device_classpath_extended.extend(
p for p in tested_apk_config['device_classpath']
if p not in device_classpath)
# Include in the classpath classes that are added directly to the apk under
# test (those that are not a part of a java_library).
javac_classpath.add(tested_apk_config['unprocessed_jar_path'])
javac_interface_classpath.add(tested_apk_config['interface_jar_path'])
javac_full_classpath.add(tested_apk_config['unprocessed_jar_path'])
javac_full_interface_classpath.add(tested_apk_config['interface_jar_path'])
jetified_full_jar_classpath.add(tested_apk_config['interface_jar_path'])
javac_full_classpath.update(tested_apk_config['javac_full_classpath'])
javac_full_interface_classpath.update(
tested_apk_config['javac_full_interface_classpath'])
jetified_full_jar_classpath.update(
tested_apk_config['jetified_full_jar_classpath'])
# Exclude .jar files from the test apk that exist within the apk under test.
tested_apk_library_deps = tested_apk_deps.All('java_library')
tested_apk_dex_files = {c['dex_path'] for c in tested_apk_library_deps}
all_dex_files = [p for p in all_dex_files if p not in tested_apk_dex_files]
tested_apk_jar_files = set(tested_apk_config['device_classpath'])
device_classpath = [
p for p in device_classpath if p not in tested_apk_jar_files
]
if options.type in ('android_apk', 'dist_aar', 'dist_jar',
'android_app_bundle_module', 'android_app_bundle'):
deps_info['proguard_all_configs'] = sorted(set(proguard_configs))
deps_info['proguard_classpath_jars'] = sorted(
set(extra_proguard_classpath_jars))
# Dependencies for the final dex file of an apk.
if (is_apk_or_module_target or options.final_dex_path
or options.type == 'dist_jar'):
config['final_dex'] = {}
dex_config = config['final_dex']
dex_config['path'] = options.final_dex_path
if is_apk_or_module_target or options.type == 'dist_jar':
dex_config['all_dex_files'] = all_dex_files
if is_java_target:
config['javac']['classpath'] = sorted(javac_classpath)
config['javac']['interface_classpath'] = sorted(javac_interface_classpath)
# Direct() will be of type 'java_annotation_processor', and so not included
# in All('java_library').
# Annotation processors run as part of the build, so need host_jar_path.
config['javac']['processor_classpath'] = [
c['host_jar_path'] for c in processor_deps.Direct()
if c.get('host_jar_path')
]
config['javac']['processor_classpath'] += [
c['host_jar_path'] for c in processor_deps.All('java_library')
]
config['javac']['processor_classes'] = [
c['main_class'] for c in processor_deps.Direct()]
deps_info['javac_full_classpath'] = sorted(javac_full_classpath)
deps_info['javac_full_interface_classpath'] = sorted(
javac_full_interface_classpath)
deps_info['jetified_full_jar_classpath'] = sorted(
jetified_full_jar_classpath)
elif options.type == 'android_app_bundle':
# bundles require javac_full_classpath to create .aab.jar.info and require
# javac_full_interface_classpath for lint.
javac_full_classpath = set()
javac_full_interface_classpath = set()
for d in deps.Direct('android_app_bundle_module'):
javac_full_classpath.update(d['javac_full_classpath'])
javac_full_interface_classpath.update(d['javac_full_interface_classpath'])
javac_full_classpath.add(d['unprocessed_jar_path'])
javac_full_interface_classpath.add(d['interface_jar_path'])
deps_info['javac_full_classpath'] = sorted(javac_full_classpath)
deps_info['javac_full_interface_classpath'] = sorted(
javac_full_interface_classpath)
if options.type in ('android_apk', 'dist_jar', 'android_app_bundle_module',
'android_app_bundle'):
deps_info['device_classpath'] = device_classpath
if options.tested_apk_config:
deps_info['device_classpath_extended'] = device_classpath_extended
if options.type in ('android_apk', 'dist_jar'):
all_interface_jars = []
if options.interface_jar_path:
all_interface_jars.append(options.interface_jar_path)
all_interface_jars.extend(c['interface_jar_path'] for c in all_library_deps)
config['dist_jar'] = {
'all_interface_jars': all_interface_jars,
}
if is_apk_or_module_target:
manifest = AndroidManifest(options.android_manifest)
deps_info['package_name'] = manifest.GetPackageName()
if not options.tested_apk_config and manifest.GetInstrumentationElements():
# This must then have instrumentation only for itself.
manifest.CheckInstrumentationElements(manifest.GetPackageName())
library_paths = []
java_libraries_list = None
if options.shared_libraries_runtime_deps:
library_paths = _ExtractSharedLibsFromRuntimeDeps(
options.shared_libraries_runtime_deps)
java_libraries_list = _CreateJavaLibrariesList(library_paths)
all_inputs.append(options.shared_libraries_runtime_deps)
secondary_abi_library_paths = []
if options.secondary_abi_shared_libraries_runtime_deps:
secondary_abi_library_paths = _ExtractSharedLibsFromRuntimeDeps(
options.secondary_abi_shared_libraries_runtime_deps)
all_inputs.append(options.secondary_abi_shared_libraries_runtime_deps)
native_library_placeholder_paths = build_utils.ParseGnList(
options.native_lib_placeholders)
secondary_native_library_placeholder_paths = build_utils.ParseGnList(
options.secondary_native_lib_placeholders)
loadable_modules = build_utils.ParseGnList(options.loadable_modules)
secondary_abi_loadable_modules = build_utils.ParseGnList(
options.secondary_abi_loadable_modules)
config['native'] = {
'libraries':
library_paths,
'native_library_placeholders':
native_library_placeholder_paths,
'secondary_abi_libraries':
secondary_abi_library_paths,
'secondary_native_library_placeholders':
secondary_native_library_placeholder_paths,
'java_libraries_list':
java_libraries_list,
'uncompress_shared_libraries':
options.uncompress_shared_libraries,
'library_always_compress':
options.library_always_compress,
'library_renames':
options.library_renames,
'loadable_modules':
loadable_modules,
'secondary_abi_loadable_modules':
secondary_abi_loadable_modules,
}
config['assets'], config['uncompressed_assets'], locale_paks = (
_MergeAssets(deps.All('android_assets')))
deps_info['compressed_locales_java_list'] = _CreateJavaLocaleListFromAssets(
config['assets'], locale_paks)
deps_info[
'uncompressed_locales_java_list'] = _CreateJavaLocaleListFromAssets(
config['uncompressed_assets'], locale_paks)
config['extra_android_manifests'] = []
for c in all_deps:
config['extra_android_manifests'].extend(
c.get('mergeable_android_manifests', []))
# Collect java resources
java_resources_jars = [d['java_resources_jar'] for d in all_library_deps
if 'java_resources_jar' in d]
if options.tested_apk_config:
tested_apk_resource_jars = [d['java_resources_jar']
for d in tested_apk_library_deps
if 'java_resources_jar' in d]
java_resources_jars = [jar for jar in java_resources_jars
if jar not in tested_apk_resource_jars]
config['java_resources_jars'] = java_resources_jars
if options.java_resources_jar_path:
deps_info['java_resources_jar'] = options.java_resources_jar_path
# DYNAMIC FEATURE MODULES:
# Make sure that dependencies that exist on the base module
# are not duplicated on the feature module.
if base_module_build_config:
base = base_module_build_config
RemoveObjDups(config, base, 'deps_info', 'device_classpath')
RemoveObjDups(config, base, 'deps_info', 'javac_full_classpath')
RemoveObjDups(config, base, 'deps_info', 'javac_full_interface_classpath')
RemoveObjDups(config, base, 'deps_info', 'jetified_full_jar_classpath')
RemoveObjDups(config, base, 'deps_info', 'jni', 'all_source')
RemoveObjDups(config, base, 'final_dex', 'all_dex_files')
RemoveObjDups(config, base, 'extra_android_manifests')
if is_java_target:
jar_to_target = {}
_AddJarMapping(jar_to_target, [deps_info])
_AddJarMapping(jar_to_target, all_deps)
if base_module_build_config:
_AddJarMapping(jar_to_target, [base_module_build_config['deps_info']])
if options.tested_apk_config:
_AddJarMapping(jar_to_target, [tested_apk_config])
for jar, target in itertools.izip(
tested_apk_config['javac_full_classpath'],
tested_apk_config['javac_full_classpath_targets']):
jar_to_target[jar] = target
# Used by bytecode_processor to give better error message when missing
# deps are found.
config['deps_info']['javac_full_classpath_targets'] = [
jar_to_target[x] for x in deps_info['javac_full_classpath']
]
build_utils.WriteJson(config, options.build_config, only_if_changed=True)
if options.depfile:
build_utils.WriteDepfile(options.depfile, options.build_config, all_inputs)
# Script entry point: forward the CLI args (minus the program name) to main()
# and propagate its return value as the process exit status.
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| true | true |
1c386d25c75cabcae6810281f5a96424983e664d | 2,465 | py | Python | src/mbed_tools/cli/main.py | jainvikas8/mbed-tools | 55435a4c5a7184038e1fde8e7b1c4194fa75ab8a | [
"Apache-2.0"
] | null | null | null | src/mbed_tools/cli/main.py | jainvikas8/mbed-tools | 55435a4c5a7184038e1fde8e7b1c4194fa75ab8a | [
"Apache-2.0"
] | null | null | null | src/mbed_tools/cli/main.py | jainvikas8/mbed-tools | 55435a4c5a7184038e1fde8e7b1c4194fa75ab8a | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) 2020 Arm Mbed. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Main cli entry point."""
import logging
from pkg_resources import get_distribution
from typing import Union, Any
import click
from mbed_tools.lib.logging import set_log_level, MbedToolsHandler
from mbed_tools.cli.configure import configure
from mbed_tools.cli.list_connected_devices import list_connected_devices
from mbed_tools.cli.env_cli import cli as env_cli
from mbed_tools.cli.project_management import init, clone, checkout, libs
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
LOGGER = logging.getLogger(__name__)
class GroupWithExceptionHandling(click.Group):
    """A click.Group which handles ToolsErrors and logging."""

    def invoke(self, context: click.Context) -> None:
        """Invoke the command group.

        Args:
            context: The current click context.
        """
        # Use the context manager to ensure tools exceptions (expected behaviour) are shown as messages to the user,
        # but all other exceptions (unexpected behaviour) are shown as errors.
        # Relies on the root group's eager "--traceback" flag having already
        # been parsed into context.params before any subcommand runs.
        with MbedToolsHandler(LOGGER, context.params["traceback"]):
            super().invoke(context)
def print_version(context: click.Context, param: Union[click.Option, click.Parameter], value: bool) -> Any:
    """Eager ``--version`` callback: print the mbed-tools version and exit."""
    # Do nothing while click is resilient-parsing (e.g. shell completion) or
    # when the flag was not actually given.
    if not value or context.resilient_parsing:
        return
    click.echo(get_distribution("mbed-tools").version)
    context.exit()
# Root command group. The eager "--version" option short-circuits parsing via
# its callback (prints and exits before any subcommand runs).
@click.group(cls=GroupWithExceptionHandling, context_settings=CONTEXT_SETTINGS)
@click.option(
    "--version",
    is_flag=True,
    callback=print_version,
    expose_value=False,
    is_eager=True,
    help="Display versions of all Mbed Tools packages.",
)
@click.option(
    "-v",
    "--verbose",
    default=0,
    count=True,
    help="Set the verbosity level, enter multiple times to increase verbosity.",
)
@click.option("-t", "--traceback", is_flag=True, show_default=True, help="Show a traceback when an error is raised.")
def cli(verbose: int, traceback: bool) -> None:
    """Command line tool for interacting with Mbed OS."""
    # `traceback` is not read here; GroupWithExceptionHandling picks it up
    # from context.params when invoking subcommands.
    set_log_level(verbose)
# Register all subcommands on the root group. `init`, `checkout`, `clone` and
# `libs` come from the project-management module; the rest are standalone.
cli.add_command(configure, "configure")
cli.add_command(list_connected_devices, "devices")
cli.add_command(env_cli, "env")
cli.add_command(init, "init")
cli.add_command(checkout, "checkout")
cli.add_command(clone, "clone")
cli.add_command(libs, "libs")
| 31.602564 | 117 | 0.726166 |
import logging
from pkg_resources import get_distribution
from typing import Union, Any
import click
from mbed_tools.lib.logging import set_log_level, MbedToolsHandler
from mbed_tools.cli.configure import configure
from mbed_tools.cli.list_connected_devices import list_connected_devices
from mbed_tools.cli.env_cli import cli as env_cli
from mbed_tools.cli.project_management import init, clone, checkout, libs
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
LOGGER = logging.getLogger(__name__)
class GroupWithExceptionHandling(click.Group):
def invoke(self, context: click.Context) -> None:
with MbedToolsHandler(LOGGER, context.params["traceback"]):
super().invoke(context)
def print_version(context: click.Context, param: Union[click.Option, click.Parameter], value: bool) -> Any:
if not value or context.resilient_parsing:
return
version_string = get_distribution("mbed-tools").version
click.echo(version_string)
context.exit()
@click.group(cls=GroupWithExceptionHandling, context_settings=CONTEXT_SETTINGS)
@click.option(
"--version",
is_flag=True,
callback=print_version,
expose_value=False,
is_eager=True,
help="Display versions of all Mbed Tools packages.",
)
@click.option(
"-v",
"--verbose",
default=0,
count=True,
help="Set the verbosity level, enter multiple times to increase verbosity.",
)
@click.option("-t", "--traceback", is_flag=True, show_default=True, help="Show a traceback when an error is raised.")
def cli(verbose: int, traceback: bool) -> None:
set_log_level(verbose)
cli.add_command(configure, "configure")
cli.add_command(list_connected_devices, "devices")
cli.add_command(env_cli, "env")
cli.add_command(init, "init")
cli.add_command(checkout, "checkout")
cli.add_command(clone, "clone")
cli.add_command(libs, "libs")
| true | true |
1c386e6e932f1a3a526952a0150d184afbe6588e | 3,630 | py | Python | tests/test_utils.py | pynista/telegrambotapiwrapper | 4310882a1a7db94f5256b010ff8a3103b405dc0d | [
"MIT"
] | 1 | 2021-05-10T06:49:52.000Z | 2021-05-10T06:49:52.000Z | tests/test_utils.py | pynista/telegrambotapiwrapper | 4310882a1a7db94f5256b010ff8a3103b405dc0d | [
"MIT"
] | null | null | null | tests/test_utils.py | pynista/telegrambotapiwrapper | 4310882a1a7db94f5256b010ff8a3103b405dc0d | [
"MIT"
] | null | null | null | import unittest
from telegrambotapiwrapper.request import replace__from___to__from
from telegrambotapiwrapper.response import is_str_int_float_bool
from telegrambotapiwrapper.response import replace__from__by__from_
def is_ends_with_underscore(value: str) -> bool:
    """Return True if `value` ends with an underscore.

    Args:
        value: The string to inspect.

    Returns:
        bool. Whether the last character of `value` is '_'. False for an
        empty string.
    """
    # str.endswith already returns False for the empty string, so the
    # previous explicit empty-string branch is unnecessary.
    return value.endswith('_')
class TestUtils(unittest.TestCase):
    """Unit tests for the request/response key-renaming helpers."""

    def test_is_str_int_float_bool(self):
        """Scalars (str, int, float, bool) are accepted; other objects are
        rejected."""
        self.assertTrue(is_str_int_float_bool("assaads"))
        self.assertTrue(is_str_int_float_bool(123))
        self.assertTrue(is_str_int_float_bool(23432.45435))
        self.assertTrue(is_str_int_float_bool(True))
        class A:
            pass
        self.assertFalse(is_str_int_float_bool(A()))

    def test_is_ends_with_underscore(self):
        """Trailing underscore is detected; empty string is falsy."""
        self.assertTrue(is_ends_with_underscore("asedsaads_"))
        self.assertTrue(is_ends_with_underscore("_aessaads_"))
        self.assertFalse(is_ends_with_underscore("_aseswsdwc"))
        self.assertFalse(is_ends_with_underscore(""))

    def test_replace_from_word(self):
        """'from' -> 'from_': every nested 'from' key is renamed, at the top
        level and inside nested dicts."""
        without_from_ = {
            'chat': {
                'all_members_are_administrators': True,
                'first_name': 'regfrefre',
                'id': 1234,
                'pinned_message': {
                    'chat': {
                        'id': 12344332534,
                        'title': 'fjhdkjfhskdlhsj',
                        'type': 'sddsfdsf',
                        'from': False
                    },
                    'date': 12435324,
                    'message_id': 123214234
                },
                'title': 'fvgfgfd',
                'type': 'sdfdsfds',
                'username': 'regfrefre'
            },
            'date': 4584979847685478,
            'from': {
                'first_name': '23fdvfvdsc',
                'id': 1232343,
                'is_bot': False,
                'last_name': 'gjfdkjglkfjglkjf'
            },
            'message_id': 12312321
        }
        res = replace__from__by__from_(without_from_)
        # Both the deeply-nested and the top-level key must be renamed.
        self.assertIn("from_", res['chat']['pinned_message']['chat'])
        self.assertNotIn("from", res['chat']['pinned_message']['chat'])
        self.assertIn("from_", res)
        self.assertNotIn("from", res)

    def test_replace_from__word(self):
        """'from_' -> 'from': the inverse renaming, again at every nesting
        level."""
        with_from_ = {
            'chat': {
                'all_members_are_administrators': True,
                'first_name': 'regfrefre',
                'id': 1234,
                'pinned_message': {
                    'chat': {
                        'id': 12344332534,
                        'title': 'fjhdkjfhskdlhsj',
                        'type': 'sddsfdsf',
                        'from_': False
                    },
                    'date': 12435324,
                    'message_id': 123214234
                },
                'title': 'fvgfgfd',
                'type': 'sdfdsfds',
                'username': 'regfrefre'
            },
            'date': 4584979847685478,
            'from_': {
                'first_name': '23fdvfvdsc',
                'id': 1232343,
                'is_bot': False,
                'last_name': 'gjfdkjglkfjglkjf'
            },
            'message_id': 12312321
        }
        res = replace__from___to__from(with_from_)
        self.assertIn("from", res['chat']['pinned_message']['chat'])
        self.assertNotIn("from_", res['chat']['pinned_message']['chat'])
        self.assertIn("from", res)
        self.assertNotIn("from_", res)
| 33.611111 | 72 | 0.50854 | import unittest
from telegrambotapiwrapper.request import replace__from___to__from
from telegrambotapiwrapper.response import is_str_int_float_bool
from telegrambotapiwrapper.response import replace__from__by__from_
def is_ends_with_underscore(value: str):
if value == "":
return False
else:
return value[-1] == '_'
class TestUtils(unittest.TestCase):
def test_is_str_int_float_bool(self):
self.assertTrue(is_str_int_float_bool("assaads"))
self.assertTrue(is_str_int_float_bool(123))
self.assertTrue(is_str_int_float_bool(23432.45435))
self.assertTrue(is_str_int_float_bool(True))
class A:
pass
self.assertFalse(is_str_int_float_bool(A()))
def test_is_ends_with_underscore(self):
self.assertTrue(is_ends_with_underscore("asedsaads_"))
self.assertTrue(is_ends_with_underscore("_aessaads_"))
self.assertFalse(is_ends_with_underscore("_aseswsdwc"))
self.assertFalse(is_ends_with_underscore(""))
def test_replace_from_word(self):
without_from_ = {
'chat': {
'all_members_are_administrators': True,
'first_name': 'regfrefre',
'id': 1234,
'pinned_message': {
'chat': {
'id': 12344332534,
'title': 'fjhdkjfhskdlhsj',
'type': 'sddsfdsf',
'from': False
},
'date': 12435324,
'message_id': 123214234
},
'title': 'fvgfgfd',
'type': 'sdfdsfds',
'username': 'regfrefre'
},
'date': 4584979847685478,
'from': {
'first_name': '23fdvfvdsc',
'id': 1232343,
'is_bot': False,
'last_name': 'gjfdkjglkfjglkjf'
},
'message_id': 12312321
}
res = replace__from__by__from_(without_from_)
self.assertIn("from_", res['chat']['pinned_message']['chat'])
self.assertNotIn("from", res['chat']['pinned_message']['chat'])
self.assertIn("from_", res)
self.assertNotIn("from", res)
def test_replace_from__word(self):
with_from_ = {
'chat': {
'all_members_are_administrators': True,
'first_name': 'regfrefre',
'id': 1234,
'pinned_message': {
'chat': {
'id': 12344332534,
'title': 'fjhdkjfhskdlhsj',
'type': 'sddsfdsf',
'from_': False
},
'date': 12435324,
'message_id': 123214234
},
'title': 'fvgfgfd',
'type': 'sdfdsfds',
'username': 'regfrefre'
},
'date': 4584979847685478,
'from_': {
'first_name': '23fdvfvdsc',
'id': 1232343,
'is_bot': False,
'last_name': 'gjfdkjglkfjglkjf'
},
'message_id': 12312321
}
res = replace__from___to__from(with_from_)
self.assertIn("from", res['chat']['pinned_message']['chat'])
self.assertNotIn("from_", res['chat']['pinned_message']['chat'])
self.assertIn("from", res)
self.assertNotIn("from_", res)
| true | true |
1c386ee9726c7ae77bf8398030c2377cee80db26 | 1,443 | py | Python | smtools/extract_tarfile.py | hrsma2i/smtools | d52394b17fa2870dd4a48f7d14db9bbbbb25b9f2 | [
"MIT"
] | null | null | null | smtools/extract_tarfile.py | hrsma2i/smtools | d52394b17fa2870dd4a48f7d14db9bbbbb25b9f2 | [
"MIT"
] | null | null | null | smtools/extract_tarfile.py | hrsma2i/smtools | d52394b17fa2870dd4a48f7d14db9bbbbb25b9f2 | [
"MIT"
] | null | null | null | import os
import argparse
import shutil
import tarfile
from glob import glob
def extract_tarfile(inp_dir, remove=True):
    """Extract the (possibly fragmented) `*.tar.gz` archive in `inp_dir`.

    If no complete archive is present, the `*.tar.gz-*` fractions in the
    directory are concatenated into one first (EAFP: the IndexError from
    indexing the empty glob result triggers the fallback).

    Args:
        inp_dir: str. Directory containing the archive or its fractions.
        remove: bool. If True, delete intermediate files (fractions and the
            combined tarball) as they are consumed.
    """
    try:
        tar_file = sorted(glob(os.path.join(inp_dir, '*.tar.gz')))[0]
    except IndexError as e:
        print(e)
        print('There is no tar file in {}'.format(inp_dir))
        tar_file = concat(inp_dir, remove=remove)
    extract(tar_file, remove=remove)
def concat(inp_dir, remove=True):
    """Concatenate `*.tar.gz-*` fractions in `inp_dir` into one tarball.

    Fractions are joined in lexicographic order (the usual `split` output
    order, e.g. ``dataset.tar.gz-aa``, ``dataset.tar.gz-ab``, ...). The
    combined file name is the fraction name up to the first '-'
    ('.../dataset.tar.gz-aa' -> '.../dataset.tar.gz').

    Args:
        inp_dir: str. Directory containing the fraction files.
        remove: bool. If True, delete each fraction after it is written.

    Returns:
        str. Path to the combined tar file.
    """
    tar_file_fracs = sorted(glob(os.path.join(inp_dir, '*.tar.gz-*')))
    assert len(tar_file_fracs) > 0,\
        'Cant\'t even find tar file fractions in {}'.format(inp_dir)
    tar_file = os.path.join(
        inp_dir,
        os.path.basename(tar_file_fracs[0]).split('-')[0],
    )
    # Open the target once in 'wb' so a stale combined file from a previous
    # run is truncated instead of appended to (the old per-fraction 'ab'
    # opens corrupted the archive on re-runs), and stream each fraction with
    # copyfileobj instead of reading it wholly into memory.
    with open(tar_file, 'wb') as f_out:
        for frac in tar_file_fracs:
            with open(frac, 'rb') as f_in:
                shutil.copyfileobj(f_in, f_out)
            if remove:
                os.remove(frac)
    return tar_file
def extract(tar_file, remove=True):
    """Extract a `.tar.gz` archive into the directory that contains it.

    Args:
        tar_file: str. Path to the gzip-compressed tar archive.
        remove: bool. If True, delete the archive after extraction.
    """
    # NOTE(review): extractall trusts member paths; only use with archives
    # from a trusted source (path-traversal risk with hostile tars).
    with tarfile.open(tar_file, 'r:gz') as tf:
        tf.extractall(os.path.dirname(tar_file))
    # Delete only after the `with` block has closed the archive: removing a
    # file that is still open fails on Windows.
    if remove:
        os.remove(tar_file)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Directory holding the tarball or its split fractions.
    parser.add_argument(
        'inp_dir'
    )
    # When set, intermediate files are deleted as they are consumed.
    parser.add_argument(
        '-rm', '--remove',
        action="store_true"
    )
    args = parser.parse_args()
    # NOTE(review): only `concat` is invoked here, so running the script
    # joins the fractions without extracting them; `extract_tarfile` looks
    # like the intended call -- confirm before changing.
    concat(args.inp_dir, remove=args.remove)
| 25.767857 | 70 | 0.605683 | import os
from glob import glob
import tarfile
import argparse
def extract_tarfile(inp_dir, remove=True):
try:
tar_file = sorted(glob(os.path.join(inp_dir, '*.tar.gz')))[0]
except IndexError as e:
print(e)
print('There is no tar file in {}'.format(inp_dir))
tar_file = concat(inp_dir, remove=remove)
extract(tar_file, remove=remove)
def concat(inp_dir, remove=True):
tar_file_fracs = sorted(glob(os.path.join(inp_dir, '*.tar.gz-*')))
assert len(tar_file_fracs) > 0,\
'Cant\'t even find tar file fractions in {}'.format(inp_dir)
tar_file = os.path.join(
inp_dir,
os.path.basename(tar_file_fracs[0]).split('-')[0],
)
for frac in tar_file_fracs:
with open(frac, 'rb') as f_in,\
open(tar_file, 'ab') as f_out:
f_out.write(f_in.read())
if remove:
os.remove(frac)
return tar_file
def extract(tar_file, remove=True):
with tarfile.open(tar_file, 'r:gz') as tf:
tf.extractall(os.path.dirname(tar_file))
if remove:
os.remove(tar_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'inp_dir'
)
parser.add_argument(
'-rm', '--remove',
action="store_true"
)
args = parser.parse_args()
concat(args.inp_dir, remove=args.remove)
| true | true |
1c386ffc62e6cdddb2329450ddc928e7cd854dbb | 1,124 | py | Python | helper/tile.py | LHGames-2018/DCI4espaces | 0b5dfed8b99d07f3f13ef4b9861494155922bc8b | [
"MIT"
] | null | null | null | helper/tile.py | LHGames-2018/DCI4espaces | 0b5dfed8b99d07f3f13ef4b9861494155922bc8b | [
"MIT"
] | null | null | null | helper/tile.py | LHGames-2018/DCI4espaces | 0b5dfed8b99d07f3f13ef4b9861494155922bc8b | [
"MIT"
] | null | null | null | from helper.structs import Point
from enum import Enum
class Tile:
def __init__(self, tile_content, x, y):
self.TileContent = tile_content
self.Position = Point(x, y)
pass
def __repr__(self):
return "(%d, %d, %s)" % (self.Position.x, self.Position.y, TileContent.getName(self.TileContent))
class ResourceTile(Tile):
def __init__(self, tile_content, x, y, amount_left, density):
Tile.__init__(self, tile_content, x, y)
self.AmountLeft = amount_left
self.Density = density
class TileContent(Enum):
Empty = 0
Wall = 1
House = 2
Lava = 3
Resource = 4
Shop = 5
Player = 6
Unknown = 7
@staticmethod
def getName(value):
names = {
TileContent.Empty: "Empty",
TileContent.Wall: "Wall",
TileContent.House: "House",
TileContent.Lava: "Lava",
TileContent.Resource: "Resource",
TileContent.Shop: "Shop",
TileContent.Player: "Player",
TileContent.Unknown: "Unknown",
}
return names[value]
| 24.434783 | 105 | 0.579181 | from helper.structs import Point
from enum import Enum
class Tile:
def __init__(self, tile_content, x, y):
self.TileContent = tile_content
self.Position = Point(x, y)
pass
def __repr__(self):
return "(%d, %d, %s)" % (self.Position.x, self.Position.y, TileContent.getName(self.TileContent))
class ResourceTile(Tile):
def __init__(self, tile_content, x, y, amount_left, density):
Tile.__init__(self, tile_content, x, y)
self.AmountLeft = amount_left
self.Density = density
class TileContent(Enum):
Empty = 0
Wall = 1
House = 2
Lava = 3
Resource = 4
Shop = 5
Player = 6
Unknown = 7
@staticmethod
def getName(value):
names = {
TileContent.Empty: "Empty",
TileContent.Wall: "Wall",
TileContent.House: "House",
TileContent.Lava: "Lava",
TileContent.Resource: "Resource",
TileContent.Shop: "Shop",
TileContent.Player: "Player",
TileContent.Unknown: "Unknown",
}
return names[value]
| true | true |
1c38719c8678e05b32268f4953f347590b317c36 | 20,919 | py | Python | core/domain/blog_services.py | WebFlakyTest/oppia | 520e35490eae8171beb035fbafc2948983abec75 | [
"Apache-2.0"
] | 1 | 2021-08-17T20:33:12.000Z | 2021-08-17T20:33:12.000Z | core/domain/blog_services.py | WebFlakyTest/oppia | 520e35490eae8171beb035fbafc2948983abec75 | [
"Apache-2.0"
] | null | null | null | core/domain/blog_services.py | WebFlakyTest/oppia | 520e35490eae8171beb035fbafc2948983abec75 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for operations on blogs, and related models."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
from constants import constants
from core.domain import blog_domain
from core.domain import html_cleaner
from core.domain import role_services
from core.platform import models
import feconf
import python_utils
import utils
(blog_models,) = models.Registry.import_models([models.NAMES.blog])
datastore_services = models.Registry.import_datastore_services()
def get_blog_post_from_model(blog_post_model):
    """Returns a blog post domain object given a blog post model loaded
    from the datastore.

    Args:
        blog_post_model: BlogPostModel. The blog post model loaded from the
            datastore.

    Returns:
        BlogPost. A blog post domain object corresponding to the given
        blog post model.
    """
    # Positional arguments must stay in the order expected by the
    # blog_domain.BlogPost constructor.
    return blog_domain.BlogPost(
        blog_post_model.id,
        blog_post_model.author_id,
        blog_post_model.title,
        blog_post_model.content,
        blog_post_model.url_fragment,
        blog_post_model.tags,
        blog_post_model.thumbnail_filename,
        blog_post_model.last_updated,
        blog_post_model.published_on)
def get_blog_post_by_id(blog_post_id, strict=True):
    """Returns a domain object representing a blog post.

    Args:
        blog_post_id: str. ID of the blog post.
        strict: bool. Fails noisily if the model doesn't exist.

    Returns:
        BlogPost or None. The domain object representing a blog post with
        the given id, or None if it does not exist.
    """
    model = blog_models.BlogPostModel.get(blog_post_id, strict=strict)
    return get_blog_post_from_model(model) if model else None
def get_blog_post_by_url_fragment(url_fragment):
    """Returns a domain object representing a blog post.

    Args:
        url_fragment: str. The url fragment of the blog post.

    Returns:
        BlogPost or None. The domain object representing a blog post with
        the given url fragment, or None if it does not exist.
    """
    model = blog_models.BlogPostModel.get_by_url_fragment(url_fragment)
    return None if model is None else get_blog_post_from_model(model)
def get_blog_post_summary_from_model(blog_post_summary_model):
    """Returns a blog post summary domain object given a blog post summary
    model loaded from the datastore.

    Args:
        blog_post_summary_model: BlogPostSummaryModel. The blog post model
            loaded from the datastore.

    Returns:
        BlogPostSummary. A blog post summary domain object corresponding to
        the given blog post summary model.
    """
    # Same field mapping as get_blog_post_from_model, except that the
    # summary text replaces the full content.
    return blog_domain.BlogPostSummary(
        blog_post_summary_model.id,
        blog_post_summary_model.author_id,
        blog_post_summary_model.title,
        blog_post_summary_model.summary,
        blog_post_summary_model.url_fragment,
        blog_post_summary_model.tags,
        blog_post_summary_model.thumbnail_filename,
        blog_post_summary_model.last_updated,
        blog_post_summary_model.published_on)
def get_blog_post_summary_by_id(blog_post_id, strict=False):
    """Returns a domain object representing a blog post summary.

    Args:
        blog_post_id: str. ID of the blog post.
        strict: bool. Fails noisily if the model doesn't exist.

    Returns:
        BlogPostSummary or None. The domain object representing a blog post
        summary with the given ID, or None if it does not exist.
    """
    summary_model = blog_models.BlogPostSummaryModel.get(
        blog_post_id, strict=strict)
    if not summary_model:
        return None
    return get_blog_post_summary_from_model(summary_model)
def get_blog_post_summary_models_list_by_user_id(
        user_id, blog_post_is_published):
    """Given the user ID and status, it returns the list of blog post summary
    domain object for which user is an editor and the status matches.

    Args:
        user_id: str. The user who is editor of the blog posts.
        blog_post_is_published: bool. Whether the given blog post is
            published or not.

    Returns:
        list(BlogPostSummary) or None. The blog post summaries of the blog
        posts for which the user is an editor corresponding to the status
        (draft/published), or None when there are no matching blog posts.
    """
    blog_post_ids = filter_blog_post_ids(user_id, blog_post_is_published)
    blog_post_summary_models = (
        blog_models.BlogPostSummaryModel.get_multi(blog_post_ids))
    # get_multi returns None placeholders for missing IDs; keep them so
    # callers can align results with the requested IDs.
    blog_post_summaries = [
        get_blog_post_summary_from_model(model) if model is not None else None
        for model in blog_post_summary_models]
    return blog_post_summaries if blog_post_summaries else None
def filter_blog_post_ids(user_id, blog_post_is_published):
    """Given the user ID and status, it returns the IDs of all blog post
    according to the status.

    Args:
        user_id: str. The user who is editor of the blog post.
        blog_post_is_published: bool. True if blog post is published.

    Returns:
        list(str). The blog post IDs of the blog posts for which the user is
        an editor corresponding to the status (draft/published).
    """
    rights_models = blog_models.BlogPostRightsModel.query(
        blog_models.BlogPostRightsModel.editor_ids == user_id,
        blog_models.BlogPostRightsModel.blog_post_is_published == (
            blog_post_is_published)).fetch()
    if not rights_models:
        return []
    return [rights_model.id for rights_model in rights_models]
def get_blog_post_summary_by_title(title):
    """Returns a domain object representing a blog post summary model.

    Args:
        title: str. The title of the blog post.

    Returns:
        BlogPostSummary or None. The domain object representing a blog post
        summary with the given title, or None if it does not exist.
    """
    # Renamed local to the plural form: fetch() returns a list of models,
    # not a single one. The stale `pylint: disable=singleton-comparison`
    # comment was removed (no singleton comparison occurs here).
    blog_post_summary_models = blog_models.BlogPostSummaryModel.query(
        blog_models.BlogPostSummaryModel.title == title
    ).fetch()
    if not blog_post_summary_models:
        return None
    return get_blog_post_summary_from_model(blog_post_summary_models[0])
def get_new_blog_post_id():
    """Returns a new blog post ID.

    Returns:
        str. A new blog post ID.
    """
    # ID generation (uniqueness checks, retries) is delegated entirely to
    # the storage layer.
    return blog_models.BlogPostModel.generate_new_blog_post_id()
def get_blog_post_rights_from_model(blog_post_rights_model):
    """Returns a blog post rights domain object given a blog post rights
    model loaded from the datastore.

    Args:
        blog_post_rights_model: BlogPostRightsModel. The blog post rights
            model loaded from the datastore.

    Returns:
        BlogPostRights. A blog post rights domain object corresponding to
        the given blog post rights model.
    """
    # Positional arguments must stay in the order expected by the
    # blog_domain.BlogPostRights constructor.
    return blog_domain.BlogPostRights(
        blog_post_rights_model.id,
        blog_post_rights_model.editor_ids,
        blog_post_rights_model.blog_post_is_published)
def get_blog_post_rights(blog_post_id, strict=True):
    """Retrieves the rights object for the given blog post.

    Args:
        blog_post_id: str. ID of the blog post.
        strict: bool. Whether to fail noisily if no blog post rights model
            with a given ID exists in the datastore.

    Returns:
        BlogPostRights or None. The rights object associated with the given
        blog post, or None when it does not exist (non-strict mode).

    Raises:
        EntityNotFoundError. The blog post with ID blog post id was not
            found in the datastore.
    """
    rights_model = blog_models.BlogPostRightsModel.get(
        blog_post_id, strict=strict)
    return (
        None if rights_model is None
        else get_blog_post_rights_from_model(rights_model))
def get_published_blog_post_summaries_by_user_id(user_id, max_limit):
"""Retrieves the summary objects for given number of published blog posts
for which the given user is an editor.
Args:
user_id: str. ID of the user.
max_limit: int. The number of models to be fetched.
Returns:
list(BlogPostSummary). The summary objects associated with the
blog posts assigned to given user.
"""
blog_rights_models = (
blog_models.BlogPostRightsModel.get_published_models_by_user(
user_id, max_limit))
if not blog_rights_models:
return None
blog_post_ids = [model.id for model in blog_rights_models]
blog_summary_models = (
blog_models.BlogPostSummaryModel.get_multi(blog_post_ids))
blog_post_summaries = [
get_blog_post_summary_from_model(model)
for model in blog_summary_models]
return blog_post_summaries
def does_blog_post_with_url_fragment_exist(url_fragment):
"""Checks if blog post with provided url fragment exists.
Args:
url_fragment: str. The url fragment for the blog post.
Returns:
bool. Whether the the url fragment for the blog post exists.
Raises:
Exception. Blog Post URL fragment is not a string.
"""
if not isinstance(url_fragment, python_utils.BASESTRING):
raise utils.ValidationError(
'Blog Post URL fragment should be a string. Recieved:'
'%s' % url_fragment)
existing_blog_post = get_blog_post_by_url_fragment(url_fragment)
return existing_blog_post is not None
def _save_blog_post(blog_post):
"""Saves a BlogPost domain object to the datastore.
Args:
blog_post: BlogPost. The blog post domain object for the given
blog post.
"""
model = blog_models.BlogPostModel.get(blog_post.id, strict=True)
blog_post.validate()
model.title = blog_post.title
model.content = blog_post.content
model.tags = blog_post.tags
model.published_on = blog_post.published_on
model.thumbnail_filename = blog_post.thumbnail_filename
model.url_fragment = blog_post.url_fragment
model.update_timestamps()
model.put()
def publish_blog_post(blog_post_id):
"""Marks the given blog post as published.
Args:
blog_post_id: str. The ID of the given blog post.
Raises:
Exception. The given blog post does not exist.
"""
blog_post_rights = get_blog_post_rights(blog_post_id, strict=False)
if blog_post_rights is None:
raise Exception('The given blog post does not exist')
blog_post = get_blog_post_by_id(blog_post_id, strict=True)
blog_post.validate(strict=True)
blog_post_summary = get_blog_post_summary_by_id(blog_post_id, strict=True)
blog_post_summary.validate(strict=True)
blog_post_rights.blog_post_is_published = True
published_on = datetime.datetime.utcnow()
blog_post.published_on = published_on
blog_post_summary.published_on = published_on
save_blog_post_rights(blog_post_rights)
_save_blog_post_summary(blog_post_summary)
_save_blog_post(blog_post)
def unpublish_blog_post(blog_post_id):
"""Marks the given blog post as unpublished or draft.
Args:
blog_post_id: str. The ID of the given blog post.
Raises:
Exception. The given blog post does not exist.
"""
blog_post_rights = get_blog_post_rights(blog_post_id, strict=False)
if blog_post_rights is None:
raise Exception('The given blog post does not exist')
blog_post_rights.blog_post_is_published = False
save_blog_post_rights(blog_post_rights)
def delete_blog_post(blog_post_id):
"""Deletes all the models related to a blog post.
Args:
blog_post_id: str. ID of the blog post which is to be
deleted.
"""
blog_models.BlogPostModel.get(blog_post_id).delete()
blog_models.BlogPostSummaryModel.get(blog_post_id).delete()
blog_models.BlogPostRightsModel.get(blog_post_id).delete()
def _save_blog_post_summary(blog_post_summary):
"""Saves a BlogPostSummary domain object to the datastore.
Args:
blog_post_summary: BlogPostSummary. The summary object for the given
blog post summary.
"""
model = blog_models.BlogPostSummaryModel.get(
blog_post_summary.id, strict=False)
if model:
model.author_id = blog_post_summary.author_id
model.title = blog_post_summary.title
model.summary = blog_post_summary.summary
model.tags = blog_post_summary.tags
model.published_on = blog_post_summary.published_on
model.thumbnail_filename = blog_post_summary.thumbnail_filename
model.url_fragment = blog_post_summary.url_fragment
else:
model = blog_models.BlogPostSummaryModel(
id=blog_post_summary.id,
author_id=blog_post_summary.author_id,
title=blog_post_summary.title,
summary=blog_post_summary.summary,
tags=blog_post_summary.tags,
published_on=blog_post_summary.published_on,
thumbnail_filename=blog_post_summary.thumbnail_filename,
url_fragment=blog_post_summary.url_fragment,
)
model.update_timestamps()
model.put()
def save_blog_post_rights(blog_post_rights):
"""Saves a BlogPostRights domain object to the datastore.
Args:
blog_post_rights: BlogPostRights. The rights object for the given
blog post.
"""
model = blog_models.BlogPostRightsModel.get(
blog_post_rights.id, strict=True)
model.editor_ids = blog_post_rights.editor_ids
model.blog_post_is_published = blog_post_rights.blog_post_is_published
model.update_timestamps()
model.put()
def check_can_edit_blog_post(user, blog_post_rights):
"""Checks whether the user can edit the given blog post.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
blog_post_rights: BlogPostRights or None. Rights object for the given
blog post.
Returns:
bool. Whether the given user can edit the given blog post.
"""
if blog_post_rights is None:
return False
if role_services.ACTION_EDIT_ANY_BLOG_POST in user.actions:
return True
if blog_post_rights.is_editor(user.user_id):
return True
return False
def deassign_user_from_all_blog_posts(user_id):
"""Removes the user from the list of editor_ids for all blog posts.
Args:
user_id: str. ID to be removed from editor_ids.
"""
blog_models.BlogPostRightsModel.deassign_user_from_all_blog_posts(
user_id)
def generate_url_fragment(title, blog_post_id):
"""Generates the url fragment for a blog post from the title of the blog
post.
Args:
title: str. The title of the blog post.
blog_post_id: str. The unique blog post ID.
Returns:
str. The url fragment of the blog post.
"""
lower_title = title.lower()
hyphenated_title = lower_title.replace(' ', '-')
lower_id = blog_post_id.lower()
return hyphenated_title + '-' + lower_id
def generate_summary_of_blog_post(content):
"""Generates the summary for a blog post from the content of the blog
post.
Args:
content: santized html str. The blog post content to be truncated.
Returns:
str. The summary of the blog post.
"""
raw_text = html_cleaner.strip_html_tags(content)
max_chars_in_summary = constants.MAX_CHARS_IN_BLOG_POST_SUMMARY - 3
summary = raw_text[:max_chars_in_summary] + '...'
return summary
def compute_summary_of_blog_post(blog_post):
"""Creates BlogPostSummary domain object from BlogPost domain object.
Args:
blog_post: BlogPost. The blog post domain object.
Returns:
BlogPostSummary. The blog post summary domain object.
"""
summary = generate_summary_of_blog_post(blog_post.content)
return blog_domain.BlogPostSummary(
blog_post.id,
blog_post.author_id,
blog_post.title,
summary,
blog_post.url_fragment,
blog_post.tags,
blog_post.thumbnail_filename,
blog_post.last_updated,
blog_post.published_on)
def apply_change_dict(blog_post_id, change_dict):
"""Applies a changelist to blog post and returns the result.
Args:
blog_post_id: str. ID of the given blog post.
change_dict: dict. A dict containing all the changes keyed
by corresponding field name (title, content,
thumbnail_filename, tags).
Returns:
UpdatedBlogPost. The modified blog post object.
"""
blog_post = get_blog_post_by_id(blog_post_id)
if 'title' in change_dict:
blog_post.update_title(change_dict['title'])
url_fragment = generate_url_fragment(
change_dict['title'], blog_post_id)
blog_post.update_url_fragment(url_fragment)
if 'thumbnail_filename' in change_dict:
blog_post.update_thumbnail_filename(change_dict['thumbnail_filename'])
if 'content' in change_dict:
blog_post.update_content(change_dict['content'])
if 'tags' in change_dict:
blog_post.update_tags(change_dict['tags'])
return blog_post
def update_blog_post(blog_post_id, change_dict):
"""Updates the blog post and its summary model in the datastore.
Args:
blog_post_id: str. The ID of the blog post which is to be updated.
change_dict: dict. A dict containing all the changes keyed by
corresponding field name (title, content, thumbnail_filename,
tags).
"""
updated_blog_post = apply_change_dict(blog_post_id, change_dict)
if 'title' in change_dict:
blog_post_models = blog_models.BlogPostModel.query().filter(
blog_models.BlogPostModel.title == updated_blog_post.title
).filter(blog_models.BlogPostModel.deleted == False).fetch() # pylint: disable=singleton-comparison
if blog_post_models != []:
raise utils.ValidationError(
'Blog Post with given title already exists: %s'
% updated_blog_post.title)
_save_blog_post(updated_blog_post)
updated_blog_post_summary = compute_summary_of_blog_post(updated_blog_post)
_save_blog_post_summary(updated_blog_post_summary)
def create_new_blog_post(author_id):
"""Creates models for new blog post and returns new BlogPost domain
object.
Args:
author_id: str. The user ID of the author for new blog post.
Returns:
BlogPost. A newly created blog post domain object .
"""
blog_post_id = get_new_blog_post_id()
new_blog_post_model = blog_models.BlogPostModel.create(
blog_post_id, author_id
)
blog_models.BlogPostRightsModel.create(blog_post_id, author_id)
new_blog_post = get_blog_post_from_model(new_blog_post_model)
new_blog_post_summary_model = compute_summary_of_blog_post(new_blog_post)
_save_blog_post_summary(new_blog_post_summary_model)
return new_blog_post
def get_published_blog_post_summaries(offset=0):
"""Returns published BlogPostSummaries list.
Args:
offset: int. Number of query results to skip from top.
Returns:
list(BlogPostSummaries) | None . These are sorted in order of the date
published. None if no blog post is published.
"""
max_limit = feconf.MAX_NUM_CARDS_TO_DISPLAY_ON_BLOG_HOMEPAGE
blog_post_rights_models = blog_models.BlogPostRightsModel.query(
blog_models.BlogPostRightsModel.blog_post_is_published == True).order( # pylint: disable=singleton-comparison
-blog_models.BlogPostRightsModel.last_updated).fetch(
max_limit, offset=offset)
if len(blog_post_rights_models) == 0:
return None
blog_post_ids = [model.id for model in blog_post_rights_models]
blog_post_summary_models = (
blog_models.BlogPostSummaryModel.get_multi(blog_post_ids))
blog_post_summaries = []
blog_post_summaries = [
get_blog_post_summary_from_model(model) if model is not None else None
for model in blog_post_summary_models]
return blog_post_summaries
| 33.849515 | 117 | 0.713849 |
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
from constants import constants
from core.domain import blog_domain
from core.domain import html_cleaner
from core.domain import role_services
from core.platform import models
import feconf
import python_utils
import utils
(blog_models,) = models.Registry.import_models([models.NAMES.blog])
datastore_services = models.Registry.import_datastore_services()
def get_blog_post_from_model(blog_post_model):
return blog_domain.BlogPost(
blog_post_model.id,
blog_post_model.author_id,
blog_post_model.title,
blog_post_model.content,
blog_post_model.url_fragment,
blog_post_model.tags,
blog_post_model.thumbnail_filename,
blog_post_model.last_updated,
blog_post_model.published_on)
def get_blog_post_by_id(blog_post_id, strict=True):
blog_post_model = blog_models.BlogPostModel.get(blog_post_id, strict=strict)
if blog_post_model:
return get_blog_post_from_model(blog_post_model)
else:
return None
def get_blog_post_by_url_fragment(url_fragment):
blog_post_model = (
blog_models.BlogPostModel.get_by_url_fragment(url_fragment))
if blog_post_model is None:
return None
return get_blog_post_from_model(blog_post_model)
def get_blog_post_summary_from_model(blog_post_summary_model):
return blog_domain.BlogPostSummary(
blog_post_summary_model.id,
blog_post_summary_model.author_id,
blog_post_summary_model.title,
blog_post_summary_model.summary,
blog_post_summary_model.url_fragment,
blog_post_summary_model.tags,
blog_post_summary_model.thumbnail_filename,
blog_post_summary_model.last_updated,
blog_post_summary_model.published_on)
def get_blog_post_summary_by_id(blog_post_id, strict=False):
blog_post_summary_model = blog_models.BlogPostSummaryModel.get(
blog_post_id, strict=strict)
if blog_post_summary_model:
blog_post_summary = get_blog_post_summary_from_model(
blog_post_summary_model)
return blog_post_summary
else:
return None
def get_blog_post_summary_models_list_by_user_id(
user_id, blog_post_is_published):
blog_post_ids = filter_blog_post_ids(user_id, blog_post_is_published)
blog_post_summary_models = (
blog_models.BlogPostSummaryModel.get_multi(blog_post_ids))
blog_post_summaries = []
blog_post_summaries = [
get_blog_post_summary_from_model(model) if model is not None else None
for model in blog_post_summary_models]
return blog_post_summaries if len(blog_post_summaries) != 0 else None
def filter_blog_post_ids(user_id, blog_post_is_published):
blog_post_rights_models = blog_models.BlogPostRightsModel.query(
blog_models.BlogPostRightsModel.editor_ids == user_id,
blog_models.BlogPostRightsModel.blog_post_is_published == (
blog_post_is_published)).fetch()
model_ids = []
if blog_post_rights_models:
for model in blog_post_rights_models:
model_ids.append(model.id)
return model_ids
def get_blog_post_summary_by_title(title):
blog_post_summary_model = blog_models.BlogPostSummaryModel.query(
blog_models.BlogPostSummaryModel.title == title
).fetch()
if len(blog_post_summary_model) == 0:
return None
return get_blog_post_summary_from_model(blog_post_summary_model[0])
def get_new_blog_post_id():
return blog_models.BlogPostModel.generate_new_blog_post_id()
def get_blog_post_rights_from_model(blog_post_rights_model):
return blog_domain.BlogPostRights(
blog_post_rights_model.id,
blog_post_rights_model.editor_ids,
blog_post_rights_model.blog_post_is_published)
def get_blog_post_rights(blog_post_id, strict=True):
model = blog_models.BlogPostRightsModel.get(blog_post_id, strict=strict)
if model is None:
return None
return get_blog_post_rights_from_model(model)
def get_published_blog_post_summaries_by_user_id(user_id, max_limit):
blog_rights_models = (
blog_models.BlogPostRightsModel.get_published_models_by_user(
user_id, max_limit))
if not blog_rights_models:
return None
blog_post_ids = [model.id for model in blog_rights_models]
blog_summary_models = (
blog_models.BlogPostSummaryModel.get_multi(blog_post_ids))
blog_post_summaries = [
get_blog_post_summary_from_model(model)
for model in blog_summary_models]
return blog_post_summaries
def does_blog_post_with_url_fragment_exist(url_fragment):
if not isinstance(url_fragment, python_utils.BASESTRING):
raise utils.ValidationError(
'Blog Post URL fragment should be a string. Recieved:'
'%s' % url_fragment)
existing_blog_post = get_blog_post_by_url_fragment(url_fragment)
return existing_blog_post is not None
def _save_blog_post(blog_post):
model = blog_models.BlogPostModel.get(blog_post.id, strict=True)
blog_post.validate()
model.title = blog_post.title
model.content = blog_post.content
model.tags = blog_post.tags
model.published_on = blog_post.published_on
model.thumbnail_filename = blog_post.thumbnail_filename
model.url_fragment = blog_post.url_fragment
model.update_timestamps()
model.put()
def publish_blog_post(blog_post_id):
blog_post_rights = get_blog_post_rights(blog_post_id, strict=False)
if blog_post_rights is None:
raise Exception('The given blog post does not exist')
blog_post = get_blog_post_by_id(blog_post_id, strict=True)
blog_post.validate(strict=True)
blog_post_summary = get_blog_post_summary_by_id(blog_post_id, strict=True)
blog_post_summary.validate(strict=True)
blog_post_rights.blog_post_is_published = True
published_on = datetime.datetime.utcnow()
blog_post.published_on = published_on
blog_post_summary.published_on = published_on
save_blog_post_rights(blog_post_rights)
_save_blog_post_summary(blog_post_summary)
_save_blog_post(blog_post)
def unpublish_blog_post(blog_post_id):
blog_post_rights = get_blog_post_rights(blog_post_id, strict=False)
if blog_post_rights is None:
raise Exception('The given blog post does not exist')
blog_post_rights.blog_post_is_published = False
save_blog_post_rights(blog_post_rights)
def delete_blog_post(blog_post_id):
blog_models.BlogPostModel.get(blog_post_id).delete()
blog_models.BlogPostSummaryModel.get(blog_post_id).delete()
blog_models.BlogPostRightsModel.get(blog_post_id).delete()
def _save_blog_post_summary(blog_post_summary):
model = blog_models.BlogPostSummaryModel.get(
blog_post_summary.id, strict=False)
if model:
model.author_id = blog_post_summary.author_id
model.title = blog_post_summary.title
model.summary = blog_post_summary.summary
model.tags = blog_post_summary.tags
model.published_on = blog_post_summary.published_on
model.thumbnail_filename = blog_post_summary.thumbnail_filename
model.url_fragment = blog_post_summary.url_fragment
else:
model = blog_models.BlogPostSummaryModel(
id=blog_post_summary.id,
author_id=blog_post_summary.author_id,
title=blog_post_summary.title,
summary=blog_post_summary.summary,
tags=blog_post_summary.tags,
published_on=blog_post_summary.published_on,
thumbnail_filename=blog_post_summary.thumbnail_filename,
url_fragment=blog_post_summary.url_fragment,
)
model.update_timestamps()
model.put()
def save_blog_post_rights(blog_post_rights):
model = blog_models.BlogPostRightsModel.get(
blog_post_rights.id, strict=True)
model.editor_ids = blog_post_rights.editor_ids
model.blog_post_is_published = blog_post_rights.blog_post_is_published
model.update_timestamps()
model.put()
def check_can_edit_blog_post(user, blog_post_rights):
if blog_post_rights is None:
return False
if role_services.ACTION_EDIT_ANY_BLOG_POST in user.actions:
return True
if blog_post_rights.is_editor(user.user_id):
return True
return False
def deassign_user_from_all_blog_posts(user_id):
blog_models.BlogPostRightsModel.deassign_user_from_all_blog_posts(
user_id)
def generate_url_fragment(title, blog_post_id):
lower_title = title.lower()
hyphenated_title = lower_title.replace(' ', '-')
lower_id = blog_post_id.lower()
return hyphenated_title + '-' + lower_id
def generate_summary_of_blog_post(content):
raw_text = html_cleaner.strip_html_tags(content)
max_chars_in_summary = constants.MAX_CHARS_IN_BLOG_POST_SUMMARY - 3
summary = raw_text[:max_chars_in_summary] + '...'
return summary
def compute_summary_of_blog_post(blog_post):
summary = generate_summary_of_blog_post(blog_post.content)
return blog_domain.BlogPostSummary(
blog_post.id,
blog_post.author_id,
blog_post.title,
summary,
blog_post.url_fragment,
blog_post.tags,
blog_post.thumbnail_filename,
blog_post.last_updated,
blog_post.published_on)
def apply_change_dict(blog_post_id, change_dict):
blog_post = get_blog_post_by_id(blog_post_id)
if 'title' in change_dict:
blog_post.update_title(change_dict['title'])
url_fragment = generate_url_fragment(
change_dict['title'], blog_post_id)
blog_post.update_url_fragment(url_fragment)
if 'thumbnail_filename' in change_dict:
blog_post.update_thumbnail_filename(change_dict['thumbnail_filename'])
if 'content' in change_dict:
blog_post.update_content(change_dict['content'])
if 'tags' in change_dict:
blog_post.update_tags(change_dict['tags'])
return blog_post
def update_blog_post(blog_post_id, change_dict):
updated_blog_post = apply_change_dict(blog_post_id, change_dict)
if 'title' in change_dict:
blog_post_models = blog_models.BlogPostModel.query().filter(
blog_models.BlogPostModel.title == updated_blog_post.title
).filter(blog_models.BlogPostModel.deleted == False).fetch()
if blog_post_models != []:
raise utils.ValidationError(
'Blog Post with given title already exists: %s'
% updated_blog_post.title)
_save_blog_post(updated_blog_post)
updated_blog_post_summary = compute_summary_of_blog_post(updated_blog_post)
_save_blog_post_summary(updated_blog_post_summary)
def create_new_blog_post(author_id):
blog_post_id = get_new_blog_post_id()
new_blog_post_model = blog_models.BlogPostModel.create(
blog_post_id, author_id
)
blog_models.BlogPostRightsModel.create(blog_post_id, author_id)
new_blog_post = get_blog_post_from_model(new_blog_post_model)
new_blog_post_summary_model = compute_summary_of_blog_post(new_blog_post)
_save_blog_post_summary(new_blog_post_summary_model)
return new_blog_post
def get_published_blog_post_summaries(offset=0):
max_limit = feconf.MAX_NUM_CARDS_TO_DISPLAY_ON_BLOG_HOMEPAGE
blog_post_rights_models = blog_models.BlogPostRightsModel.query(
blog_models.BlogPostRightsModel.blog_post_is_published == True).order(
-blog_models.BlogPostRightsModel.last_updated).fetch(
max_limit, offset=offset)
if len(blog_post_rights_models) == 0:
return None
blog_post_ids = [model.id for model in blog_post_rights_models]
blog_post_summary_models = (
blog_models.BlogPostSummaryModel.get_multi(blog_post_ids))
blog_post_summaries = []
blog_post_summaries = [
get_blog_post_summary_from_model(model) if model is not None else None
for model in blog_post_summary_models]
return blog_post_summaries
| true | true |
1c3871e35a981db48c53da28b1aaaae1d572059f | 298 | py | Python | external/odds/__init__.py | dmartin35/pronosfoot | 861ae091fbb65684d00ae8c5ff3726eeaa306ed7 | [
"MIT"
] | 4 | 2017-07-09T21:18:18.000Z | 2022-02-16T13:05:10.000Z | external/odds/__init__.py | dmartin35/pronosfoot | 861ae091fbb65684d00ae8c5ff3726eeaa306ed7 | [
"MIT"
] | 2 | 2020-01-16T14:21:42.000Z | 2021-03-02T18:34:15.000Z | external/odds/__init__.py | dmartin35/pronosfoot | 861ae091fbb65684d00ae8c5ff3726eeaa306ed7 | [
"MIT"
] | null | null | null | from external.odds.betclic.api import get_odds
# FDJ parsing is broken - their UI has been refactored with JS framework &
# protected async JSON API usage (requires HEADERS) and more complex to isolate & group match odds
# hence move to another betting website - which is still full html rendered
| 49.666667 | 98 | 0.788591 | from external.odds.betclic.api import get_odds
| true | true |
1c3872127e305c1d6af42c66f910cbac6696d91c | 72,366 | py | Python | Python-Client/project/myService.py | d0d0d0/Persona | cbea1dfaae1d44b286d9b350ccba36bc7fca4a5a | [
"MIT"
] | null | null | null | Python-Client/project/myService.py | d0d0d0/Persona | cbea1dfaae1d44b286d9b350ccba36bc7fca4a5a | [
"MIT"
] | null | null | null | Python-Client/project/myService.py | d0d0d0/Persona | cbea1dfaae1d44b286d9b350ccba36bc7fca4a5a | [
"MIT"
] | null | null | null | #
# Autogenerated by Thrift Compiler (0.9.1)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
"""
The Thrift Service API of the Application
"""
def registerRequest(self, username, password, name, email):
"""
Parameters:
- username
- password
- name
- email
"""
pass
def login(self, username, password, mac):
"""
Parameters:
- username
- password
- mac
"""
pass
def logout(self, mac, key):
"""
Parameters:
- mac
- key
"""
pass
def addDevice(self, mac, devicename, certfile, capabilities, rsakey, key):
"""
Parameters:
- mac
- devicename
- certfile
- capabilities
- rsakey
- key
"""
pass
def renameDevice(self, mac, devicename, key):
"""
Parameters:
- mac
- devicename
- key
"""
pass
def updateIp(self, mac, ip, key):
"""
Parameters:
- mac
- ip
- key
"""
pass
def getDevices(self, key):
"""
Parameters:
- key
"""
pass
def getGroups(self, friends, key):
"""
Parameters:
- friends
- key
"""
pass
def addGroup(self, gname, key):
"""
Parameters:
- gname
- key
"""
pass
def addUserToGroup(self, gid, username, key):
"""
Parameters:
- gid
- username
- key
"""
pass
def addDeviceToGroup(self, gid, device, key):
"""
Parameters:
- gid
- device
- key
"""
pass
def addDeviceToFacebook(self, device, key):
"""
Parameters:
- device
- key
"""
pass
class Client(Iface):
"""
The Thrift Service API of the Application
"""
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def registerRequest(self, username, password, name, email):
"""
Parameters:
- username
- password
- name
- email
"""
self.send_registerRequest(username, password, name, email)
return self.recv_registerRequest()
def send_registerRequest(self, username, password, name, email):
self._oprot.writeMessageBegin('registerRequest', TMessageType.CALL, self._seqid)
args = registerRequest_args()
args.username = username
args.password = password
args.name = name
args.email = email
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_registerRequest(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = registerRequest_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "registerRequest failed: unknown result");
def login(self, username, password, mac):
"""
Parameters:
- username
- password
- mac
"""
self.send_login(username, password, mac)
return self.recv_login()
def send_login(self, username, password, mac):
self._oprot.writeMessageBegin('login', TMessageType.CALL, self._seqid)
args = login_args()
args.username = username
args.password = password
args.mac = mac
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_login(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = login_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "login failed: unknown result");
def logout(self, mac, key):
"""
Parameters:
- mac
- key
"""
self.send_logout(mac, key)
return self.recv_logout()
def send_logout(self, mac, key):
self._oprot.writeMessageBegin('logout', TMessageType.CALL, self._seqid)
args = logout_args()
args.mac = mac
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_logout(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = logout_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "logout failed: unknown result");
def addDevice(self, mac, devicename, certfile, capabilities, rsakey, key):
"""
Parameters:
- mac
- devicename
- certfile
- capabilities
- rsakey
- key
"""
self.send_addDevice(mac, devicename, certfile, capabilities, rsakey, key)
return self.recv_addDevice()
def send_addDevice(self, mac, devicename, certfile, capabilities, rsakey, key):
self._oprot.writeMessageBegin('addDevice', TMessageType.CALL, self._seqid)
args = addDevice_args()
args.mac = mac
args.devicename = devicename
args.certfile = certfile
args.capabilities = capabilities
args.rsakey = rsakey
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_addDevice(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = addDevice_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "addDevice failed: unknown result");
def renameDevice(self, mac, devicename, key):
"""
Parameters:
- mac
- devicename
- key
"""
self.send_renameDevice(mac, devicename, key)
return self.recv_renameDevice()
def send_renameDevice(self, mac, devicename, key):
self._oprot.writeMessageBegin('renameDevice', TMessageType.CALL, self._seqid)
args = renameDevice_args()
args.mac = mac
args.devicename = devicename
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_renameDevice(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = renameDevice_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "renameDevice failed: unknown result");
def updateIp(self, mac, ip, key):
"""
Parameters:
- mac
- ip
- key
"""
self.send_updateIp(mac, ip, key)
return self.recv_updateIp()
def send_updateIp(self, mac, ip, key):
self._oprot.writeMessageBegin('updateIp', TMessageType.CALL, self._seqid)
args = updateIp_args()
args.mac = mac
args.ip = ip
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_updateIp(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = updateIp_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "updateIp failed: unknown result");
def getDevices(self, key):
"""
Parameters:
- key
"""
self.send_getDevices(key)
return self.recv_getDevices()
def send_getDevices(self, key):
self._oprot.writeMessageBegin('getDevices', TMessageType.CALL, self._seqid)
args = getDevices_args()
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getDevices(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getDevices_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getDevices failed: unknown result");
def getGroups(self, friends, key):
"""
Parameters:
- friends
- key
"""
self.send_getGroups(friends, key)
return self.recv_getGroups()
def send_getGroups(self, friends, key):
self._oprot.writeMessageBegin('getGroups', TMessageType.CALL, self._seqid)
args = getGroups_args()
args.friends = friends
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getGroups(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getGroups_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getGroups failed: unknown result");
def addGroup(self, gname, key):
"""
Parameters:
- gname
- key
"""
self.send_addGroup(gname, key)
return self.recv_addGroup()
def send_addGroup(self, gname, key):
self._oprot.writeMessageBegin('addGroup', TMessageType.CALL, self._seqid)
args = addGroup_args()
args.gname = gname
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_addGroup(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = addGroup_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "addGroup failed: unknown result");
def addUserToGroup(self, gid, username, key):
"""
Parameters:
- gid
- username
- key
"""
self.send_addUserToGroup(gid, username, key)
return self.recv_addUserToGroup()
  def send_addUserToGroup(self, gid, username, key):
    # Emit an addUserToGroup CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('addUserToGroup', TMessageType.CALL, self._seqid)
    args = addUserToGroup_args()
    args.gid = gid
    args.username = username
    args.key = key
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
  def recv_addUserToGroup(self):
    # Read the reply frame; EXCEPTION frames are decoded and re-raised locally.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(self._iprot)
      self._iprot.readMessageEnd()
      raise x
    result = addUserToGroup_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    # Non-void method with no result field set: report a missing result.
    raise TApplicationException(TApplicationException.MISSING_RESULT, "addUserToGroup failed: unknown result");
def addDeviceToGroup(self, gid, device, key):
"""
Parameters:
- gid
- device
- key
"""
self.send_addDeviceToGroup(gid, device, key)
return self.recv_addDeviceToGroup()
  def send_addDeviceToGroup(self, gid, device, key):
    # Emit an addDeviceToGroup CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('addDeviceToGroup', TMessageType.CALL, self._seqid)
    args = addDeviceToGroup_args()
    args.gid = gid
    args.device = device
    args.key = key
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
  def recv_addDeviceToGroup(self):
    # Read the reply frame; EXCEPTION frames are decoded and re-raised locally.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(self._iprot)
      self._iprot.readMessageEnd()
      raise x
    result = addDeviceToGroup_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    # Non-void method with no result field set: report a missing result.
    raise TApplicationException(TApplicationException.MISSING_RESULT, "addDeviceToGroup failed: unknown result");
def addDeviceToFacebook(self, device, key):
"""
Parameters:
- device
- key
"""
self.send_addDeviceToFacebook(device, key)
return self.recv_addDeviceToFacebook()
  def send_addDeviceToFacebook(self, device, key):
    # Emit an addDeviceToFacebook CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('addDeviceToFacebook', TMessageType.CALL, self._seqid)
    args = addDeviceToFacebook_args()
    args.device = device
    args.key = key
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
  def recv_addDeviceToFacebook(self):
    # Read the reply frame; EXCEPTION frames are decoded and re-raised locally.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
      x = TApplicationException()
      x.read(self._iprot)
      self._iprot.readMessageEnd()
      raise x
    result = addDeviceToFacebook_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.success is not None:
      return result.success
    # Non-void method with no result field set: report a missing result.
    raise TApplicationException(TApplicationException.MISSING_RESULT, "addDeviceToFacebook failed: unknown result");
class Processor(Iface, TProcessor):
  """Server-side dispatcher: routes each incoming RPC name to its process_* method."""
  def __init__(self, handler):
    # handler implements Iface and carries the actual application logic.
    self._handler = handler
    self._processMap = {}
    self._processMap["registerRequest"] = Processor.process_registerRequest
    self._processMap["login"] = Processor.process_login
    self._processMap["logout"] = Processor.process_logout
    self._processMap["addDevice"] = Processor.process_addDevice
    self._processMap["renameDevice"] = Processor.process_renameDevice
    self._processMap["updateIp"] = Processor.process_updateIp
    self._processMap["getDevices"] = Processor.process_getDevices
    self._processMap["getGroups"] = Processor.process_getGroups
    self._processMap["addGroup"] = Processor.process_addGroup
    self._processMap["addUserToGroup"] = Processor.process_addUserToGroup
    self._processMap["addDeviceToGroup"] = Processor.process_addDeviceToGroup
    self._processMap["addDeviceToFacebook"] = Processor.process_addDeviceToFacebook
  def process(self, iprot, oprot):
    # Handle exactly one message from iprot, writing the reply to oprot.
    (name, type, seqid) = iprot.readMessageBegin()
    if name not in self._processMap:
      # Unknown method: drain the args struct and answer with UNKNOWN_METHOD.
      iprot.skip(TType.STRUCT)
      iprot.readMessageEnd()
      x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
      oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
      x.write(oprot)
      oprot.writeMessageEnd()
      oprot.trans.flush()
      return
    else:
      self._processMap[name](self, seqid, iprot, oprot)
    return True
  # Each process_* method follows the same generated pattern:
  # decode the args struct, invoke the handler, then encode a REPLY frame.
  def process_registerRequest(self, seqid, iprot, oprot):
    args = registerRequest_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = registerRequest_result()
    result.success = self._handler.registerRequest(args.username, args.password, args.name, args.email)
    oprot.writeMessageBegin("registerRequest", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_login(self, seqid, iprot, oprot):
    args = login_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = login_result()
    result.success = self._handler.login(args.username, args.password, args.mac)
    oprot.writeMessageBegin("login", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_logout(self, seqid, iprot, oprot):
    args = logout_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = logout_result()
    result.success = self._handler.logout(args.mac, args.key)
    oprot.writeMessageBegin("logout", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_addDevice(self, seqid, iprot, oprot):
    args = addDevice_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = addDevice_result()
    result.success = self._handler.addDevice(args.mac, args.devicename, args.certfile, args.capabilities, args.rsakey, args.key)
    oprot.writeMessageBegin("addDevice", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_renameDevice(self, seqid, iprot, oprot):
    args = renameDevice_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = renameDevice_result()
    result.success = self._handler.renameDevice(args.mac, args.devicename, args.key)
    oprot.writeMessageBegin("renameDevice", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_updateIp(self, seqid, iprot, oprot):
    args = updateIp_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = updateIp_result()
    result.success = self._handler.updateIp(args.mac, args.ip, args.key)
    oprot.writeMessageBegin("updateIp", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_getDevices(self, seqid, iprot, oprot):
    args = getDevices_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getDevices_result()
    result.success = self._handler.getDevices(args.key)
    oprot.writeMessageBegin("getDevices", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_getGroups(self, seqid, iprot, oprot):
    args = getGroups_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getGroups_result()
    result.success = self._handler.getGroups(args.friends, args.key)
    oprot.writeMessageBegin("getGroups", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_addGroup(self, seqid, iprot, oprot):
    args = addGroup_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = addGroup_result()
    result.success = self._handler.addGroup(args.gname, args.key)
    oprot.writeMessageBegin("addGroup", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_addUserToGroup(self, seqid, iprot, oprot):
    args = addUserToGroup_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = addUserToGroup_result()
    result.success = self._handler.addUserToGroup(args.gid, args.username, args.key)
    oprot.writeMessageBegin("addUserToGroup", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_addDeviceToGroup(self, seqid, iprot, oprot):
    args = addDeviceToGroup_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = addDeviceToGroup_result()
    result.success = self._handler.addDeviceToGroup(args.gid, args.device, args.key)
    oprot.writeMessageBegin("addDeviceToGroup", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_addDeviceToFacebook(self, seqid, iprot, oprot):
    args = addDeviceToFacebook_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = addDeviceToFacebook_result()
    result.success = self._handler.addDeviceToFacebook(args.device, args.key)
    oprot.writeMessageBegin("addDeviceToFacebook", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class registerRequest_args:
  """
  Attributes:
   - username
   - password
   - name
   - email
  """
  # Field spec indexed by thrift field id: (id, type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'username', None, None, ), # 1
    (2, TType.STRING, 'password', None, None, ), # 2
    (3, TType.STRING, 'name', None, None, ), # 3
    (4, TType.STRING, 'email', None, None, ), # 4
  )
  def __init__(self, username=None, password=None, name=None, email=None,):
    self.username = username
    self.password = password
    self.name = name
    self.email = email
  def read(self, iprot):
    # Deserialize from iprot, using the C-accelerated fastbinary path if available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: consume fields until STOP, skipping unknown or mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.username = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.password = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.name = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.email = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize to oprot, using the C-accelerated fastbinary path if available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('registerRequest_args')
    if self.username is not None:
      oprot.writeFieldBegin('username', TType.STRING, 1)
      oprot.writeString(self.username)
      oprot.writeFieldEnd()
    if self.password is not None:
      oprot.writeFieldBegin('password', TType.STRING, 2)
      oprot.writeString(self.password)
      oprot.writeFieldEnd()
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 3)
      oprot.writeString(self.name)
      oprot.writeFieldEnd()
    if self.email is not None:
      oprot.writeFieldBegin('email', TType.STRING, 4)
      oprot.writeString(self.email)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields in the IDL, so nothing to enforce.
    return
  def __repr__(self):
    # items() instead of the Python-2-only iteritems() keeps __repr__ working on py3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class registerRequest_result:
  """
  Attributes:
   - success
  """
  # Field spec indexed by thrift field id: (id, type, name, type args, default).
  thrift_spec = (
    (0, TType.I32, 'success', None, None, ), # 0
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    # Deserialize from iprot, using the C-accelerated fastbinary path if available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: consume fields until STOP, skipping unknown or mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize to oprot, using the C-accelerated fastbinary path if available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('registerRequest_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields in the IDL, so nothing to enforce.
    return
  def __repr__(self):
    # items() instead of the Python-2-only iteritems() keeps __repr__ working on py3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class login_args:
  """
  Attributes:
   - username
   - password
   - mac
  """
  # Field spec indexed by thrift field id: (id, type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'username', None, None, ), # 1
    (2, TType.STRING, 'password', None, None, ), # 2
    (3, TType.STRING, 'mac', None, None, ), # 3
  )
  def __init__(self, username=None, password=None, mac=None,):
    self.username = username
    self.password = password
    self.mac = mac
  def read(self, iprot):
    # Deserialize from iprot, using the C-accelerated fastbinary path if available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: consume fields until STOP, skipping unknown or mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.username = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.password = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.mac = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize to oprot, using the C-accelerated fastbinary path if available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('login_args')
    if self.username is not None:
      oprot.writeFieldBegin('username', TType.STRING, 1)
      oprot.writeString(self.username)
      oprot.writeFieldEnd()
    if self.password is not None:
      oprot.writeFieldBegin('password', TType.STRING, 2)
      oprot.writeString(self.password)
      oprot.writeFieldEnd()
    if self.mac is not None:
      oprot.writeFieldBegin('mac', TType.STRING, 3)
      oprot.writeString(self.mac)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields in the IDL, so nothing to enforce.
    return
  def __repr__(self):
    # items() instead of the Python-2-only iteritems() keeps __repr__ working on py3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class login_result:
  """
  Attributes:
   - success
  """
  # Field spec indexed by thrift field id: (id, type, name, type args, default).
  thrift_spec = (
    (0, TType.STRING, 'success', None, None, ), # 0
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    # Deserialize from iprot, using the C-accelerated fastbinary path if available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: consume fields until STOP, skipping unknown or mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRING:
          self.success = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize to oprot, using the C-accelerated fastbinary path if available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('login_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRING, 0)
      oprot.writeString(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields in the IDL, so nothing to enforce.
    return
  def __repr__(self):
    # items() instead of the Python-2-only iteritems() keeps __repr__ working on py3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class logout_args:
  """
  Attributes:
   - mac
   - key
  """
  # Field spec indexed by thrift field id: (id, type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'mac', None, None, ), # 1
    (2, TType.STRING, 'key', None, None, ), # 2
  )
  def __init__(self, mac=None, key=None,):
    self.mac = mac
    self.key = key
  def read(self, iprot):
    # Deserialize from iprot, using the C-accelerated fastbinary path if available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: consume fields until STOP, skipping unknown or mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.mac = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.key = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize to oprot, using the C-accelerated fastbinary path if available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('logout_args')
    if self.mac is not None:
      oprot.writeFieldBegin('mac', TType.STRING, 1)
      oprot.writeString(self.mac)
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 2)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields in the IDL, so nothing to enforce.
    return
  def __repr__(self):
    # items() instead of the Python-2-only iteritems() keeps __repr__ working on py3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class logout_result:
  """
  Attributes:
   - success
  """
  # Field spec indexed by thrift field id: (id, type, name, type args, default).
  thrift_spec = (
    (0, TType.I32, 'success', None, None, ), # 0
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    # Deserialize from iprot, using the C-accelerated fastbinary path if available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: consume fields until STOP, skipping unknown or mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize to oprot, using the C-accelerated fastbinary path if available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('logout_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields in the IDL, so nothing to enforce.
    return
  def __repr__(self):
    # items() instead of the Python-2-only iteritems() keeps __repr__ working on py3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class addDevice_args:
  """
  Attributes:
   - mac
   - devicename
   - certfile
   - capabilities
   - rsakey
   - key
  """
  # Field spec indexed by thrift field id: (id, type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'mac', None, None, ), # 1
    (2, TType.STRING, 'devicename', None, None, ), # 2
    (3, TType.STRING, 'certfile', None, None, ), # 3
    (4, TType.STRING, 'capabilities', None, None, ), # 4
    (5, TType.STRING, 'rsakey', None, None, ), # 5
    (6, TType.STRING, 'key', None, None, ), # 6
  )
  def __init__(self, mac=None, devicename=None, certfile=None, capabilities=None, rsakey=None, key=None,):
    self.mac = mac
    self.devicename = devicename
    self.certfile = certfile
    self.capabilities = capabilities
    self.rsakey = rsakey
    self.key = key
  def read(self, iprot):
    # Deserialize from iprot, using the C-accelerated fastbinary path if available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: consume fields until STOP, skipping unknown or mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.mac = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.devicename = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.certfile = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.capabilities = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.STRING:
          self.rsakey = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.STRING:
          self.key = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize to oprot, using the C-accelerated fastbinary path if available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addDevice_args')
    if self.mac is not None:
      oprot.writeFieldBegin('mac', TType.STRING, 1)
      oprot.writeString(self.mac)
      oprot.writeFieldEnd()
    if self.devicename is not None:
      oprot.writeFieldBegin('devicename', TType.STRING, 2)
      oprot.writeString(self.devicename)
      oprot.writeFieldEnd()
    if self.certfile is not None:
      oprot.writeFieldBegin('certfile', TType.STRING, 3)
      oprot.writeString(self.certfile)
      oprot.writeFieldEnd()
    if self.capabilities is not None:
      oprot.writeFieldBegin('capabilities', TType.STRING, 4)
      oprot.writeString(self.capabilities)
      oprot.writeFieldEnd()
    if self.rsakey is not None:
      oprot.writeFieldBegin('rsakey', TType.STRING, 5)
      oprot.writeString(self.rsakey)
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 6)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields in the IDL, so nothing to enforce.
    return
  def __repr__(self):
    # items() instead of the Python-2-only iteritems() keeps __repr__ working on py3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class addDevice_result:
  """
  Attributes:
   - success
  """
  # Field spec indexed by thrift field id: (id, type, name, type args, default).
  thrift_spec = (
    (0, TType.I32, 'success', None, None, ), # 0
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    # Deserialize from iprot, using the C-accelerated fastbinary path if available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: consume fields until STOP, skipping unknown or mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize to oprot, using the C-accelerated fastbinary path if available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addDevice_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields in the IDL, so nothing to enforce.
    return
  def __repr__(self):
    # items() instead of the Python-2-only iteritems() keeps __repr__ working on py3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class renameDevice_args:
  """
  Attributes:
   - mac
   - devicename
   - key
  """
  # Field spec indexed by thrift field id: (id, type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'mac', None, None, ), # 1
    (2, TType.STRING, 'devicename', None, None, ), # 2
    (3, TType.STRING, 'key', None, None, ), # 3
  )
  def __init__(self, mac=None, devicename=None, key=None,):
    self.mac = mac
    self.devicename = devicename
    self.key = key
  def read(self, iprot):
    # Deserialize from iprot, using the C-accelerated fastbinary path if available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: consume fields until STOP, skipping unknown or mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.mac = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.devicename = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.key = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize to oprot, using the C-accelerated fastbinary path if available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('renameDevice_args')
    if self.mac is not None:
      oprot.writeFieldBegin('mac', TType.STRING, 1)
      oprot.writeString(self.mac)
      oprot.writeFieldEnd()
    if self.devicename is not None:
      oprot.writeFieldBegin('devicename', TType.STRING, 2)
      oprot.writeString(self.devicename)
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 3)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields in the IDL, so nothing to enforce.
    return
  def __repr__(self):
    # items() instead of the Python-2-only iteritems() keeps __repr__ working on py3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class renameDevice_result:
  """
  Attributes:
   - success
  """
  # Field spec indexed by thrift field id: (id, type, name, type args, default).
  thrift_spec = (
    (0, TType.I32, 'success', None, None, ), # 0
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    # Deserialize from iprot, using the C-accelerated fastbinary path if available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: consume fields until STOP, skipping unknown or mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize to oprot, using the C-accelerated fastbinary path if available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('renameDevice_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields in the IDL, so nothing to enforce.
    return
  def __repr__(self):
    # items() instead of the Python-2-only iteritems() keeps __repr__ working on py3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class updateIp_args:
  """
  Attributes:
   - mac
   - ip
   - key
  """
  # Field spec indexed by thrift field id: (id, type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'mac', None, None, ), # 1
    (2, TType.STRING, 'ip', None, None, ), # 2
    (3, TType.STRING, 'key', None, None, ), # 3
  )
  def __init__(self, mac=None, ip=None, key=None,):
    self.mac = mac
    self.ip = ip
    self.key = key
  def read(self, iprot):
    # Deserialize from iprot, using the C-accelerated fastbinary path if available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: consume fields until STOP, skipping unknown or mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.mac = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.ip = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.key = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize to oprot, using the C-accelerated fastbinary path if available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('updateIp_args')
    if self.mac is not None:
      oprot.writeFieldBegin('mac', TType.STRING, 1)
      oprot.writeString(self.mac)
      oprot.writeFieldEnd()
    if self.ip is not None:
      oprot.writeFieldBegin('ip', TType.STRING, 2)
      oprot.writeString(self.ip)
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 3)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields in the IDL, so nothing to enforce.
    return
  def __repr__(self):
    # items() instead of the Python-2-only iteritems() keeps __repr__ working on py3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class updateIp_result:
  """
  Attributes:
   - success
  """
  # Field spec indexed by thrift field id: (id, type, name, type args, default).
  thrift_spec = (
    (0, TType.I32, 'success', None, None, ), # 0
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    # Deserialize from iprot, using the C-accelerated fastbinary path if available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: consume fields until STOP, skipping unknown or mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize to oprot, using the C-accelerated fastbinary path if available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('updateIp_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields in the IDL, so nothing to enforce.
    return
  def __repr__(self):
    # items() instead of the Python-2-only iteritems() keeps __repr__ working on py3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class getDevices_args:
  """
  Attributes:
   - key
  """
  # Field spec indexed by thrift field id: (id, type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'key', None, None, ), # 1
  )
  def __init__(self, key=None,):
    self.key = key
  def read(self, iprot):
    # Deserialize from iprot, using the C-accelerated fastbinary path if available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Generic path: consume fields until STOP, skipping unknown or mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.key = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Serialize to oprot, using the C-accelerated fastbinary path if available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getDevices_args')
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 1)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields in the IDL, so nothing to enforce.
    return
  def __repr__(self):
    # items() instead of the Python-2-only iteritems() keeps __repr__ working on py3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class getDevices_result:
  """
  Thrift result wrapper for the getDevices RPC.

  Attributes:
   - success: list of Device structs returned by the call
  """

  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT,(Device, Device.thrift_spec)), None, ), # 0
  )

  def __init__(self, success=None,):
    self.success = success

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: C-accelerated decoder when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          self.success = []
          # range() (not Py2-only xrange) keeps this working on Python 3.
          (_elem_type, _size) = iprot.readListBegin()
          for _ in range(_size):
            _device = Device()
            _device.read(iprot)
            self.success.append(_device)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)  # type mismatch: skip unknown payload
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: C-accelerated encoder.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getDevices_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRUCT, len(self.success))
      for _device in self.success:
        _device.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # items() (not Py2-only iteritems()) keeps __repr__ working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class getGroups_args:
  """
  Thrift argument wrapper for the getGroups RPC.

  Attributes:
   - friends: list of friend identifier strings
   - key: session/auth key string
  """

  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'friends', (TType.STRING,None), None, ), # 1
    (2, TType.STRING, 'key', None, None, ), # 2
  )

  def __init__(self, friends=None, key=None,):
    self.friends = friends
    self.key = key

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: C-accelerated decoder when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          self.friends = []
          # range() (not Py2-only xrange) keeps this working on Python 3.
          (_elem_type, _size) = iprot.readListBegin()
          for _ in range(_size):
            self.friends.append(iprot.readString())
          iprot.readListEnd()
        else:
          iprot.skip(ftype)  # type mismatch: skip unknown payload
      elif fid == 2:
        if ftype == TType.STRING:
          self.key = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: C-accelerated encoder.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getGroups_args')
    if self.friends is not None:
      oprot.writeFieldBegin('friends', TType.LIST, 1)
      oprot.writeListBegin(TType.STRING, len(self.friends))
      for _friend in self.friends:
        oprot.writeString(_friend)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 2)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # items() (not Py2-only iteritems()) keeps __repr__ working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class getGroups_result:
  """
  Thrift result wrapper for the getGroups RPC.

  Attributes:
   - success: string payload returned by the call
  """

  thrift_spec = (
    (0, TType.STRING, 'success', None, None, ), # 0
  )

  def __init__(self, success=None,):
    self.success = success

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: C-accelerated decoder when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRING:
          self.success = iprot.readString()
        else:
          iprot.skip(ftype)  # type mismatch: skip unknown payload
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: C-accelerated encoder.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getGroups_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRING, 0)
      oprot.writeString(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # items() (not Py2-only iteritems()) keeps __repr__ working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class addGroup_args:
  """
  Thrift argument wrapper for the addGroup RPC.

  Attributes:
   - gname: name of the group to create
   - key: session/auth key string
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'gname', None, None, ), # 1
    (2, TType.STRING, 'key', None, None, ), # 2
  )

  def __init__(self, gname=None, key=None,):
    self.gname = gname
    self.key = key

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: C-accelerated decoder when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.gname = iprot.readString()
        else:
          iprot.skip(ftype)  # type mismatch: skip unknown payload
      elif fid == 2:
        if ftype == TType.STRING:
          self.key = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: C-accelerated encoder.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addGroup_args')
    if self.gname is not None:
      oprot.writeFieldBegin('gname', TType.STRING, 1)
      oprot.writeString(self.gname)
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 2)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # items() (not Py2-only iteritems()) keeps __repr__ working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class addGroup_result:
  """
  Thrift result wrapper for the addGroup RPC.

  Attributes:
   - success: i32 status code returned by the call
  """

  thrift_spec = (
    (0, TType.I32, 'success', None, None, ), # 0
  )

  def __init__(self, success=None,):
    self.success = success

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: C-accelerated decoder when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32()
        else:
          iprot.skip(ftype)  # type mismatch: skip unknown payload
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: C-accelerated encoder.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addGroup_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # items() (not Py2-only iteritems()) keeps __repr__ working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class addUserToGroup_args:
  """
  Thrift argument wrapper for the addUserToGroup RPC.

  Attributes:
   - gid: i64 group identifier
   - username: name of the user to add
   - key: session/auth key string
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'gid', None, None, ), # 1
    (2, TType.STRING, 'username', None, None, ), # 2
    (3, TType.STRING, 'key', None, None, ), # 3
  )

  def __init__(self, gid=None, username=None, key=None,):
    self.gid = gid
    self.username = username
    self.key = key

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: C-accelerated decoder when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.gid = iprot.readI64()
        else:
          iprot.skip(ftype)  # type mismatch: skip unknown payload
      elif fid == 2:
        if ftype == TType.STRING:
          self.username = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.key = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: C-accelerated encoder.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addUserToGroup_args')
    if self.gid is not None:
      oprot.writeFieldBegin('gid', TType.I64, 1)
      oprot.writeI64(self.gid)
      oprot.writeFieldEnd()
    if self.username is not None:
      oprot.writeFieldBegin('username', TType.STRING, 2)
      oprot.writeString(self.username)
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 3)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # items() (not Py2-only iteritems()) keeps __repr__ working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class addUserToGroup_result:
  """
  Thrift result wrapper for the addUserToGroup RPC.

  Attributes:
   - success: i32 status code returned by the call
  """

  thrift_spec = (
    (0, TType.I32, 'success', None, None, ), # 0
  )

  def __init__(self, success=None,):
    self.success = success

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: C-accelerated decoder when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32()
        else:
          iprot.skip(ftype)  # type mismatch: skip unknown payload
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: C-accelerated encoder.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addUserToGroup_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # items() (not Py2-only iteritems()) keeps __repr__ working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class addDeviceToGroup_args:
  """
  Thrift argument wrapper for the addDeviceToGroup RPC.

  Attributes:
   - gid: i64 group identifier
   - device: device identifier string
   - key: session/auth key string
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'gid', None, None, ), # 1
    (2, TType.STRING, 'device', None, None, ), # 2
    (3, TType.STRING, 'key', None, None, ), # 3
  )

  def __init__(self, gid=None, device=None, key=None,):
    self.gid = gid
    self.device = device
    self.key = key

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: C-accelerated decoder when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.gid = iprot.readI64()
        else:
          iprot.skip(ftype)  # type mismatch: skip unknown payload
      elif fid == 2:
        if ftype == TType.STRING:
          self.device = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.key = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: C-accelerated encoder.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addDeviceToGroup_args')
    if self.gid is not None:
      oprot.writeFieldBegin('gid', TType.I64, 1)
      oprot.writeI64(self.gid)
      oprot.writeFieldEnd()
    if self.device is not None:
      oprot.writeFieldBegin('device', TType.STRING, 2)
      oprot.writeString(self.device)
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 3)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # items() (not Py2-only iteritems()) keeps __repr__ working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class addDeviceToGroup_result:
  """
  Thrift result wrapper for the addDeviceToGroup RPC.

  Attributes:
   - success: i32 status code returned by the call
  """

  thrift_spec = (
    (0, TType.I32, 'success', None, None, ), # 0
  )

  def __init__(self, success=None,):
    self.success = success

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: C-accelerated decoder when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32()
        else:
          iprot.skip(ftype)  # type mismatch: skip unknown payload
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: C-accelerated encoder.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addDeviceToGroup_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # items() (not Py2-only iteritems()) keeps __repr__ working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class addDeviceToFacebook_args:
  """
  Thrift argument wrapper for the addDeviceToFacebook RPC.

  Attributes:
   - device: device identifier string
   - key: session/auth key string
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'device', None, None, ), # 1
    (2, TType.STRING, 'key', None, None, ), # 2
  )

  def __init__(self, device=None, key=None,):
    self.device = device
    self.key = key

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: C-accelerated decoder when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.device = iprot.readString()
        else:
          iprot.skip(ftype)  # type mismatch: skip unknown payload
      elif fid == 2:
        if ftype == TType.STRING:
          self.key = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: C-accelerated encoder.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addDeviceToFacebook_args')
    if self.device is not None:
      oprot.writeFieldBegin('device', TType.STRING, 1)
      oprot.writeString(self.device)
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 2)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # items() (not Py2-only iteritems()) keeps __repr__ working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class addDeviceToFacebook_result:
  """
  Thrift result wrapper for the addDeviceToFacebook RPC.

  Attributes:
   - success: i32 status code returned by the call
  """

  thrift_spec = (
    (0, TType.I32, 'success', None, None, ), # 0
  )

  def __init__(self, success=None,):
    self.success = success

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: C-accelerated decoder when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32()
        else:
          iprot.skip(ftype)  # type mismatch: skip unknown payload
      else:
        iprot.skip(ftype)  # unknown field id: skip for forward compatibility
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: C-accelerated encoder.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addDeviceToFacebook_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    # items() (not Py2-only iteritems()) keeps __repr__ working on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
| 29.890954 | 188 | 0.657118 |
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
def registerRequest(self, username, password, name, email):
pass
def login(self, username, password, mac):
pass
def logout(self, mac, key):
pass
def addDevice(self, mac, devicename, certfile, capabilities, rsakey, key):
pass
def renameDevice(self, mac, devicename, key):
pass
def updateIp(self, mac, ip, key):
pass
def getDevices(self, key):
pass
def getGroups(self, friends, key):
pass
def addGroup(self, gname, key):
pass
def addUserToGroup(self, gid, username, key):
pass
def addDeviceToGroup(self, gid, device, key):
pass
def addDeviceToFacebook(self, device, key):
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def registerRequest(self, username, password, name, email):
self.send_registerRequest(username, password, name, email)
return self.recv_registerRequest()
def send_registerRequest(self, username, password, name, email):
self._oprot.writeMessageBegin('registerRequest', TMessageType.CALL, self._seqid)
args = registerRequest_args()
args.username = username
args.password = password
args.name = name
args.email = email
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_registerRequest(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = registerRequest_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "registerRequest failed: unknown result");
def login(self, username, password, mac):
self.send_login(username, password, mac)
return self.recv_login()
def send_login(self, username, password, mac):
self._oprot.writeMessageBegin('login', TMessageType.CALL, self._seqid)
args = login_args()
args.username = username
args.password = password
args.mac = mac
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_login(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = login_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "login failed: unknown result");
def logout(self, mac, key):
self.send_logout(mac, key)
return self.recv_logout()
def send_logout(self, mac, key):
self._oprot.writeMessageBegin('logout', TMessageType.CALL, self._seqid)
args = logout_args()
args.mac = mac
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_logout(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = logout_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "logout failed: unknown result");
def addDevice(self, mac, devicename, certfile, capabilities, rsakey, key):
self.send_addDevice(mac, devicename, certfile, capabilities, rsakey, key)
return self.recv_addDevice()
def send_addDevice(self, mac, devicename, certfile, capabilities, rsakey, key):
self._oprot.writeMessageBegin('addDevice', TMessageType.CALL, self._seqid)
args = addDevice_args()
args.mac = mac
args.devicename = devicename
args.certfile = certfile
args.capabilities = capabilities
args.rsakey = rsakey
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_addDevice(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = addDevice_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "addDevice failed: unknown result");
def renameDevice(self, mac, devicename, key):
self.send_renameDevice(mac, devicename, key)
return self.recv_renameDevice()
def send_renameDevice(self, mac, devicename, key):
self._oprot.writeMessageBegin('renameDevice', TMessageType.CALL, self._seqid)
args = renameDevice_args()
args.mac = mac
args.devicename = devicename
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_renameDevice(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = renameDevice_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "renameDevice failed: unknown result");
def updateIp(self, mac, ip, key):
self.send_updateIp(mac, ip, key)
return self.recv_updateIp()
def send_updateIp(self, mac, ip, key):
self._oprot.writeMessageBegin('updateIp', TMessageType.CALL, self._seqid)
args = updateIp_args()
args.mac = mac
args.ip = ip
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_updateIp(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = updateIp_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "updateIp failed: unknown result");
def getDevices(self, key):
self.send_getDevices(key)
return self.recv_getDevices()
def send_getDevices(self, key):
self._oprot.writeMessageBegin('getDevices', TMessageType.CALL, self._seqid)
args = getDevices_args()
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getDevices(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getDevices_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getDevices failed: unknown result");
def getGroups(self, friends, key):
self.send_getGroups(friends, key)
return self.recv_getGroups()
def send_getGroups(self, friends, key):
self._oprot.writeMessageBegin('getGroups', TMessageType.CALL, self._seqid)
args = getGroups_args()
args.friends = friends
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getGroups(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getGroups_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getGroups failed: unknown result");
def addGroup(self, gname, key):
self.send_addGroup(gname, key)
return self.recv_addGroup()
def send_addGroup(self, gname, key):
self._oprot.writeMessageBegin('addGroup', TMessageType.CALL, self._seqid)
args = addGroup_args()
args.gname = gname
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_addGroup(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = addGroup_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "addGroup failed: unknown result");
def addUserToGroup(self, gid, username, key):
self.send_addUserToGroup(gid, username, key)
return self.recv_addUserToGroup()
def send_addUserToGroup(self, gid, username, key):
self._oprot.writeMessageBegin('addUserToGroup', TMessageType.CALL, self._seqid)
args = addUserToGroup_args()
args.gid = gid
args.username = username
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_addUserToGroup(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = addUserToGroup_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "addUserToGroup failed: unknown result");
def addDeviceToGroup(self, gid, device, key):
self.send_addDeviceToGroup(gid, device, key)
return self.recv_addDeviceToGroup()
def send_addDeviceToGroup(self, gid, device, key):
self._oprot.writeMessageBegin('addDeviceToGroup', TMessageType.CALL, self._seqid)
args = addDeviceToGroup_args()
args.gid = gid
args.device = device
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_addDeviceToGroup(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = addDeviceToGroup_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "addDeviceToGroup failed: unknown result");
def addDeviceToFacebook(self, device, key):
self.send_addDeviceToFacebook(device, key)
return self.recv_addDeviceToFacebook()
def send_addDeviceToFacebook(self, device, key):
self._oprot.writeMessageBegin('addDeviceToFacebook', TMessageType.CALL, self._seqid)
args = addDeviceToFacebook_args()
args.device = device
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_addDeviceToFacebook(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = addDeviceToFacebook_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "addDeviceToFacebook failed: unknown result");
class Processor(Iface, TProcessor):
  """Thrift server-side dispatcher generated for this service.

  *handler* is the user object implementing Iface. ``process`` reads one
  incoming message and routes it by method name through ``_processMap``;
  each ``process_*`` method decodes the method's args struct, invokes the
  matching handler method, and writes a REPLY frame back on *oprot*.
  """
  def __init__(self, handler):
    self._handler = handler
    # Method-name -> unbound processor function dispatch table.
    self._processMap = {}
    self._processMap["registerRequest"] = Processor.process_registerRequest
    self._processMap["login"] = Processor.process_login
    self._processMap["logout"] = Processor.process_logout
    self._processMap["addDevice"] = Processor.process_addDevice
    self._processMap["renameDevice"] = Processor.process_renameDevice
    self._processMap["updateIp"] = Processor.process_updateIp
    self._processMap["getDevices"] = Processor.process_getDevices
    self._processMap["getGroups"] = Processor.process_getGroups
    self._processMap["addGroup"] = Processor.process_addGroup
    self._processMap["addUserToGroup"] = Processor.process_addUserToGroup
    self._processMap["addDeviceToGroup"] = Processor.process_addDeviceToGroup
    self._processMap["addDeviceToFacebook"] = Processor.process_addDeviceToFacebook
  def process(self, iprot, oprot):
    """Read one message from *iprot* and dispatch it.

    Unknown method names get a TApplicationException EXCEPTION reply and a
    None return; dispatched calls return True.
    """
    (name, type, seqid) = iprot.readMessageBegin()
    if name not in self._processMap:
      # Drain the unrecognized payload, then answer with UNKNOWN_METHOD.
      iprot.skip(TType.STRUCT)
      iprot.readMessageEnd()
      x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
      oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
      x.write(oprot)
      oprot.writeMessageEnd()
      oprot.trans.flush()
      return
    else:
      self._processMap[name](self, seqid, iprot, oprot)
    return True
  # --- per-method handlers: decode args, call the handler, encode the REPLY ---
  def process_registerRequest(self, seqid, iprot, oprot):
    args = registerRequest_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = registerRequest_result()
    result.success = self._handler.registerRequest(args.username, args.password, args.name, args.email)
    oprot.writeMessageBegin("registerRequest", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_login(self, seqid, iprot, oprot):
    args = login_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = login_result()
    result.success = self._handler.login(args.username, args.password, args.mac)
    oprot.writeMessageBegin("login", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_logout(self, seqid, iprot, oprot):
    args = logout_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = logout_result()
    result.success = self._handler.logout(args.mac, args.key)
    oprot.writeMessageBegin("logout", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_addDevice(self, seqid, iprot, oprot):
    args = addDevice_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = addDevice_result()
    result.success = self._handler.addDevice(args.mac, args.devicename, args.certfile, args.capabilities, args.rsakey, args.key)
    oprot.writeMessageBegin("addDevice", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_renameDevice(self, seqid, iprot, oprot):
    args = renameDevice_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = renameDevice_result()
    result.success = self._handler.renameDevice(args.mac, args.devicename, args.key)
    oprot.writeMessageBegin("renameDevice", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_updateIp(self, seqid, iprot, oprot):
    args = updateIp_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = updateIp_result()
    result.success = self._handler.updateIp(args.mac, args.ip, args.key)
    oprot.writeMessageBegin("updateIp", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_getDevices(self, seqid, iprot, oprot):
    args = getDevices_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getDevices_result()
    result.success = self._handler.getDevices(args.key)
    oprot.writeMessageBegin("getDevices", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_getGroups(self, seqid, iprot, oprot):
    args = getGroups_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getGroups_result()
    result.success = self._handler.getGroups(args.friends, args.key)
    oprot.writeMessageBegin("getGroups", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_addGroup(self, seqid, iprot, oprot):
    args = addGroup_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = addGroup_result()
    result.success = self._handler.addGroup(args.gname, args.key)
    oprot.writeMessageBegin("addGroup", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_addUserToGroup(self, seqid, iprot, oprot):
    args = addUserToGroup_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = addUserToGroup_result()
    result.success = self._handler.addUserToGroup(args.gid, args.username, args.key)
    oprot.writeMessageBegin("addUserToGroup", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_addDeviceToGroup(self, seqid, iprot, oprot):
    args = addDeviceToGroup_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = addDeviceToGroup_result()
    result.success = self._handler.addDeviceToGroup(args.gid, args.device, args.key)
    oprot.writeMessageBegin("addDeviceToGroup", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_addDeviceToFacebook(self, seqid, iprot, oprot):
    args = addDeviceToFacebook_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = addDeviceToFacebook_result()
    result.success = self._handler.addDeviceToFacebook(args.device, args.key)
    oprot.writeMessageBegin("addDeviceToFacebook", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
class registerRequest_args:
  """Thrift-generated wire struct for the arguments of registerRequest.

  Attributes:
   - username
   - password
   - name
   - email
  """
  thrift_spec = (
    None,
    (1, TType.STRING, 'username', None, None, ),
    (2, TType.STRING, 'password', None, None, ),
    (3, TType.STRING, 'name', None, None, ),
    (4, TType.STRING, 'email', None, None, ),
  )
  def __init__(self, username=None, password=None, name=None, email=None,):
    self.username = username
    self.password = password
    self.name = name
    self.email = email
  def read(self, iprot):
    """Deserialize this struct from *iprot*; uses the C fastbinary codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.username = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.password = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.name = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.email = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('registerRequest_args')
    if self.username is not None:
      oprot.writeFieldBegin('username', TType.STRING, 1)
      oprot.writeString(self.username)
      oprot.writeFieldEnd()
    if self.password is not None:
      oprot.writeFieldBegin('password', TType.STRING, 2)
      oprot.writeString(self.password)
      oprot.writeFieldEnd()
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 3)
      oprot.writeString(self.name)
      oprot.writeFieldEnd()
    if self.email is not None:
      oprot.writeFieldBegin('email', TType.STRING, 4)
      oprot.writeString(self.email)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields declared for this struct.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class registerRequest_result:
  """Thrift-generated wire struct for the return value of registerRequest.

  Attributes:
   - success: i32 result code (field id 0).
  """
  thrift_spec = (
    (0, TType.I32, 'success', None, None, ),
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    """Deserialize this struct from *iprot*; uses the C fastbinary codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('registerRequest_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields declared for this struct.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class login_args:
  """Thrift-generated wire struct for the arguments of login.

  Attributes:
   - username
   - password
   - mac
  """
  thrift_spec = (
    None,
    (1, TType.STRING, 'username', None, None, ),
    (2, TType.STRING, 'password', None, None, ),
    (3, TType.STRING, 'mac', None, None, ),
  )
  def __init__(self, username=None, password=None, mac=None,):
    self.username = username
    self.password = password
    self.mac = mac
  def read(self, iprot):
    """Deserialize this struct from *iprot*; uses the C fastbinary codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.username = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.password = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.mac = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('login_args')
    if self.username is not None:
      oprot.writeFieldBegin('username', TType.STRING, 1)
      oprot.writeString(self.username)
      oprot.writeFieldEnd()
    if self.password is not None:
      oprot.writeFieldBegin('password', TType.STRING, 2)
      oprot.writeString(self.password)
      oprot.writeFieldEnd()
    if self.mac is not None:
      oprot.writeFieldBegin('mac', TType.STRING, 3)
      oprot.writeString(self.mac)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields declared for this struct.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class login_result:
  """Thrift-generated wire struct for the return value of login.

  Attributes:
   - success: string result (field id 0).
  """
  thrift_spec = (
    (0, TType.STRING, 'success', None, None, ),
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    """Deserialize this struct from *iprot*; uses the C fastbinary codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRING:
          self.success = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('login_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRING, 0)
      oprot.writeString(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields declared for this struct.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class logout_args:
  """Thrift-generated wire struct for the arguments of logout.

  Attributes:
   - mac
   - key
  """
  thrift_spec = (
    None,
    (1, TType.STRING, 'mac', None, None, ),
    (2, TType.STRING, 'key', None, None, ),
  )
  def __init__(self, mac=None, key=None,):
    self.mac = mac
    self.key = key
  def read(self, iprot):
    """Deserialize this struct from *iprot*; uses the C fastbinary codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.mac = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.key = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('logout_args')
    if self.mac is not None:
      oprot.writeFieldBegin('mac', TType.STRING, 1)
      oprot.writeString(self.mac)
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 2)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields declared for this struct.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class logout_result:
  """Thrift-generated wire struct for the return value of logout.

  Attributes:
   - success: i32 result code (field id 0).
  """
  thrift_spec = (
    (0, TType.I32, 'success', None, None, ),
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    """Deserialize this struct from *iprot*; uses the C fastbinary codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('logout_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields declared for this struct.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class addDevice_args:
  """Thrift-generated wire struct for the arguments of addDevice.

  Attributes:
   - mac
   - devicename
   - certfile
   - capabilities
   - rsakey
   - key
  """
  thrift_spec = (
    None,
    (1, TType.STRING, 'mac', None, None, ),
    (2, TType.STRING, 'devicename', None, None, ),
    (3, TType.STRING, 'certfile', None, None, ),
    (4, TType.STRING, 'capabilities', None, None, ),
    (5, TType.STRING, 'rsakey', None, None, ),
    (6, TType.STRING, 'key', None, None, ),
  )
  def __init__(self, mac=None, devicename=None, certfile=None, capabilities=None, rsakey=None, key=None,):
    self.mac = mac
    self.devicename = devicename
    self.certfile = certfile
    self.capabilities = capabilities
    self.rsakey = rsakey
    self.key = key
  def read(self, iprot):
    """Deserialize this struct from *iprot*; uses the C fastbinary codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.mac = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.devicename = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.certfile = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.capabilities = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.STRING:
          self.rsakey = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.STRING:
          self.key = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addDevice_args')
    if self.mac is not None:
      oprot.writeFieldBegin('mac', TType.STRING, 1)
      oprot.writeString(self.mac)
      oprot.writeFieldEnd()
    if self.devicename is not None:
      oprot.writeFieldBegin('devicename', TType.STRING, 2)
      oprot.writeString(self.devicename)
      oprot.writeFieldEnd()
    if self.certfile is not None:
      oprot.writeFieldBegin('certfile', TType.STRING, 3)
      oprot.writeString(self.certfile)
      oprot.writeFieldEnd()
    if self.capabilities is not None:
      oprot.writeFieldBegin('capabilities', TType.STRING, 4)
      oprot.writeString(self.capabilities)
      oprot.writeFieldEnd()
    if self.rsakey is not None:
      oprot.writeFieldBegin('rsakey', TType.STRING, 5)
      oprot.writeString(self.rsakey)
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 6)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields declared for this struct.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class addDevice_result:
  """Thrift-generated wire struct for the return value of addDevice.

  Attributes:
   - success: i32 result code (field id 0).
  """
  thrift_spec = (
    (0, TType.I32, 'success', None, None, ),
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    """Deserialize this struct from *iprot*; uses the C fastbinary codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addDevice_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields declared for this struct.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class renameDevice_args:
  """Thrift-generated wire struct for the arguments of renameDevice.

  Attributes:
   - mac
   - devicename
   - key
  """
  thrift_spec = (
    None,
    (1, TType.STRING, 'mac', None, None, ),
    (2, TType.STRING, 'devicename', None, None, ),
    (3, TType.STRING, 'key', None, None, ),
  )
  def __init__(self, mac=None, devicename=None, key=None,):
    self.mac = mac
    self.devicename = devicename
    self.key = key
  def read(self, iprot):
    """Deserialize this struct from *iprot*; uses the C fastbinary codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.mac = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.devicename = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.key = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('renameDevice_args')
    if self.mac is not None:
      oprot.writeFieldBegin('mac', TType.STRING, 1)
      oprot.writeString(self.mac)
      oprot.writeFieldEnd()
    if self.devicename is not None:
      oprot.writeFieldBegin('devicename', TType.STRING, 2)
      oprot.writeString(self.devicename)
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 3)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields declared for this struct.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class renameDevice_result:
  """Thrift-generated wire struct for the return value of renameDevice.

  Attributes:
   - success: i32 result code (field id 0).
  """
  thrift_spec = (
    (0, TType.I32, 'success', None, None, ),
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    """Deserialize this struct from *iprot*; uses the C fastbinary codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('renameDevice_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields declared for this struct.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class updateIp_args:
  """Thrift-generated wire struct for the arguments of updateIp.

  Attributes:
   - mac
   - ip
   - key
  """
  thrift_spec = (
    None,
    (1, TType.STRING, 'mac', None, None, ),
    (2, TType.STRING, 'ip', None, None, ),
    (3, TType.STRING, 'key', None, None, ),
  )
  def __init__(self, mac=None, ip=None, key=None,):
    self.mac = mac
    self.ip = ip
    self.key = key
  def read(self, iprot):
    """Deserialize this struct from *iprot*; uses the C fastbinary codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.mac = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.ip = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.key = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('updateIp_args')
    if self.mac is not None:
      oprot.writeFieldBegin('mac', TType.STRING, 1)
      oprot.writeString(self.mac)
      oprot.writeFieldEnd()
    if self.ip is not None:
      oprot.writeFieldBegin('ip', TType.STRING, 2)
      oprot.writeString(self.ip)
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 3)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields declared for this struct.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class updateIp_result:
  """Thrift-generated wire struct for the return value of updateIp.

  Attributes:
   - success: i32 result code (field id 0).
  """
  thrift_spec = (
    (0, TType.I32, 'success', None, None, ),
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    """Deserialize this struct from *iprot*; uses the C fastbinary codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('updateIp_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields declared for this struct.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class getDevices_args:
  """Thrift-generated wire struct for the arguments of getDevices.

  Attributes:
   - key
  """
  thrift_spec = (
    None,
    (1, TType.STRING, 'key', None, None, ),
  )
  def __init__(self, key=None,):
    self.key = key
  def read(self, iprot):
    """Deserialize this struct from *iprot*; uses the C fastbinary codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.key = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getDevices_args')
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 1)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields declared for this struct.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class getDevices_result:
  """Thrift-generated wire struct for the return value of getDevices.

  Attributes:
   - success: list of Device structs (field id 0).
  """
  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT,(Device, Device.thrift_spec)), None, ),
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    """Deserialize this struct from *iprot*; uses the C fastbinary codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          # Decode each list element as a nested Device struct.
          self.success = []
          (_etype10, _size7) = iprot.readListBegin()
          for _i11 in xrange(_size7):
            _elem12 = Device()
            _elem12.read(iprot)
            self.success.append(_elem12)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getDevices_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRUCT, len(self.success))
      for iter13 in self.success:
        iter13.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields declared for this struct.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class getGroups_args:
  """Thrift-generated wire struct for the arguments of getGroups.

  Attributes:
   - friends: list of strings (field id 1).
   - key
  """
  thrift_spec = (
    None,
    (1, TType.LIST, 'friends', (TType.STRING,None), None, ),
    (2, TType.STRING, 'key', None, None, ),
  )
  def __init__(self, friends=None, key=None,):
    self.friends = friends
    self.key = key
  def read(self, iprot):
    """Deserialize this struct from *iprot*; uses the C fastbinary codec when available."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          # Decode each list element as a plain string.
          self.friends = []
          (_etype17, _size14) = iprot.readListBegin()
          for _i18 in xrange(_size14):
            _elem19 = iprot.readString();
            self.friends.append(_elem19)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.key = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot*; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getGroups_args')
    if self.friends is not None:
      oprot.writeFieldBegin('friends', TType.LIST, 1)
      oprot.writeListBegin(TType.STRING, len(self.friends))
      for iter20 in self.friends:
        oprot.writeString(iter20)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 2)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields declared for this struct.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class getGroups_result:
  """Generated Thrift result struct for getGroups().

  Field 0 ('success') carries the string return value.
  Python 2-era generated code (xrange/iteritems); do not edit by hand.
  """
  thrift_spec = (
    (0, TType.STRING, 'success', None, None, ),
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    """Deserializes this struct from iprot; unknown fields are skipped."""
    # Fast path: C-accelerated decoding via fastbinary when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRING:
          self.success = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serializes this struct to oprot; fields that are None are omitted."""
    # Fast path: C-accelerated encoding via fastbinary when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getGroups_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRING, 0)
      oprot.writeString(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class addGroup_args:
  """Generated Thrift argument struct for addGroup().

  Field 1 ('gname') and field 2 ('key') are strings.
  Python 2-era generated code (xrange/iteritems); do not edit by hand.
  """
  thrift_spec = (
    None,
    (1, TType.STRING, 'gname', None, None, ),
    (2, TType.STRING, 'key', None, None, ),
  )
  def __init__(self, gname=None, key=None,):
    self.gname = gname
    self.key = key
  def read(self, iprot):
    """Deserializes this struct from iprot; unknown fields are skipped."""
    # Fast path: C-accelerated decoding via fastbinary when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.gname = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.key = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serializes this struct to oprot; fields that are None are omitted."""
    # Fast path: C-accelerated encoding via fastbinary when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addGroup_args')
    if self.gname is not None:
      oprot.writeFieldBegin('gname', TType.STRING, 1)
      oprot.writeString(self.gname)
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 2)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class addGroup_result:
  """Generated Thrift result struct for addGroup().

  Field 0 ('success') carries the i32 return value.
  Python 2-era generated code (xrange/iteritems); do not edit by hand.
  """
  thrift_spec = (
    (0, TType.I32, 'success', None, None, ),
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    """Deserializes this struct from iprot; unknown fields are skipped."""
    # Fast path: C-accelerated decoding via fastbinary when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serializes this struct to oprot; fields that are None are omitted."""
    # Fast path: C-accelerated encoding via fastbinary when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addGroup_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class addUserToGroup_args:
  """Generated Thrift argument struct for addUserToGroup().

  Field 1 ('gid') is an i64; fields 2 ('username') and 3 ('key') are strings.
  Python 2-era generated code (xrange/iteritems); do not edit by hand.
  """
  thrift_spec = (
    None,
    (1, TType.I64, 'gid', None, None, ),
    (2, TType.STRING, 'username', None, None, ),
    (3, TType.STRING, 'key', None, None, ),
  )
  def __init__(self, gid=None, username=None, key=None,):
    self.gid = gid
    self.username = username
    self.key = key
  def read(self, iprot):
    """Deserializes this struct from iprot; unknown fields are skipped."""
    # Fast path: C-accelerated decoding via fastbinary when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.gid = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.username = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.key = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serializes this struct to oprot; fields that are None are omitted."""
    # Fast path: C-accelerated encoding via fastbinary when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addUserToGroup_args')
    if self.gid is not None:
      oprot.writeFieldBegin('gid', TType.I64, 1)
      oprot.writeI64(self.gid)
      oprot.writeFieldEnd()
    if self.username is not None:
      oprot.writeFieldBegin('username', TType.STRING, 2)
      oprot.writeString(self.username)
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 3)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class addUserToGroup_result:
  """Generated Thrift result struct for addUserToGroup().

  Field 0 ('success') carries the i32 return value.
  Python 2-era generated code (xrange/iteritems); do not edit by hand.
  """
  thrift_spec = (
    (0, TType.I32, 'success', None, None, ),
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    """Deserializes this struct from iprot; unknown fields are skipped."""
    # Fast path: C-accelerated decoding via fastbinary when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serializes this struct to oprot; fields that are None are omitted."""
    # Fast path: C-accelerated encoding via fastbinary when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addUserToGroup_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class addDeviceToGroup_args:
  """Generated Thrift argument struct for addDeviceToGroup().

  Field 1 ('gid') is an i64; fields 2 ('device') and 3 ('key') are strings.
  Python 2-era generated code (xrange/iteritems); do not edit by hand.
  """
  thrift_spec = (
    None,
    (1, TType.I64, 'gid', None, None, ),
    (2, TType.STRING, 'device', None, None, ),
    (3, TType.STRING, 'key', None, None, ),
  )
  def __init__(self, gid=None, device=None, key=None,):
    self.gid = gid
    self.device = device
    self.key = key
  def read(self, iprot):
    """Deserializes this struct from iprot; unknown fields are skipped."""
    # Fast path: C-accelerated decoding via fastbinary when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.gid = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.device = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.key = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serializes this struct to oprot; fields that are None are omitted."""
    # Fast path: C-accelerated encoding via fastbinary when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addDeviceToGroup_args')
    if self.gid is not None:
      oprot.writeFieldBegin('gid', TType.I64, 1)
      oprot.writeI64(self.gid)
      oprot.writeFieldEnd()
    if self.device is not None:
      oprot.writeFieldBegin('device', TType.STRING, 2)
      oprot.writeString(self.device)
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 3)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class addDeviceToGroup_result:
  """Generated Thrift result struct for addDeviceToGroup().

  Field 0 ('success') carries the i32 return value.
  Python 2-era generated code (xrange/iteritems); do not edit by hand.
  """
  thrift_spec = (
    (0, TType.I32, 'success', None, None, ),
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    """Deserializes this struct from iprot; unknown fields are skipped."""
    # Fast path: C-accelerated decoding via fastbinary when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serializes this struct to oprot; fields that are None are omitted."""
    # Fast path: C-accelerated encoding via fastbinary when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addDeviceToGroup_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class addDeviceToFacebook_args:
  """Generated Thrift argument struct for addDeviceToFacebook().

  Field 1 ('device') and field 2 ('key') are strings.
  Python 2-era generated code (xrange/iteritems); do not edit by hand.
  """
  thrift_spec = (
    None,
    (1, TType.STRING, 'device', None, None, ),
    (2, TType.STRING, 'key', None, None, ),
  )
  def __init__(self, device=None, key=None,):
    self.device = device
    self.key = key
  def read(self, iprot):
    """Deserializes this struct from iprot; unknown fields are skipped."""
    # Fast path: C-accelerated decoding via fastbinary when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.device = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.key = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serializes this struct to oprot; fields that are None are omitted."""
    # Fast path: C-accelerated encoding via fastbinary when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addDeviceToFacebook_args')
    if self.device is not None:
      oprot.writeFieldBegin('device', TType.STRING, 1)
      oprot.writeString(self.device)
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 2)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class addDeviceToFacebook_result:
  """Generated Thrift result struct for addDeviceToFacebook().

  Field 0 ('success') carries the i32 return value.
  Python 2-era generated code (xrange/iteritems); do not edit by hand.
  """
  thrift_spec = (
    (0, TType.I32, 'success', None, None, ),
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    """Deserializes this struct from iprot; unknown fields are skipped."""
    # Fast path: C-accelerated decoding via fastbinary when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serializes this struct to oprot; fields that are None are omitted."""
    # Fast path: C-accelerated encoding via fastbinary when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addDeviceToFacebook_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
| true | true |
1c387240f490f241b3ff3d21da0f3d7bafabda5d | 1,659 | py | Python | day15/day15.py | protocol114/AdventOfCode | eec7eed81cf9f096e2dce9a9a39f5e9890a3deaf | [
"MIT"
] | null | null | null | day15/day15.py | protocol114/AdventOfCode | eec7eed81cf9f096e2dce9a9a39f5e9890a3deaf | [
"MIT"
] | null | null | null | day15/day15.py | protocol114/AdventOfCode | eec7eed81cf9f096e2dce9a9a39f5e9890a3deaf | [
"MIT"
] | null | null | null | f = open("input.txt", "r")
# Advent of Code day 15: split 100 teaspoons among four ingredients to
# maximize the product of the (clamped-positive) property totals.
# Parse one ingredient per line: capacity, durability, flavor, texture,
# calories (tokens carry trailing commas that must be stripped).
ingredients = []
for line in f:
    _, _, capacity, _, durability, _, flavor, _, texture, _, calories = line.split()
    ingredients.append([int(token.strip(','))
                        for token in (capacity, durability, flavor, texture, calories)])
f.close()  # done with the input file

highestScore = 0
highest500Cal = 0
# Enumerate every split i + j + k + h == 100 with all amounts >= 0.
# Bug fix: the original ranges stopped at 99, so any split that gave the
# fourth ingredient 0 teaspoons (or 100 to a single ingredient) was skipped.
for i in range(0, 101):
    for j in range(0, 101 - i):
        for k in range(0, 101 - i - j):
            h = 100 - i - j - k
            amounts = (i, j, k, h)
            # Amount-weighted totals for the five properties.
            capScore, durScore, flvScore, texScore, calories = (
                sum(ing[p] * amt for ing, amt in zip(ingredients, amounts))
                for p in range(5))
            # Any non-positive property zeroes the cookie's score.
            if capScore <= 0 or durScore <= 0 or flvScore <= 0 or texScore <= 0:
                continue
            score = capScore * durScore * flvScore * texScore
            if calories == 500:
                if score > highest500Cal:
                    highest500Cal = score
            else:
                if score > highestScore:
                    highestScore = score
print("Highest score possible:", highestScore)
print("Highest 500 calorie cookie:", highest500Cal)
| 38.581395 | 124 | 0.537673 | f = open("input.txt", "r")
# Parse one ingredient per line; tokens carry trailing commas.
ingredients = []
for line in f:
    temp = []
    _, _, capacity, _, durability, _, flavor, _, texture, _, calories = line.split()
    temp.append(int(capacity.strip(',')))
    temp.append(int(durability.strip(',')))
    temp.append(int(flavor.strip(',')))
    temp.append(int(texture.strip(',')))
    temp.append(int(calories.strip(',')))
    ingredients.append(temp)
highestScore = 0
highest500Cal = 0
# Brute-force every split of 100 teaspoons among the four ingredients;
# h is the remainder assigned to the fourth ingredient.
# NOTE(review): range(0, 100) never tries amounts of 100 or splits where
# h == 0 — confirm whether that off-by-one is intended.
for i in range(0, 100):
    for j in range(0, 100 - i):
        for k in range(0, 100 - i - j):
            h = 100 - i - j - k
            # Amount-weighted totals of each property across the ingredients.
            capScore = (ingredients[0][0] * i) + (ingredients[1][0] * j) + (ingredients[2][0] * k) + (ingredients[3][0] * h)
            durScore = (ingredients[0][1] * i) + (ingredients[1][1] * j) + (ingredients[2][1] * k) + (ingredients[3][1] * h)
            flvScore = (ingredients[0][2] * i) + (ingredients[1][2] * j) + (ingredients[2][2] * k) + (ingredients[3][2] * h)
            texScore = (ingredients[0][3] * i) + (ingredients[1][3] * j) + (ingredients[2][3] * k) + (ingredients[3][3] * h)
            calories = (ingredients[0][4] * i) + (ingredients[1][4] * j) + (ingredients[2][4] * k) + (ingredients[3][4] * h)
            # A non-positive property makes the whole product worthless.
            if capScore <= 0 or durScore <= 0 or flvScore <= 0:
                continue
            score = capScore * durScore * flvScore * texScore
            # Track the 500-calorie best separately from the overall best.
            if calories == 500:
                if score > highest500Cal:
                    highest500Cal = score
            else:
                if score > highestScore:
                    highestScore = score
print("Highest score possible:", highestScore)
print("Highest 500 calorie cookie:", highest500Cal)
| true | true |
1c38728a1651dbc1726420ff21bcd15bdfb63d88 | 1,070 | py | Python | images/models.py | lightguy875/Django_social_media | 11d9faf4dde4b1a3b74ad71c0d9f53d3e630c103 | [
"Apache-2.0"
] | 1 | 2020-08-10T13:03:13.000Z | 2020-08-10T13:03:13.000Z | images/models.py | lightguy875/Django_social_media | 11d9faf4dde4b1a3b74ad71c0d9f53d3e630c103 | [
"Apache-2.0"
] | 7 | 2021-06-04T23:50:18.000Z | 2022-03-12T00:45:48.000Z | images/models.py | lightguy875/Django_social_media | 11d9faf4dde4b1a3b74ad71c0d9f53d3e630c103 | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.conf import settings
from django.utils.text import slugify
from django.urls import reverse
# Create your models here.
class Image(models.Model):
    """An image a user bookmarked from an external URL, likeable by other users."""

    users_like = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='images_liked', blank=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='images_created', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    slug = models.SlugField(max_length=200, blank=True)
    url = models.URLField()
    image = models.ImageField(upload_to='images/%Y/%m/%d/')
    description = models.TextField(blank=True)
    created = models.DateField(auto_now_add=True, db_index=True)
    total_likes = models.PositiveIntegerField(db_index=True, default=0)

    def __str__(self):
        """Human-readable representation: the image title."""
        return self.title

    def save(self, *args, **kwargs):
        """Auto-populate the slug from the title when none was given, then persist."""
        self.slug = self.slug or slugify(self.title)
        super().save(*args, **kwargs)

    def get_absolute_url(self):
        """Canonical detail-page URL for this image."""
        return reverse('images:detail', args=[self.id, self.slug])
| 39.62963 | 110 | 0.723364 | from django.db import models
from django.conf import settings
from django.utils.text import slugify
from django.urls import reverse
class Image(models.Model):
    """An image a user bookmarked from an external URL, likeable by other users."""
    users_like = models.ManyToManyField(settings.AUTH_USER_MODEL,related_name='images_liked',blank=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='images_created',on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    slug = models.SlugField(max_length=200,blank=True)
    url = models.URLField()
    image = models.ImageField(upload_to='images/%Y/%m/%d/')
    description = models.TextField(blank=True)
    created = models.DateField(auto_now_add=True,db_index=True)
    total_likes = models.PositiveIntegerField(db_index=True,default = 0)
    def __str__(self):
        """Human-readable representation: the image title."""
        return self.title
    def save(self, *args, **kwargs):
        """Auto-populate the slug from the title when none was given, then persist."""
        if not self.slug:
            self.slug = slugify(self.title)
        super().save(*args, **kwargs)
    def get_absolute_url(self):
        """Canonical detail-page URL for this image."""
        return reverse('images:detail', args=[self.id, self.slug])
| true | true |
1c38751248735d1b99301b3fdab6685abfcc16e0 | 15,654 | py | Python | experimental/language_structure/psl/psl_model_multiwoz_test.py | jaeikjeon9919/uncertainty-baselines | 15aad70bb585452d84c0afa74208338f5db5f70e | [
"Apache-2.0"
] | null | null | null | experimental/language_structure/psl/psl_model_multiwoz_test.py | jaeikjeon9919/uncertainty-baselines | 15aad70bb585452d84c0afa74208338f5db5f70e | [
"Apache-2.0"
] | null | null | null | experimental/language_structure/psl/psl_model_multiwoz_test.py | jaeikjeon9919/uncertainty-baselines | 15aad70bb585452d84c0afa74208338f5db5f70e | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for MultiWoz rules."""
import tensorflow as tf
import constrained_evaluation as eval_model # local file import from experimental.language_structure.psl
import data # local file import from experimental.language_structure.psl
import psl_model_multiwoz as model # local file import from experimental.language_structure.psl
import psl_model_multiwoz_test_util as test_util # local file import from experimental.language_structure.psl
class PslRulesTest(tf.test.TestCase):
def setUp(self):
super(PslRulesTest, self).setUp()
self.config = test_util.TEST_MULTIWOZ_CONFIG
self.data = test_util.DATA
tf.random.set_seed(self.config['default_seed'])
train_dialogs = data.add_features(
self.data['train_data'],
vocab_mapping=self.data['vocab_mapping'],
accept_words=self.config['accept_words'],
cancel_words=self.config['cancel_words'],
end_words=self.config['end_words'],
greet_words=self.config['greet_words'],
info_question_words=self.config['info_question_words'],
insist_words=self.config['insist_words'],
slot_question_words=self.config['slot_question_words'],
includes_word=self.config['includes_word'],
excludes_word=self.config['excludes_word'],
accept_index=self.config['accept_index'],
cancel_index=self.config['cancel_index'],
end_index=self.config['end_index'],
greet_index=self.config['greet_index'],
info_question_index=self.config['info_question_index'],
insist_index=self.config['insist_index'],
slot_question_index=self.config['slot_question_index'],
utterance_mask=self.config['utterance_mask'],
pad_utterance_mask=self.config['pad_utterance_mask'],
last_utterance_mask=self.config['last_utterance_mask'],
mask_index=self.config['mask_index'])
train_data = data.pad_dialogs(train_dialogs, self.config['max_dialog_size'],
self.config['max_utterance_size'])
raw_train_labels = data.one_hot_string_encoding(self.data['train_labels'],
self.config['class_map'])
train_labels = data.pad_one_hot_labels(raw_train_labels,
self.config['max_dialog_size'],
self.config['class_map'])
self.train_ds = data.list_to_dataset(train_data[0], train_labels[0],
self.config['shuffle_train'],
self.config['batch_size'])
test_dialogs = data.add_features(
self.data['test_data'],
vocab_mapping=self.data['vocab_mapping'],
accept_words=self.config['accept_words'],
cancel_words=self.config['cancel_words'],
end_words=self.config['end_words'],
greet_words=self.config['greet_words'],
info_question_words=self.config['info_question_words'],
insist_words=self.config['insist_words'],
slot_question_words=self.config['slot_question_words'],
includes_word=self.config['includes_word'],
excludes_word=self.config['excludes_word'],
accept_index=self.config['accept_index'],
cancel_index=self.config['cancel_index'],
end_index=self.config['end_index'],
greet_index=self.config['greet_index'],
info_question_index=self.config['info_question_index'],
insist_index=self.config['insist_index'],
slot_question_index=self.config['slot_question_index'],
utterance_mask=self.config['utterance_mask'],
pad_utterance_mask=self.config['pad_utterance_mask'],
last_utterance_mask=self.config['last_utterance_mask'],
mask_index=self.config['mask_index'])
test_data = data.pad_dialogs(test_dialogs, self.config['max_dialog_size'],
self.config['max_utterance_size'])
raw_test_labels = data.one_hot_string_encoding(self.data['test_labels'],
self.config['class_map'])
self.test_labels = data.pad_one_hot_labels(raw_test_labels,
self.config['max_dialog_size'],
self.config['class_map'])
self.test_ds = data.list_to_dataset(test_data[0], self.test_labels[0],
self.config['shuffle_test'],
self.config['batch_size'])
def check_greet(self, predictions, mask, class_map):
for dialog_pred, dialog_mask in zip(predictions, mask):
first = True
for utterance_pred, utterance_mask in zip(dialog_pred, dialog_mask):
if first or utterance_mask == 0:
first = False
continue
if utterance_pred == class_map['greet']:
return False
return True
def test_psl_rule_1_run_model(self):
rule_weights = (1.0,)
rule_names = ('rule_1',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
constrained_model = test_util.build_constrained_model(
[self.config['max_dialog_size'], self.config['max_utterance_size']])
constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])
logits = eval_model.evaluate_constrained_model(constrained_model,
self.test_ds,
psl_constraints)
predictions = tf.math.argmax(logits[0], axis=-1)
result = self.check_greet(predictions, self.test_labels[1],
self.config['class_map'])
self.assertTrue(result)
def test_psl_rule_1(self):
rule_weights = (1.0,)
rule_names = ('rule_1',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_1(logits=tf.constant(logits))
self.assertEqual(loss, 1.4)
def test_psl_rule_2_run_model(self):
rule_weights = (10.0,)
rule_names = ('rule_2',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
constrained_model = test_util.build_constrained_model(
[self.config['max_dialog_size'], self.config['max_utterance_size']])
constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])
logits = eval_model.evaluate_constrained_model(constrained_model,
self.test_ds,
psl_constraints)
predictions = tf.math.argmax(logits[0], axis=-1)
self.assertEqual(predictions[2][0], self.config['class_map']['greet'])
self.assertEqual(predictions[3][0], self.config['class_map']['greet'])
def test_psl_rule_2(self):
rule_weights = (1.0,)
rule_names = ('rule_2',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_2(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertEqual(loss, 0.6)
def test_psl_rule_3_run_model(self):
rule_weights = (1.0,)
rule_names = ('rule_3',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
constrained_model = test_util.build_constrained_model(
[self.config['max_dialog_size'], self.config['max_utterance_size']])
constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])
logits = eval_model.evaluate_constrained_model(constrained_model,
self.test_ds,
psl_constraints)
predictions = tf.math.argmax(logits[0], axis=-1)
self.assertEqual(predictions[0][0],
self.config['class_map']['init_request'])
self.assertEqual(predictions[1][0],
self.config['class_map']['init_request'])
def test_psl_rule_3(self):
rule_weights = (1.0,)
rule_names = ('rule_3',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_3(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertEqual(loss, 0.8)
def test_psl_rule_4_run_model(self):
rule_weights = (1.0,)
rule_names = ('rule_4',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
constrained_model = test_util.build_constrained_model(
[self.config['max_dialog_size'], self.config['max_utterance_size']])
constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])
logits = eval_model.evaluate_constrained_model(constrained_model,
self.test_ds,
psl_constraints)
predictions = tf.math.argmax(logits[0], axis=-1)
self.assertEqual(predictions[1][1],
self.config['class_map']['second_request'])
self.assertEqual(predictions[2][1],
self.config['class_map']['second_request'])
def test_psl_rule_4(self):
rule_weights = (1.0,)
rule_names = ('rule_4',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_4(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 1.8, err=1e-6)
def test_psl_rule_5_run_model(self):
rule_weights = (1.0,)
rule_names = ('rule_5',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
constrained_model = test_util.build_constrained_model(
[self.config['max_dialog_size'], self.config['max_utterance_size']])
constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])
logits = eval_model.evaluate_constrained_model(constrained_model,
self.test_ds,
psl_constraints)
predictions = tf.math.argmax(logits[0], axis=-1)
self.assertNotEqual(predictions[1][1],
self.config['class_map']['init_request'])
self.assertNotEqual(predictions[2][1],
self.config['class_map']['init_request'])
def test_psl_rule_5(self):
rule_weights = (1.0,)
rule_names = ('rule_5',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_5(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 1.4, err=1e-6)
def test_psl_rule_6_run_model(self):
rule_weights = (1.0,)
rule_names = ('rule_6',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
constrained_model = test_util.build_constrained_model(
[self.config['max_dialog_size'], self.config['max_utterance_size']])
constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])
logits = eval_model.evaluate_constrained_model(constrained_model,
self.test_ds,
psl_constraints)
predictions = tf.math.argmax(logits[0], axis=-1)
self.assertNotEqual(predictions[1][0], self.config['class_map']['greet'])
self.assertNotEqual(predictions[2][0], self.config['class_map']['greet'])
def test_psl_rule_6(self):
rule_weights = (1.0,)
rule_names = ('rule_6',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_6(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 1.4, err=1e-6)
def test_psl_rule_7_run_model(self):
rule_weights = (1.0,)
rule_names = ('rule_7',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
constrained_model = test_util.build_constrained_model(
[self.config['max_dialog_size'], self.config['max_utterance_size']])
constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])
logits = eval_model.evaluate_constrained_model(constrained_model,
self.test_ds,
psl_constraints)
predictions = tf.math.argmax(logits[0], axis=-1)
self.assertEqual(predictions[1][2], self.config['class_map']['end'])
self.assertEqual(predictions[2][3], self.config['class_map']['end'])
def test_psl_rule_7(self):
rule_weights = (1.0,)
rule_names = ('rule_7',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_7(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 1.1, err=1e-6)
def test_psl_rule_8(self):
rule_weights = (1.0,)
rule_names = ('rule_8',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_8(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 0.9, err=1e-6)
def test_psl_rule_9(self):
rule_weights = (1.0,)
rule_names = ('rule_9',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_9(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 0.8, err=1e-6)
def test_psl_rule_10(self):
rule_weights = (1.0,)
rule_names = ('rule_10',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_10(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 0.3, err=1e-6)
def test_psl_rule_11(self):
rule_weights = (1.0,)
rule_names = ('rule_11',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_11(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 0.7, err=1e-6)
def test_psl_rule_12(self):
rule_weights = (1.0,)
rule_names = ('rule_12',)
psl_constraints = model.PSLModelMultiWoZ(
rule_weights, rule_names, config=self.config)
logits = test_util.LOGITS
loss = psl_constraints.rule_12(
logits=tf.constant(logits), data=test_util.FEATURES)
self.assertNear(loss, 0.1, err=1e-6)
# Run the TensorFlow test harness when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 41.855615 | 110 | 0.654401 |
import tensorflow as tf
import constrained_evaluation as eval_model
import data
import psl_model_multiwoz as model
import psl_model_multiwoz_test_util as test_util
class PslRulesTest(tf.test.TestCase):
  """Tests for the MultiWoZ PSL constraint rules.

  Each rule has up to two tests:
    * ``test_psl_rule_N`` checks the raw constraint loss on the canned
      logits/features from ``test_util``.
    * ``test_psl_rule_N_run_model`` trains a small constrained model and
      checks that its predictions respect the rule.

  The heavy duplication of the original version is factored into the
  private helpers ``_build_dataset``, ``_constraints``, ``_rule_loss``
  and ``_train_and_predict``; all public test names are unchanged.
  """

  def setUp(self):
    super(PslRulesTest, self).setUp()
    self.config = test_util.TEST_MULTIWOZ_CONFIG
    self.data = test_util.DATA
    tf.random.set_seed(self.config['default_seed'])
    # Both splits go through the identical featurize/pad/batch pipeline.
    self.train_ds, _ = self._build_dataset('train_data', 'train_labels',
                                           'shuffle_train')
    self.test_ds, self.test_labels = self._build_dataset(
        'test_data', 'test_labels', 'shuffle_test')

  def _build_dataset(self, data_key, labels_key, shuffle_key):
    """Featurizes, pads and batches one data split.

    Args:
      data_key: key of the raw dialogs in ``self.data``.
      labels_key: key of the raw string labels in ``self.data``.
      shuffle_key: config key holding the shuffle flag for this split.

    Returns:
      A ``(dataset, padded_one_hot_labels)`` pair.
    """
    dialogs = data.add_features(
        self.data[data_key],
        vocab_mapping=self.data['vocab_mapping'],
        accept_words=self.config['accept_words'],
        cancel_words=self.config['cancel_words'],
        end_words=self.config['end_words'],
        greet_words=self.config['greet_words'],
        info_question_words=self.config['info_question_words'],
        insist_words=self.config['insist_words'],
        slot_question_words=self.config['slot_question_words'],
        includes_word=self.config['includes_word'],
        excludes_word=self.config['excludes_word'],
        accept_index=self.config['accept_index'],
        cancel_index=self.config['cancel_index'],
        end_index=self.config['end_index'],
        greet_index=self.config['greet_index'],
        info_question_index=self.config['info_question_index'],
        insist_index=self.config['insist_index'],
        slot_question_index=self.config['slot_question_index'],
        utterance_mask=self.config['utterance_mask'],
        pad_utterance_mask=self.config['pad_utterance_mask'],
        last_utterance_mask=self.config['last_utterance_mask'],
        mask_index=self.config['mask_index'])
    padded = data.pad_dialogs(dialogs, self.config['max_dialog_size'],
                              self.config['max_utterance_size'])
    raw_labels = data.one_hot_string_encoding(self.data[labels_key],
                                              self.config['class_map'])
    labels = data.pad_one_hot_labels(raw_labels,
                                     self.config['max_dialog_size'],
                                     self.config['class_map'])
    dataset = data.list_to_dataset(padded[0], labels[0],
                                   self.config[shuffle_key],
                                   self.config['batch_size'])
    return dataset, labels

  def _constraints(self, rule_name, weight=1.0):
    """Builds a PSL constraint model with a single active rule."""
    return model.PSLModelMultiWoZ((weight,), (rule_name,),
                                  config=self.config)

  def _rule_loss(self, rule_name):
    """Returns one rule's constraint loss on the canned logits/features."""
    psl_constraints = self._constraints(rule_name)
    rule_fn = getattr(psl_constraints, rule_name)
    return rule_fn(logits=tf.constant(test_util.LOGITS),
                   data=test_util.FEATURES)

  def _train_and_predict(self, rule_name, weight=1.0):
    """Trains a constrained model and returns its argmax predictions."""
    psl_constraints = self._constraints(rule_name, weight)
    constrained_model = test_util.build_constrained_model(
        [self.config['max_dialog_size'], self.config['max_utterance_size']])
    constrained_model.fit(self.train_ds, epochs=self.config['train_epochs'])
    logits = eval_model.evaluate_constrained_model(constrained_model,
                                                   self.test_ds,
                                                   psl_constraints)
    return tf.math.argmax(logits[0], axis=-1)

  def check_greet(self, predictions, mask, class_map):
    """Returns False iff 'greet' is predicted after a dialog's first turn."""
    for dialog_pred, dialog_mask in zip(predictions, mask):
      first = True
      for utterance_pred, utterance_mask in zip(dialog_pred, dialog_mask):
        # Skip the first utterance of a dialog and padded positions.
        if first or utterance_mask == 0:
          first = False
          continue
        if utterance_pred == class_map['greet']:
          return False
    return True

  def test_psl_rule_1_run_model(self):
    predictions = self._train_and_predict('rule_1')
    result = self.check_greet(predictions, self.test_labels[1],
                              self.config['class_map'])
    self.assertTrue(result)

  def test_psl_rule_1(self):
    # rule_1 takes no feature data, only logits.
    psl_constraints = self._constraints('rule_1')
    loss = psl_constraints.rule_1(logits=tf.constant(test_util.LOGITS))
    self.assertEqual(loss, 1.4)

  def test_psl_rule_2_run_model(self):
    # rule_2 needs a larger weight (10.0) to dominate training.
    predictions = self._train_and_predict('rule_2', weight=10.0)
    self.assertEqual(predictions[2][0], self.config['class_map']['greet'])
    self.assertEqual(predictions[3][0], self.config['class_map']['greet'])

  def test_psl_rule_2(self):
    self.assertEqual(self._rule_loss('rule_2'), 0.6)

  def test_psl_rule_3_run_model(self):
    predictions = self._train_and_predict('rule_3')
    init_request = self.config['class_map']['init_request']
    self.assertEqual(predictions[0][0], init_request)
    self.assertEqual(predictions[1][0], init_request)

  def test_psl_rule_3(self):
    self.assertEqual(self._rule_loss('rule_3'), 0.8)

  def test_psl_rule_4_run_model(self):
    predictions = self._train_and_predict('rule_4')
    second_request = self.config['class_map']['second_request']
    self.assertEqual(predictions[1][1], second_request)
    self.assertEqual(predictions[2][1], second_request)

  def test_psl_rule_4(self):
    self.assertNear(self._rule_loss('rule_4'), 1.8, err=1e-6)

  def test_psl_rule_5_run_model(self):
    predictions = self._train_and_predict('rule_5')
    init_request = self.config['class_map']['init_request']
    self.assertNotEqual(predictions[1][1], init_request)
    self.assertNotEqual(predictions[2][1], init_request)

  def test_psl_rule_5(self):
    self.assertNear(self._rule_loss('rule_5'), 1.4, err=1e-6)

  def test_psl_rule_6_run_model(self):
    predictions = self._train_and_predict('rule_6')
    self.assertNotEqual(predictions[1][0], self.config['class_map']['greet'])
    self.assertNotEqual(predictions[2][0], self.config['class_map']['greet'])

  def test_psl_rule_6(self):
    self.assertNear(self._rule_loss('rule_6'), 1.4, err=1e-6)

  def test_psl_rule_7_run_model(self):
    predictions = self._train_and_predict('rule_7')
    self.assertEqual(predictions[1][2], self.config['class_map']['end'])
    self.assertEqual(predictions[2][3], self.config['class_map']['end'])

  def test_psl_rule_7(self):
    self.assertNear(self._rule_loss('rule_7'), 1.1, err=1e-6)

  def test_psl_rule_8(self):
    self.assertNear(self._rule_loss('rule_8'), 0.9, err=1e-6)

  def test_psl_rule_9(self):
    self.assertNear(self._rule_loss('rule_9'), 0.8, err=1e-6)

  def test_psl_rule_10(self):
    self.assertNear(self._rule_loss('rule_10'), 0.3, err=1e-6)

  def test_psl_rule_11(self):
    self.assertNear(self._rule_loss('rule_11'), 0.7, err=1e-6)

  def test_psl_rule_12(self):
    self.assertNear(self._rule_loss('rule_12'), 0.1, err=1e-6)
# Run the TensorFlow test harness when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| true | true |
1c3875240b88f854a299beba1c25846f720cfbcb | 6,041 | py | Python | Duelist_Algorithm.py | ebruyasar11/Duelist-Algorithm | d740e0a26a33a230434441b5bcf67b1e7a7c5fd2 | [
"MIT"
] | 1 | 2021-06-02T15:34:00.000Z | 2021-06-02T15:34:00.000Z | Duelist_Algorithm.py | ebruyasar11/Duelist-Algorithm | d740e0a26a33a230434441b5bcf67b1e7a7c5fd2 | [
"MIT"
] | null | null | null | Duelist_Algorithm.py | ebruyasar11/Duelist-Algorithm | d740e0a26a33a230434441b5bcf67b1e7a7c5fd2 | [
"MIT"
] | null | null | null | import numpy as np
import random
import time
import matplotlib.pyplot as mp
class Duelist_Algorithm():
def __init__(self,f,x,altdeger,ustdeger,pop=200,sans=0.01,mutasyon=0.1,ogren=0.8,iterasyon=500,nc=5,karistir=False):
#Sınıf değişkenlerinin tanımlamaları
self.f = f
self.x = x
self.altdeger = altdeger
self.ustdeger = ustdeger
self.populasyon = pop
self.sans = sans
self.mutasyon = mutasyon
self.ogren = ogren
self.max_iterasyon = iterasyon
self.nc = nc
self.sampiyon = np.empty((x.__len__()+1,nc),dtype=np.float64)
self.kazan_kaybet = np.empty((pop,1),dtype=np.float64)
self.savas_puani = np.empty((pop,1),dtype=np.float64)
self.iterasyon = 0
self.karistir = karistir
self.x_dizi = []
self.y_dizi = []
self.fmin = []
#Çok değişkenli optimizasyonun yapılıp yapılmayacağının kontrolünü yapar
if type(x) is list:
self.mult=1
assert x.__len__()==altdeger.__len__()==ustdeger.__len__() , "Sinir hatasi, lutfen altdeger ve ustdegeri kontrol edin"
else:
self.mult=0
#Hesaplama için başlangıç matrisi oluşturur
if self.mult==1:
shape=(x.__len__(),pop)
else:
shape=(1,pop)
self.matrix=np.empty(shape,dtype=np.float64)
self.egitim=np.empty(shape,dtype=np.float64)
self.puan=np.empty(pop,dtype=np.float64)
self.cozum_degeri=np.empty((x.__len__()+1,pop),dtype=np.float64)
self.en_iyi_cozum=np.empty((0,x.__len__()+1),dtype=np.float64)
def baslangic(self):
#Düellocu algoritma adımları
self.kayit()
self.yeterlilik()
while self.iterasyon < self.max_iterasyon:
self.sampiyon_sec()
self.duello()
self.duellocu_egitimi()
self.yeterlilik_sonrasi()
self.ele()
self.iterasyon=self.iterasyon+1
self.sonuc_goster()
def kayit(self):
#Düello kayıt
for i in range(0,self.x.__len__()):
#Popülasyonu başlatmak için sözde rastgele oluşturucu
t = int( time.time() * 1000.0 )
np.random.seed( ((t & 0xff000000) >> 24) +
((t & 0x00ff0000) >> 8) +
((t & 0x0000ff00) << 8) +
((t & 0x000000ff) << 24) )
#Oluşturulan matrisi alt ve ust degere göre sınırla
self.matrix[i,:]=np.random.uniform(size=self.populasyon,low=self.altdeger[i],high=self.ustdeger[i])
def yeterlilik(self):
#Bu bölüm yalnızca nüfus iki katına çıktığında yeterlilik sonrası için işe yarar
if self.puan.shape[0]<self.matrix.shape[1]:
self.puan=np.append(self.puan,self.puan)
#Uygunluk fonksiyonuna göre bir uygunluk degeri hesapla
for i in range(0,self.matrix.shape[1]):
self.puan[i]=self.f(*self.matrix.T.tolist()[i])
self.puani_degerlendir()
def puani_degerlendir(self):
#Çözümleri en düşükten en yükseğe doğru sırala
self.puan=np.asarray([self.puan])
self.cozum_degeri=np.concatenate((self.puan,self.matrix),axis=0).T
self.cozum_degeri=self.cozum_degeri[self.cozum_degeri[:,0].argsort()].T
self.puan=self.cozum_degeri[0,:]
self.matrix=self.cozum_degeri[1:,:]
def yeterlilik_sonrasi(self):
#Matrisi sıralayabilmek için transpozunu al
self.matrix=self.matrix.T
#Tekrar karşılaştır
self.yeterlilik()
def sampiyon_sec(self):
#En iyi şampiyonu kaydet
for i in range(0,self.nc):
self.en_iyi_cozum=np.concatenate((self.en_iyi_cozum,np.asarray([self.cozum_degeri[:,i]])))
#Şampiyonları tüm sonuçlardan ayır
self.sampiyon=self.cozum_degeri[:,0:self.nc]
print(f"{self.iterasyon + 1}. iterasyon, cozum degeri {self.cozum_degeri[:,0][0]}, fmin {self.cozum_degeri[:,0][1::]}")
self.cozum = []
self.cozum.append(self.cozum_degeri[:,0][1::])
self.x_dizi.append(self.cozum[0][0])
self.y_dizi.append(self.cozum[0][1])
if self.fmin.__len__()==0:
self.fmin.append(self.cozum_degeri[:,0][0])
elif self.cozum_degeri[:,0][0]<min(self.fmin):
self.fmin.append(self.cozum_degeri[:,0][0])
else:
self.fmin.append(min(self.fmin))
#Benzer şampiyonları tekrar eğit
for j in range(0,self.nc):
for i in range(0,self.x.__len__()):
if (random.uniform(0,1)<self.mutasyon):
self.matrix[i,j]=random.uniform(self.altdeger[i],self.ustdeger[i])
def duello(self):
#Düellocuları popülasyondan rastgele eşleştir
self.matrix=self.matrix.T
if(self.karistir==True):
np.random.mut(self.matrix)
#Düello kuralları
i=0
while i<self.matrix.shape[0]:
#nüfus tekse, eşleşmeyen düellocu otomatik olarak kazanır
if(i==self.matrix.shape[0]-1):
self.kazan_kaybet[i]=1
else:
#iki düellocu için savaş puanını hesapla
tempmatrix=self.matrix.tolist()
self.savas_puani[i]=self.f(*tempmatrix[i])*(1+(self.sans+(random.uniform(0,1)*self.sans)))
self.savas_puani[i+1]=self.f(*tempmatrix[i+1])*(1+(self.sans+(random.uniform(0,1)*self.sans)))
#savaş puanına göre kazananı ve kaybedeni belirle
if(self.savas_puani[i]>self.savas_puani[i+1]):
self.kazan_kaybet[i]=1
self.kazan_kaybet[i+1]=0
else:
self.kazan_kaybet[i]=0
self.kazan_kaybet[i+1]=1
i=i+2
def duellocu_egitimi(self):
#Kazanan ve kaybedene göre eğit
self.egitim=np.copy(self.matrix)
for j in range(0,self.x.__len__()):
for i in range(0,self.populasyon):
if self.kazan_kaybet[i]==1:
#kazanan mutasyona uğrayarak kendini geliştirsin
if random.uniform(0,1)<self.mutasyon:
self.egitim[i,j]=random.uniform(self.altdeger[j],self.ustdeger[j])
else:
#Kaybeden kazanandan öğrensin
if random.uniform(0,1)<self.ogren:
if (i%2==0):
self.egitim[i,j]=self.matrix[i+1,j]
else:
self.egitim[i,j]=self.matrix[i-1,j]
#Matrise yeni eğitilmiş düellocu ekle
self.matrix=np.concatenate((self.matrix,self.egitim),axis=0)
def ele(self):
self.matrix=self.matrix[:,:self.populasyon]
def sonuc_goster(self):
sonuc=self.en_iyi_cozum[self.en_iyi_cozum[:,0].argsort()]
print("En iyi cozum degerleri:",sonuc[0][1::], "En iyi cozum", sonuc[0][0])
fig = fig = mp.figure()
ax1 = fig.add_subplot(211)
ax1.plot(self.fmin,'r.-')
ax1.legend(['MinUygunluk'])
ax2 = fig.add_subplot(212)
ax2.plot(self.x_dizi,'b.-')
ax2.plot(self.y_dizi,'g--')
mp.legend(['x1','x2'])
mp.show()
| 33.375691 | 121 | 0.692766 | import numpy as np
import random
import time
import matplotlib.pyplot as mp
class Duelist_Algorithm():
    """Duelist Algorithm optimizer (Biyanto et al., 2016).

    A population-based metaheuristic: candidate solutions ("duelists")
    fight in random pairs, winners improve themselves by mutation and
    losers learn by copying the winner, while the best ``nc`` champions
    are tracked each iteration.  Minimizes ``f`` over box-constrained
    variables.
    """

    def __init__(self, f, x, altdeger, ustdeger, pop=200, sans=0.01,
                 mutasyon=0.1, ogren=0.8, iterasyon=500, nc=5, karistir=False):
        self.f = f
        self.x = x
        self.altdeger = altdeger
        self.ustdeger = ustdeger
        self.populasyon = pop
        self.sans = sans
        self.mutasyon = mutasyon
        self.ogren = ogren
        self.max_iterasyon = iterasyon
        self.nc = nc
        self.sampiyon = np.empty((x.__len__() + 1, nc), dtype=np.float64)
        self.kazan_kaybet = np.empty((pop, 1), dtype=np.float64)
        self.savas_puani = np.empty((pop, 1), dtype=np.float64)
        self.iterasyon = 0
        self.karistir = karistir
        self.x_dizi = []
        self.y_dizi = []
        self.fmin = []
        # Multivariate mode: bounds must match the number of variables.
        if type(x) is list:
            self.mult = 1
            assert x.__len__() == altdeger.__len__() == ustdeger.__len__(), "Sinir hatasi, lutfen altdeger ve ustdegeri kontrol edin"
        else:
            self.mult = 0
        # Population matrix: one row per variable, one column per duelist.
        if self.mult == 1:
            shape = (x.__len__(), pop)
        else:
            shape = (1, pop)
        self.matrix = np.empty(shape, dtype=np.float64)
        self.egitim = np.empty(shape, dtype=np.float64)
        self.puan = np.empty(pop, dtype=np.float64)
        self.cozum_degeri = np.empty((x.__len__() + 1, pop), dtype=np.float64)
        self.en_iyi_cozum = np.empty((0, x.__len__() + 1), dtype=np.float64)

    def baslangic(self):
        """Run the full optimization loop, then print/plot the result."""
        self.kayit()
        self.yeterlilik()
        while self.iterasyon < self.max_iterasyon:
            self.sampiyon_sec()
            self.duello()
            self.duellocu_egitimi()
            self.yeterlilik_sonrasi()
            self.ele()
            self.iterasyon = self.iterasyon + 1
        self.sonuc_goster()

    def kayit(self):
        """Register duelists: draw the initial population within bounds."""
        for i in range(0, self.x.__len__()):
            # Time-based pseudo-random seed for population initialization.
            t = int(time.time() * 1000.0)
            np.random.seed(((t & 0xff000000) >> 24) +
                           ((t & 0x00ff0000) >> 8) +
                           ((t & 0x0000ff00) << 8) +
                           ((t & 0x000000ff) << 24))
            self.matrix[i, :] = np.random.uniform(size=self.populasyon,
                                                  low=self.altdeger[i],
                                                  high=self.ustdeger[i])

    def yeterlilik(self):
        """Evaluate every duelist's fitness and sort the population."""
        # Only relevant after training doubles the population size.
        if self.puan.shape[0] < self.matrix.shape[1]:
            self.puan = np.append(self.puan, self.puan)
        for i in range(0, self.matrix.shape[1]):
            self.puan[i] = self.f(*self.matrix.T.tolist()[i])
        self.puani_degerlendir()

    def puani_degerlendir(self):
        """Sort solutions ascending by fitness (best solution first)."""
        self.puan = np.asarray([self.puan])
        self.cozum_degeri = np.concatenate((self.puan, self.matrix), axis=0).T
        self.cozum_degeri = self.cozum_degeri[self.cozum_degeri[:, 0].argsort()].T
        self.puan = self.cozum_degeri[0, :]
        self.matrix = self.cozum_degeri[1:, :]

    def yeterlilik_sonrasi(self):
        """Re-evaluate after training (matrix is duelist-major: transpose)."""
        self.matrix = self.matrix.T
        self.yeterlilik()

    def sampiyon_sec(self):
        """Record the best nc champions and update convergence history."""
        for i in range(0, self.nc):
            self.en_iyi_cozum = np.concatenate(
                (self.en_iyi_cozum, np.asarray([self.cozum_degeri[:, i]])))
        self.sampiyon = self.cozum_degeri[:, 0:self.nc]
        print(f"{self.iterasyon + 1}. iterasyon, cozum degeri {self.cozum_degeri[:,0][0]}, fmin {self.cozum_degeri[:,0][1::]}")
        self.cozum = []
        self.cozum.append(self.cozum_degeri[:, 0][1::])
        self.x_dizi.append(self.cozum[0][0])
        self.y_dizi.append(self.cozum[0][1])
        # fmin tracks the best objective value seen so far.
        if self.fmin.__len__() == 0:
            self.fmin.append(self.cozum_degeri[:, 0][0])
        elif self.cozum_degeri[:, 0][0] < min(self.fmin):
            self.fmin.append(self.cozum_degeri[:, 0][0])
        else:
            self.fmin.append(min(self.fmin))
        # Mutate champion look-alikes to keep population diversity.
        for j in range(0, self.nc):
            for i in range(0, self.x.__len__()):
                if (random.uniform(0, 1) < self.mutasyon):
                    self.matrix[i, j] = random.uniform(self.altdeger[i],
                                                       self.ustdeger[i])

    def duello(self):
        """Pair duelists randomly and decide winners by fighting score."""
        self.matrix = self.matrix.T
        if (self.karistir == True):
            # BUGFIX: was `np.random.mut(self.matrix)`, which does not exist
            # in NumPy; shuffle the duelists (rows) in place before pairing.
            np.random.shuffle(self.matrix)
        i = 0
        while i < self.matrix.shape[0]:
            # With an odd population the unpaired duelist wins automatically.
            if (i == self.matrix.shape[0] - 1):
                self.kazan_kaybet[i] = 1
            else:
                # Fighting score = fitness scaled by a random "luck" bonus.
                tempmatrix = self.matrix.tolist()
                self.savas_puani[i] = self.f(*tempmatrix[i]) * (1 + (self.sans + (random.uniform(0, 1) * self.sans)))
                self.savas_puani[i + 1] = self.f(*tempmatrix[i + 1]) * (1 + (self.sans + (random.uniform(0, 1) * self.sans)))
                if (self.savas_puani[i] > self.savas_puani[i + 1]):
                    self.kazan_kaybet[i] = 1
                    self.kazan_kaybet[i + 1] = 0
                else:
                    self.kazan_kaybet[i] = 0
                    self.kazan_kaybet[i + 1] = 1
            i = i + 2

    def duellocu_egitimi(self):
        """Train duelists: winners mutate, losers copy their winner."""
        self.egitim = np.copy(self.matrix)
        for j in range(0, self.x.__len__()):
            for i in range(0, self.populasyon):
                if self.kazan_kaybet[i] == 1:
                    # Winner improves itself through mutation.
                    if random.uniform(0, 1) < self.mutasyon:
                        self.egitim[i, j] = random.uniform(self.altdeger[j],
                                                           self.ustdeger[j])
                else:
                    # Loser learns the winning opponent's gene.
                    if random.uniform(0, 1) < self.ogren:
                        if (i % 2 == 0):
                            self.egitim[i, j] = self.matrix[i + 1, j]
                        else:
                            self.egitim[i, j] = self.matrix[i - 1, j]
        # Append the freshly trained duelists to the population.
        self.matrix = np.concatenate((self.matrix, self.egitim), axis=0)

    def ele(self):
        """Eliminate the surplus: keep only the best `populasyon` columns."""
        self.matrix = self.matrix[:, :self.populasyon]

    def sonuc_goster(self):
        """Print the best solution found and plot the convergence curves."""
        sonuc = self.en_iyi_cozum[self.en_iyi_cozum[:, 0].argsort()]
        print("En iyi cozum degerleri:", sonuc[0][1::], "En iyi cozum", sonuc[0][0])
        # BUGFIX: was `fig = fig = mp.figure()` (duplicated assignment).
        fig = mp.figure()
        ax1 = fig.add_subplot(211)
        ax1.plot(self.fmin, 'r.-')
        ax1.legend(['MinUygunluk'])
        ax2 = fig.add_subplot(212)
        ax2.plot(self.x_dizi, 'b.-')
        ax2.plot(self.y_dizi, 'g--')
        mp.legend(['x1', 'x2'])
        mp.show()
| true | true |
1c3875eec768725e04f838322599ca3ca2fc43f3 | 8,450 | py | Python | tools/test_crowdhuman.py | Ernstsen/Pedestron | 0c5aa35881561bcd0acf5de8939472efd6409256 | [
"Apache-2.0"
] | 594 | 2020-03-20T11:52:59.000Z | 2022-03-30T11:58:55.000Z | tools/test_crowdhuman.py | Ernstsen/Pedestron | 0c5aa35881561bcd0acf5de8939472efd6409256 | [
"Apache-2.0"
] | 131 | 2020-03-25T09:48:04.000Z | 2022-03-30T17:54:38.000Z | tools/test_crowdhuman.py | Ernstsen/Pedestron | 0c5aa35881561bcd0acf5de8939472efd6409256 | [
"Apache-2.0"
] | 128 | 2020-03-20T14:22:11.000Z | 2022-03-22T09:41:39.000Z | import argparse
import os
import os.path as osp
import shutil
import tempfile
import json
import time
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import load_checkpoint, get_dist_info
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmdet.apis import init_dist
from mmdet.core import results2json, coco_eval, wrap_fp16_model
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
from tools.crowdhuman.eval_demo import validate
def single_gpu_test(model, data_loader, show=False, save_img=False, save_img_dir=''):
    """Run inference over ``data_loader`` on a single GPU.

    Args:
        model: wrapped detector; put in eval mode and called without loss.
        data_loader: dataloader yielding test batches.
        show: if True, visualize results (model is called with rescale=False).
        save_img: if True, save the visualization images.
        save_img_dir: directory prefix for the saved result images.

    Returns:
        List with one detection result per batch, in loader order.
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=not show, **data)
        results.append(result)

        if show:
            # Visualization path; the result image is named after the batch index.
            model.module.show_result(data, result, dataset.img_norm_cfg, save_result=save_img, result_name=save_img_dir + '/' + str(i)+'.jpg')

        batch_size = data['img'][0].size(0)
        # Advance the progress bar once per image in the batch.
        for _ in range(batch_size):
            prog_bar.update()
    return results
def multi_gpu_test(model, data_loader, tmpdir=None):
    """Run inference with multiple GPUs (one process per rank).

    Each rank evaluates its own shard of ``data_loader``; the partial
    results are merged with :func:`collect_results`.  Only rank 0 shows a
    progress bar and returns the merged list; other ranks return None.
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    rank, world_size = get_dist_info()
    if rank == 0:
        prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
        results.append(result)

        if rank == 0:
            batch_size = data['img'][0].size(0)
            # Each rank processes one batch, so advance by batch * world_size.
            for _ in range(batch_size * world_size):
                prog_bar.update()

    # collect results from all ranks
    results = collect_results(results, len(dataset), tmpdir)

    return results
def collect_results(result_part, size, tmpdir=None):
    """Gather per-rank partial results into one ordered list on rank 0.

    Each rank dumps its partial results to a shared temporary directory;
    rank 0 loads all parts, interleaves them back into dataloader order,
    truncates padding, and returns the merged list (other ranks return
    None).

    Args:
        result_part: this rank's list of results.
        size: total number of dataset samples (used to drop pad samples).
        tmpdir: optional shared directory; created and broadcast if None.
    """
    rank, world_size = get_dist_info()
    # create a tmp dir if it is not specified
    if tmpdir is None:
        MAX_LEN = 512
        # 32 is whitespace
        dir_tensor = torch.full((MAX_LEN, ),
                                32,
                                dtype=torch.uint8,
                                device='cuda')
        if rank == 0:
            tmpdir = tempfile.mkdtemp()
            tmpdir = torch.tensor(
                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        # Rank 0's tmpdir path is broadcast to all ranks as a byte tensor.
        dist.broadcast(dir_tensor, 0)
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    # dump the part result to the dir
    mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
    dist.barrier()
    # collect all parts
    if rank != 0:
        return None
    else:
        # load results of all parts from tmp dir
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
            part_list.append(mmcv.load(part_file))
        # sort the results
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # the dataloader may pad some samples
        ordered_results = ordered_results[:size]
        # remove tmp dir
        shutil.rmtree(tmpdir)
        return ordered_results
def parse_args():
    """Parse command line arguments for the CrowdHuman test script.

    ``checkpoint`` is a path prefix: the script evaluates every epoch in
    ``[checkpoint_start, checkpoint_end)`` by appending the epoch number
    plus ``.pth`` (or ``.pth.stu`` when ``--mean_teacher`` is set).
    """
    parser = argparse.ArgumentParser(description='MMDet test detector')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('checkpoint_start', type=int, default=1)
    parser.add_argument('checkpoint_end', type=int, default=100)
    parser.add_argument('--out', help='output result file')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
        help='eval types')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument('--save_img', action='store_true', help='save result image')
    parser.add_argument('--save_img_dir', type=str, help='the dir for result image', default='')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--mean_teacher', action='store_true', help='test the mean teacher pth')
    args = parser.parse_args()
    # torch.distributed launchers pass the rank via --local_rank; mirror it
    # into the environment for downstream utilities.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Evaluate a range of checkpoints as they appear on disk.

    Polls for checkpoint files ``<prefix><i>.pth`` (or ``.pth.stu`` when
    ``--mean_teacher`` is set) for i in [checkpoint_start, checkpoint_end),
    runs inference over the test set, dumps detections to ``--out`` as
    COCO-style JSON (category 1) and prints CrowdHuman MR metrics.
    """
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.json', '.pickle')):
        # Keep the message in sync with the extensions actually accepted
        # (the old text incorrectly demanded a "pkl" file).
        raise ValueError('The output file must be a .json or .pickle file.')

    for i in range(args.checkpoint_start, args.checkpoint_end):
        cfg = mmcv.Config.fromfile(args.config)
        # set cudnn_benchmark
        if cfg.get('cudnn_benchmark', False):
            torch.backends.cudnn.benchmark = True
        cfg.model.pretrained = None
        cfg.data.test.test_mode = True

        # init distributed env first, since logger depends on the dist info.
        if args.launcher == 'none':
            distributed = False
        else:
            distributed = True
            init_dist(args.launcher, **cfg.dist_params)

        # build the dataloader
        # TODO: support multiple images per gpu (only minor changes are needed)
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False)

        # build the model and load checkpoint
        model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        fp16_cfg = cfg.get('fp16', None)
        if fp16_cfg is not None:
            wrap_fp16_model(model)
        # Block until the checkpoint is fully written: wait for the file
        # itself, then for its successor (a later file implies the current
        # one is complete).
        if not args.mean_teacher:
            while not osp.exists(args.checkpoint + str(i) + '.pth'):
                time.sleep(5)
            while i+1 != args.checkpoint_end and not osp.exists(args.checkpoint + str(i+1) + '.pth'):
                time.sleep(5)
            checkpoint = load_checkpoint(model, args.checkpoint + str(i) + '.pth', map_location='cpu')
        else:
            while not osp.exists(args.checkpoint + str(i) + '.pth.stu'):
                time.sleep(5)
            while i+1 != args.checkpoint_end and not osp.exists(args.checkpoint + str(i+1) + '.pth.stu'):
                time.sleep(5)
            checkpoint = load_checkpoint(model, args.checkpoint + str(i) + '.pth.stu', map_location='cpu')
        # Ensure 'meta' exists WITHOUT clobbering class info saved by newer
        # checkpoints (the old code reset it unconditionally, so the CLASSES
        # branch below could never trigger).
        checkpoint.setdefault('meta', dict())
        # old versions did not save class info in checkpoints, this walkaround is
        # for backward compatibility
        if 'CLASSES' in checkpoint['meta']:
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            model.CLASSES = dataset.CLASSES

        if not distributed:
            model = MMDataParallel(model, device_ids=[0])
            outputs = single_gpu_test(model, data_loader, args.show, args.save_img, args.save_img_dir)
        else:
            model = MMDistributedDataParallel(model.cuda())
            outputs = multi_gpu_test(model, data_loader, args.tmpdir)

        # Flatten detections into COCO-style result dicts (category 1 = person).
        res = []
        for img_idx, boxes in enumerate(outputs):
            boxes = boxes[0]
            if isinstance(boxes, list):
                boxes = boxes[0]
            # Convert [x1, y1, x2, y2] to [x, y, w, h] in place.
            boxes[:, [2, 3]] -= boxes[:, [0, 1]]
            if len(boxes) > 0:
                for box in boxes:
                    temp = dict()
                    temp['image_id'] = img_idx + 1
                    temp['category_id'] = 1
                    temp['bbox'] = box[:4].tolist()
                    temp['score'] = float(box[4])
                    res.append(temp)

        with open(args.out, 'w') as f:
            json.dump(res, f)
        MRs = validate('datasets/crowdhuman/validation.json', args.out)
        print(MRs)
        print('Checkpoint %d: [Reasonable: %.2f%%], [Bare: %.2f%%], [Partial: %.2f%%], [Heavy: %.2f%%]'
              % (i, MRs[0] * 100, MRs[1] * 100, MRs[2] * 100, MRs[3] * 100))
# Allow importing this module without running the evaluation loop.
if __name__ == '__main__':
    main()
| 37.22467 | 142 | 0.60213 | import argparse
import os
import os.path as osp
import shutil
import tempfile
import json
import time
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import load_checkpoint, get_dist_info
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmdet.apis import init_dist
from mmdet.core import results2json, coco_eval, wrap_fp16_model
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
from tools.crowdhuman.eval_demo import validate
def single_gpu_test(model, data_loader, show=False, save_img=False, save_img_dir=''):
    """Run inference over ``data_loader`` on a single GPU/process.

    Args:
        model: wrapped detector; called in test mode (``return_loss=False``).
        show: visualize each result via ``model.module.show_result``.
        save_img: when showing, persist the visualization to disk.
        save_img_dir: directory for saved images (named ``<batch index>.jpg``).

    Returns:
        list: one result per batch, in loader order.
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            # rescale=not show: when visualizing, outputs stay at network
            # scale so they align with the rendered image.
            result = model(return_loss=False, rescale=not show, **data)
        results.append(result)
        if show:
            model.module.show_result(data, result, dataset.img_norm_cfg, save_result=save_img, result_name=save_img_dir + '/' + str(i)+'.jpg')
        # One progress tick per image in the batch.
        batch_size = data['img'][0].size(0)
        for _ in range(batch_size):
            prog_bar.update()
    return results
def multi_gpu_test(model, data_loader, tmpdir=None):
    """Distributed inference: each rank evaluates its shard of the test set.

    Rank 0 renders progress (advancing ``world_size`` ticks per batch, since
    ranks process batches in lockstep) and gathers every rank's partial
    results via ``collect_results``.

    Returns:
        list on rank 0 (full, dataset-ordered results); None on other ranks.
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    rank, world_size = get_dist_info()
    if rank == 0:
        prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
        results.append(result)
        if rank == 0:
            batch_size = data['img'][0].size(0)
            for _ in range(batch_size * world_size):
                prog_bar.update()
    # Gather per-rank partial results (returns None on ranks != 0).
    results = collect_results(results, len(dataset), tmpdir)
    return results
def collect_results(result_part, size, tmpdir=None):
    """Gather per-rank result lists into one ordered list on rank 0.

    When no ``tmpdir`` is given, rank 0 creates one and broadcasts its path to
    the other ranks through a fixed-size uint8 CUDA tensor padded with spaces.
    Every rank dumps its partial results as ``part_<rank>.pkl``; rank 0 then
    interleaves the parts back into dataset order and removes the directory.
    """
    rank, world_size = get_dist_info()
    if tmpdir is None:
        MAX_LEN = 512
        # 32 is ASCII space: padding that rstrip() removes after decoding.
        dir_tensor = torch.full((MAX_LEN, ),
                                32,
                                dtype=torch.uint8,
                                device='cuda')
        if rank == 0:
            tmpdir = tempfile.mkdtemp()
            tmpdir = torch.tensor(
                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        dist.broadcast(dir_tensor, 0)
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    # Dump this rank's partial results, then wait for every other rank.
    mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
    dist.barrier()
    if rank != 0:
        return None
    else:
        # Load every rank's part and interleave them (the sampler deals
        # samples round-robin across ranks, so zip restores dataset order).
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
            part_list.append(mmcv.load(part_file))
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # The dataloader may pad some samples; trim to the true dataset size.
        ordered_results = ordered_results[:size]
        shutil.rmtree(tmpdir)
        return ordered_results
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test detector')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('checkpoint_start', type=int, default=1)
parser.add_argument('checkpoint_end', type=int, default=100)
parser.add_argument('--out', help='output result file')
parser.add_argument(
'--eval',
type=str,
nargs='+',
choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
help='eval types')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--save_img', action='store_true', help='save result image')
parser.add_argument('--save_img_dir', type=str, help='the dir for result image', default='')
parser.add_argument('--tmpdir', help='tmp dir for writing some results')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--mean_teacher', action='store_true', help='test the mean teacher pth')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
if args.out is not None and not args.out.endswith(('.json', '.pickle')):
raise ValueError('The output file must be a pkl file.')
for i in range(args.checkpoint_start, args.checkpoint_end):
cfg = mmcv.Config.fromfile(args.config)
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
imgs_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
if not args.mean_teacher:
while not osp.exists(args.checkpoint + str(i) + '.pth'):
time.sleep(5)
while i+1 != args.checkpoint_end and not osp.exists(args.checkpoint + str(i+1) + '.pth'):
time.sleep(5)
checkpoint = load_checkpoint(model, args.checkpoint + str(i) + '.pth', map_location='cpu')
else:
while not osp.exists(args.checkpoint + str(i) + '.pth.stu'):
time.sleep(5)
while i+1 != args.checkpoint_end and not osp.exists(args.checkpoint + str(i+1) + '.pth.stu'):
time.sleep(5)
checkpoint = load_checkpoint(model, args.checkpoint + str(i) + '.pth.stu', map_location='cpu')
checkpoint['meta'] = dict()
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.save_img, args.save_img_dir)
else:
model = MMDistributedDataParallel(model.cuda())
outputs = multi_gpu_test(model, data_loader, args.tmpdir)
res = []
for id, boxes in enumerate(outputs):
boxes=boxes[0]
if type(boxes) == list:
boxes = boxes[0]
boxes[:, [2, 3]] -= boxes[:, [0, 1]]
if len(boxes) > 0:
for box in boxes:
temp = dict()
temp['image_id'] = id+1
temp['category_id'] = 1
temp['bbox'] = box[:4].tolist()
temp['score'] = float(box[4])
res.append(temp)
with open(args.out, 'w') as f:
json.dump(res, f)
MRs = validate('datasets/crowdhuman/validation.json', args.out)
print(MRs)
print('Checkpoint %d: [Reasonable: %.2f%%], [Bare: %.2f%%], [Partial: %.2f%%], [Heavy: %.2f%%]'
% (i, MRs[0] * 100, MRs[1] * 100, MRs[2] * 100, MRs[3] * 100))
if __name__ == '__main__':
main()
| true | true |
1c38768a0bf2c6600f6129ebdcec86e801059f7a | 262 | py | Python | setup.py | MarcMarabou/02476_ml_ops_project | 0a46bca731271287f6ba00270e8b18e76323fcae | [
"FTL"
] | null | null | null | setup.py | MarcMarabou/02476_ml_ops_project | 0a46bca731271287f6ba00270e8b18e76323fcae | [
"FTL"
] | 1 | 2022-01-20T09:16:37.000Z | 2022-01-20T09:16:37.000Z | setup.py | MarcMarabou/02476_ml_ops_project | 0a46bca731271287f6ba00270e8b18e76323fcae | [
"FTL"
] | null | null | null | from setuptools import find_packages, setup
# Cookiecutter-style placeholder metadata: the project's code is packaged
# under the name "src"; the fields below are meant to be filled in.
setup(
    name="src",
    packages=find_packages(),
    version="0.1.0",
    description="A short description of the project.",
    author="Your name (or your organization/company/team)",
    license="",
)
| 23.818182 | 60 | 0.652672 | from setuptools import find_packages, setup
setup(
name="src",
packages=find_packages(),
version="0.1.0",
description="A short description of the project.",
author="Your name (or your organization/company/team)",
license="",
)
| true | true |
1c387750188c0c363aa55562e890fe6e14eb7296 | 7,878 | py | Python | api/utils.py | josuelopes512/mastercode_films_api | ab8ea5ce55a1eeee90a1ec3c5447434ca37d83bb | [
"Apache-2.0"
] | null | null | null | api/utils.py | josuelopes512/mastercode_films_api | ab8ea5ce55a1eeee90a1ec3c5447434ca37d83bb | [
"Apache-2.0"
] | null | null | null | api/utils.py | josuelopes512/mastercode_films_api | ab8ea5ce55a1eeee90a1ec3c5447434ca37d83bb | [
"Apache-2.0"
] | null | null | null | from django.utils.text import slugify
from threading import Thread
from random import randint
from pathlib import Path
import environ, base64, requests as req
BASE_DIR = Path(__file__).resolve().parent.parent
env = environ.Env()
environ.Env.read_env(BASE_DIR / '.env')
API_KEY= env('API_KEY')
URL_DB = 'https://api.themoviedb.org/3'
def add_infos(instance, save=False, new_items=None):
    """Populate the extended TMDB detail fields on ``instance``.

    Fetches the movie payload via ``add_to_db`` (unless ``new_items`` is
    supplied) and copies the extended fields onto the model instance.

    Args:
        instance: movie model instance carrying ``movie_id``.
        save: persist the instance after assigning the fields.
        new_items: pre-fetched payload; filtered down to the known keys.

    Returns:
        The (optionally saved) instance.

    Bug fixed: the old implementation re-ran the same ten queryset checks on
    every recursive call with unchanged inputs, so it recursed forever as
    soon as any extended field was already populated in the database. The
    check now runs only on the first pass.
    """
    keys_d = [
        "budget", "homepage", "imdb_id", "production_companies",
        "production_countries", "revenue", "runtime", "spoken_languages",
        "status", "tagline"
    ]
    if new_items:
        items = new_items
        # Keep only the extended-detail keys.
        for key in list(items.keys()):
            if key not in keys_d:
                items.pop(key)
    else:
        items = add_to_db(instance.movie_id)
        obj = instance.__class__
        base_qs = obj.objects.filter(movie_id=instance.movie_id)
        # If any extended field is already populated for this movie, re-enter
        # exactly once with the fetched payload so it gets filtered to keys_d.
        if any(base_qs.exclude(**{field: None}).exists() for field in keys_d):
            return add_infos(instance, save=save, new_items=items)
    instance.budget = items.get('budget', None)
    instance.imdb_id = items.get('imdb_id', None)
    instance.homepage = items.get('homepage', None)
    instance.production_companies = items.get('production_companies', None)
    instance.production_countries = items.get('production_countries', None)
    instance.revenue = items.get('revenue', None)
    instance.runtime = items.get('runtime', None)
    instance.spoken_languages = items.get('spoken_languages', None)
    instance.status = items.get('status', None)
    instance.tagline = items.get('tagline', None)
    if save:
        instance.save()
    return instance
def slugify_inst_title(instance, save=False, new_slug=None):
    """Assign a unique slug (and its space-separated form) to ``instance``.

    Retries with a random numeric suffix until no other row uses the slug.
    """
    slug = new_slug if new_slug else slugify(instance.title)
    model = instance.__class__
    clash = model.objects.filter(slug=slug).exclude(id=instance.id)
    if clash.exists():
        candidate = "{0}-{1}".format(slug, randint(30000, 50000))
        return slugify_inst_title(instance, save=save, new_slug=candidate)
    instance.slug = slug
    instance.title_norm = slug.replace('-', ' ')
    if save:
        instance.save()
    return instance
def recommended_item(instance, save=False, new_rec=None):
    """Store the TMDB 'similar movies' id list on ``instance``.

    Args:
        instance: movie model instance carrying ``movie_id``.
        save: persist the instance after assignment.
        new_rec: precomputed id list; when falsy the list is fetched from TMDB.

    Returns:
        The (optionally saved) instance.

    Bug fixed: the old code recursed whenever another row held an identical
    ``recommended`` list, but the value never changed between calls, so that
    was an infinite recursion. Duplicate recommendation lists are legitimate
    (two movies can share similar titles), so no uniqueness check is done.
    """
    rec = new_rec if new_rec else get_rec_similar(instance.movie_id)
    instance.recommended = rec
    if save:
        instance.save()
    return instance
def backdrop_inst_b64(instance, save=False, new_bd=None):
    # Encode the instance's backdrop image as base64 and store it on
    # ``backdrop_b64`` (optionally saving the instance).
    if new_bd:
        bd = new_bd
    else:
        bd = jpg_to_base64(instance.backdrop_path)
    obj = instance.__class__
    # NOTE(review): this filters on backdrop_path but compares it against the
    # base64 payload; it likely should filter on backdrop_b64 — confirm intent.
    qs = obj.objects.filter(backdrop_path=bd).exclude(id=instance.id)
    if qs.exists():
        rand_int = randint(300_00, 500_00)
        # NOTE(review): appending a suffix corrupts the base64 payload; any
        # consumer that decodes backdrop_b64 will fail on such rows.
        bd = f"{bd}-{rand_int}"
        return backdrop_inst_b64(instance, save=save, new_bd=bd)
    instance.backdrop_b64 = bd
    if save:
        instance.save()
    return instance
def poster_inst_b64(instance, save=False, new_bd=None):
    # Encode the instance's poster image as base64 and store it on
    # ``poster_b64`` (optionally saving the instance).
    if new_bd:
        bd = new_bd
    else:
        bd = jpg_to_base64(instance.poster_path)
    obj = instance.__class__
    # NOTE(review): filters on poster_path yet compares against the base64
    # payload; poster_b64 looks like the intended field — confirm.
    qs = obj.objects.filter(poster_path=bd).exclude(id=instance.id)
    if qs.exists():
        rand_int = randint(300_00, 500_00)
        # NOTE(review): the random suffix makes the stored value invalid
        # base64; decoding it downstream will fail.
        bd = f"{bd}-{rand_int}"
        return poster_inst_b64(instance, save=save, new_bd=bd)
    instance.poster_b64 = bd
    if save:
        instance.save()
    return instance
def jpg_to_base64(link):
    """Download a TMDB image (w154 size) and return it base64-encoded.

    Args:
        link: poster/backdrop path such as ``/abc.jpg``.

    Returns:
        str: the base64 payload, or ``''`` when ``link`` is not a jpg path
        or the download fails. (The old code implicitly returned None for
        non-jpg links — leaking a non-string into callers — and raised
        IndexError on an empty string.)
    """
    if link and link.endswith('.jpg') and link.startswith('/'):
        try:
            img = req.get(f'https://image.tmdb.org/t/p/w154{link}')
            return base64.b64encode(img.content).decode('utf-8')
        except Exception:
            # Best effort: network/encoding failures degrade to ''.
            return ""
    return ""
def get_rec_similar(movie_id):
    """Return the ids of TMDB movies similar to ``movie_id`` ([] on failure)."""
    try:
        response = req.get(f"{URL_DB}/movie/{movie_id}/similar?api_key={API_KEY}&language=pt-BR&page=1")
        payload = response.json()
        return [entry['id'] for entry in payload['results']]
    except:
        # Any failure (network, JSON shape) degrades to an empty list.
        return []
def add_to_db(movie_id):
    """Fetch TMDB details for ``movie_id``, reshaped for the local model.

    Renames ``id`` -> ``movie_id`` and flattens ``genres`` into ``genre_ids``.
    Returns {} when the request or the reshaping fails.
    """
    try:
        details = req.get(f"{URL_DB}/movie/{movie_id}?api_key={API_KEY}&language=pt-BR").json()
        details['genre_ids'] = [genre['id'] for genre in details.pop('genres')]
        details['movie_id'] = details.pop('id')
        return details
    except Exception as e:
        print(f"CAIU AQUI ERROR: {e}")
        return {}
def get_add_trending(qtd=10):
    """Collect trending-movie entries from TMDB pages 1 through qtd-1.

    Pages that fail to download or parse are skipped silently.
    """
    collected = []
    for page in range(1, qtd):
        try:
            payload = req.get(f'{URL_DB}/trending/movie/week?api_key={API_KEY}&language=pt-BR&page={page}&include_adult=true').json()
            collected.extend(payload['results'])
        except:
            continue
    return collected
###########################################################################################################################
def save_banco(dta):
    # Persist one TMDB movie payload through the local REST API.
    # Renames 'id' -> 'movie_id', skips movies already stored, and appends
    # failures to local log files instead of raising (runs inside threads).
    try:
        dta['movie_id'] = dta['id']
        dta.pop('id')
        # Only POST when the movie is not already in the database.
        teste = req.get(f"http://localhost:8000/api/get_id/{dta['movie_id']}")
        if teste.status_code != 200:
            teste = req.post("http://localhost:8000/api/post_v2", json=dta)
            if teste.status_code != 200:
                with open("error_400_z.json", "a") as f:
                    f.write(f"{dta['movie_id']} ------ {teste.json()}\n")
    except Exception as e:
        # NOTE(review): if the rename above is what failed (no 'id' key),
        # dta['movie_id'] is missing here and this handler itself raises —
        # confirm inputs always carry 'id'.
        with open("errors_z.json", "a") as f:
            f.write(f"ERROR: {e}, movie_id: {dta['movie_id']}\n")
def add_movie_id(movie_id):
    # Persist a movie and its TMDB 'similar' movies, one thread per save.
    # Both sections are best-effort: any failure is silently swallowed.
    try:
        movie_i_req = req.get(f"{URL_DB}/movie/{movie_id}?api_key={API_KEY}&language=pt-BR")
        movie_i = movie_i_req.json()
        Thread(target=save_banco, args=(movie_i,)).start()
    except:
        pass
    try:
        # Also queue every movie TMDB lists as similar to this one.
        recomendados = req.get(f"{URL_DB}/movie/{movie_id}/similar?api_key={API_KEY}&language=pt-BR&page=1")
        filmes_json = recomendados.json()
        filmes_json = filmes_json['results']
        for mov in filmes_json:
            Thread(target=save_banco, args=(mov,)).start()
    except:
        pass
def trending_movie(ini, fim):
    # Walk weekly-trending pages [ini, fim] (inclusive) and persist every
    # listed movie via add_movie_id, one thread per movie.
    for i in range(ini, fim+1):
        try:
            pag = req.get(f'{URL_DB}/trending/movie/week?api_key={API_KEY}&language=pt-BR&page={i}&include_adult=true')
            json_pag = pag.json()
            data = json_pag['results']
            for dta in data:
                Thread(target=add_movie_id, args=(dta['id'],)).start()
        except:
            # Failed pages are skipped; remaining pages are still processed.
            continue
from threading import Thread
from random import randint
from pathlib import Path
import environ, base64, requests as req
BASE_DIR = Path(__file__).resolve().parent.parent
env = environ.Env()
environ.Env.read_env(BASE_DIR / '.env')
API_KEY= env('API_KEY')
URL_DB = 'https://api.themoviedb.org/3'
def add_infos(instance, save=False, new_items=None):
keys_d = [
"budget", "homepage", "imdb_id", "production_companies",
"production_countries", "revenue", "runtime", "spoken_languages",
"status", "tagline"
]
if new_items:
items = new_items
for i in list(items.keys()):
if not i in keys_d:
items.pop(i)
else:
items = add_to_db(instance.movie_id)
obj = instance.__class__
qs_budget = obj.objects.filter(movie_id=instance.movie_id).exclude(budget=None)
qs_homepage = obj.objects.filter(movie_id=instance.movie_id).exclude(homepage=None)
qs_imdb_id = obj.objects.filter(movie_id=instance.movie_id).exclude(imdb_id=None)
qs_production_companies = obj.objects.filter(movie_id=instance.movie_id).exclude(production_companies=None)
qs_production_countries = obj.objects.filter(movie_id=instance.movie_id).exclude(production_countries=None)
qs_revenue = obj.objects.filter(movie_id=instance.movie_id).exclude(revenue=None)
qs_runtime = obj.objects.filter(movie_id=instance.movie_id).exclude(runtime=None)
qs_spoken_languages = obj.objects.filter(movie_id=instance.movie_id).exclude(spoken_languages=None)
qs_status = obj.objects.filter(movie_id=instance.movie_id).exclude(status=None)
qs_tagline = obj.objects.filter(movie_id=instance.movie_id).exclude(tagline=None)
in_conds = [
qs_budget.exists(),
qs_homepage.exists(),
qs_imdb_id.exists(),
qs_production_companies.exists(),
qs_production_countries.exists(),
qs_revenue.exists(),
qs_runtime.exists(),
qs_spoken_languages.exists(),
qs_status.exists(),
qs_tagline.exists()
]
if any(in_conds):
return add_infos(instance, save=save, new_items=items)
instance.budget = items.get('budget', None)
instance.imdb_id = items.get('imdb_id', None)
instance.homepage = items.get('homepage', None)
instance.production_companies = items.get('production_companies', None)
instance.production_countries = items.get('production_countries', None)
instance.revenue = items.get('revenue', None)
instance.runtime = items.get('runtime', None)
instance.spoken_languages = items.get('spoken_languages', None)
instance.status = items.get('status', None)
instance.tagline = items.get('tagline', None)
if save:
instance.save()
return instance
def slugify_inst_title(instance, save=False, new_slug=None):
if new_slug:
slug = new_slug
else:
slug = slugify(instance.title)
obj = instance.__class__
qs = obj.objects.filter(slug=slug).exclude(id=instance.id)
if qs.exists():
rand_int = randint(300_00, 500_00)
slug = f"{slug}-{rand_int}"
return slugify_inst_title(instance, save=save, new_slug=slug)
instance.slug = slug
instance.title_norm = slug.replace('-', ' ')
if save:
instance.save()
return instance
def recommended_item(instance, save=False, new_rec=None):
if new_rec:
rec = new_rec
else:
rec = get_rec_similar(instance.movie_id)
obj = instance.__class__
qs = obj.objects.filter(recommended=rec).exclude(id=instance.id)
if qs.exists():
return recommended_item(instance, save=save, new_rec=rec)
instance.recommended = rec
if save:
instance.save()
return instance
def backdrop_inst_b64(instance, save=False, new_bd=None):
if new_bd:
bd = new_bd
else:
bd = jpg_to_base64(instance.backdrop_path)
obj = instance.__class__
qs = obj.objects.filter(backdrop_path=bd).exclude(id=instance.id)
if qs.exists():
rand_int = randint(300_00, 500_00)
bd = f"{bd}-{rand_int}"
return backdrop_inst_b64(instance, save=save, new_bd=bd)
instance.backdrop_b64 = bd
if save:
instance.save()
return instance
def poster_inst_b64(instance, save=False, new_bd=None):
if new_bd:
bd = new_bd
else:
bd = jpg_to_base64(instance.poster_path)
obj = instance.__class__
qs = obj.objects.filter(poster_path=bd).exclude(id=instance.id)
if qs.exists():
rand_int = randint(300_00, 500_00)
bd = f"{bd}-{rand_int}"
return poster_inst_b64(instance, save=save, new_bd=bd)
instance.poster_b64 = bd
if save:
instance.save()
return instance
def jpg_to_base64(link):
if '.jpg' in link[-4:] and '/' in link[0]:
try:
img = req.get(f'https://image.tmdb.org/t/p/w154{link}')
data = base64.b64encode(img.content).decode('utf-8')
return data
except:
return ""
def get_rec_similar(movie_id):
try:
recomendados = req.get(f"{URL_DB}/movie/{movie_id}/similar?api_key={API_KEY}&language=pt-BR&page=1")
filmes_json = recomendados.json()
filmes_json = filmes_json['results']
ids = [id['id'] for id in filmes_json]
return ids
except:
return []
def add_to_db(movie_id):
try:
movie_i_req = req.get(f"{URL_DB}/movie/{movie_id}?api_key={API_KEY}&language=pt-BR")
movie_i = movie_i_req.json()
movie_i['genre_ids'] = [i['id'] for i in movie_i['genres']]
movie_i['movie_id'] = movie_i['id']
movie_i.pop('id')
movie_i.pop('genres')
return movie_i
except Exception as e:
print(f"CAIU AQUI ERROR: {e}")
return {}
def get_add_trending(qtd=10):
result = []
for i in range(1, qtd):
try:
pag = req.get(f'{URL_DB}/trending/movie/week?api_key={API_KEY}&language=pt-BR&page={i}&include_adult=true')
json_pag = pag.json()
result += json_pag['results']
except:
continue
return result
| true | true |
1c387750eed6826249cb679584ede439dce7288e | 1,592 | py | Python | likeapp/utils.py | verhovensky/likeapprestapi | 4a87690a37568041d5498c8b94259704af080af1 | [
"MIT"
] | null | null | null | likeapp/utils.py | verhovensky/likeapprestapi | 4a87690a37568041d5498c8b94259704af080af1 | [
"MIT"
] | null | null | null | likeapp/utils.py | verhovensky/likeapprestapi | 4a87690a37568041d5498c8b94259704af080af1 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from .models import Like
# import datetime
# Bishkek Time zone
# import pytz
# timezone = pytz.timezone("Asia/Bishkek")
User = get_user_model()
def add_like(obj, user):
    """Attach a like from ``user`` to ``obj`` (idempotent).

    Returns the existing or newly created Like row.
    """
    content_type = ContentType.objects.get_for_model(obj)
    like, _created = Like.objects.get_or_create(
        content_type=content_type, object_id=obj.id, user=user)
    return like
def remove_like(obj, user):
    """Drop ``user``'s like on ``obj``; a no-op when no like exists."""
    content_type = ContentType.objects.get_for_model(obj)
    Like.objects.filter(
        content_type=content_type, object_id=obj.id, user=user).delete()
def is_liked(obj, user) -> bool:
    """Tell whether ``user`` has liked ``obj`` (False for falsy users)."""
    if not user:
        return False
    content_type = ContentType.objects.get_for_model(obj)
    return Like.objects.filter(
        content_type=content_type, object_id=obj.id, user=user).exists()
# Validate input datetime passed in request.data
TIME_FORMATS = ['%Y-%m-%d',
'%Y %m %d',
'%Y-%m-%d %H',
'%Y-%m-%d %H:%M',
'%Y %m %d %H:%M',
'%Y-%m-%d %H:%M']
# in case we need more precise statistics
# def is_valid_datetime(time):
# if time is None:
# return None
# for time_format in TIME_FORMATS:
# try:
# obj_dt = datetime.datetime.strptime(time, time_format).astimezone(timezone)
# day = obj_dt.date()
# return day
# except ValueError as e:
# return e
| 26.533333 | 89 | 0.612437 | from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from .models import Like
User = get_user_model()
def add_like(obj, user):
obj_type = ContentType.objects.get_for_model(obj)
like, is_created = Like.objects.get_or_create(
content_type=obj_type, object_id=obj.id, user=user)
return like
def remove_like(obj, user):
obj_type = ContentType.objects.get_for_model(obj)
Like.objects.filter(
content_type=obj_type, object_id=obj.id, user=user
).delete()
def is_liked(obj, user) -> bool:
if not user:
return False
obj_type = ContentType.objects.get_for_model(obj)
likes = Like.objects.filter(
content_type=obj_type, object_id=obj.id, user=user)
return likes.exists()
TIME_FORMATS = ['%Y-%m-%d',
'%Y %m %d',
'%Y-%m-%d %H',
'%Y-%m-%d %H:%M',
'%Y %m %d %H:%M',
'%Y-%m-%d %H:%M']
| true | true |
1c387a98241666d68205340ff5d7afecd591a0f2 | 576 | py | Python | unit 9/exc. 9.2.3.py | AviKalPython/self.py | 44f8de33797a9ea28bbd1e01006920ba7c818b97 | [
"MIT"
] | null | null | null | unit 9/exc. 9.2.3.py | AviKalPython/self.py | 44f8de33797a9ea28bbd1e01006920ba7c818b97 | [
"MIT"
] | null | null | null | unit 9/exc. 9.2.3.py | AviKalPython/self.py | 44f8de33797a9ea28bbd1e01006920ba7c818b97 | [
"MIT"
] | null | null | null | # exc. 9.2.3
def who_is_missing(file_name):
    """Find the smallest number missing from a comma-separated file.

    Reads integers from ``file_name``, locates the first value in
    [min, max] that is absent, and writes it to the fixed output file.

    Raises:
        ValueError: when no number is missing. (The old code crashed with
        an unrelated NameError in that case, and it only scanned a window
        of len(list) candidates with O(n^2) list-membership tests.)
    """
    with open(file_name, "r") as find_the_num:
        numbers = find_the_num.read()
    number_list = [int(x) for x in numbers.split(",")]
    present = set(number_list)  # O(1) membership instead of O(n) list scans
    found = None
    for candidate in range(min(number_list), max(number_list) + 1):
        if candidate not in present:
            found = candidate
            break
    if found is None:
        raise ValueError("no number is missing from " + file_name)
    # Output path is fixed by the exercise statement.
    with open(r"C:\Users\user\Desktop\found.txt", "w") as new_text:
        new_text.write(str(found))
def main():
    # Input location is hard-coded for the exercise environment
    # (a Windows desktop path).
    file_name = r"C:\Users\user\Desktop\findMe.txt"
    who_is_missing(file_name)
if __name__ == '__main__':
main() | 23.04 | 58 | 0.663194 |
def who_is_missing(file_name):
find_the_num = open(file_name, "r")
numbers = find_the_num.read()
number_list = [int(x) for x in numbers.split(",")]
number_list.sort()
i = number_list[0]
for num in number_list:
if i not in number_list:
found = i
i += 1
find_the_num.close()
new_text = open(r"C:\Users\user\Desktop\found.txt", "w")
new_text.write(str(found))
new_text.close()
def main():
file_name = r"C:\Users\user\Desktop\findMe.txt"
who_is_missing(file_name)
if __name__ == '__main__':
main() | true | true |
1c387ab39db476e5a6f23a51533f27992deec27c | 749 | py | Python | daoagent/log.py | Symantec/dao-agent | 601ac49d2512b539c58a7026b1f8fb76645ba9a6 | [
"Apache-2.0"
] | null | null | null | daoagent/log.py | Symantec/dao-agent | 601ac49d2512b539c58a7026b1f8fb76645ba9a6 | [
"Apache-2.0"
] | null | null | null | daoagent/log.py | Symantec/dao-agent | 601ac49d2512b539c58a7026b1f8fb76645ba9a6 | [
"Apache-2.0"
] | 2 | 2019-09-18T22:51:31.000Z | 2021-02-26T10:19:32.000Z | # Copyright 2016 Symantec, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
logging.basicConfig(filename='/tmp/validation.log', level=logging.DEBUG)
def get_logger(*args, **kwargs):
    """Return the stdlib logger for the given name (thin wrapper with the
    same signature as :func:`logging.getLogger`)."""
    logger = logging.getLogger(*args, **kwargs)
    return logger
import logging
logging.basicConfig(filename='/tmp/validation.log', level=logging.DEBUG)
def get_logger(*args, **kwargs):
return logging.getLogger(*args, **kwargs) | true | true |
1c387b3d34a16ac37f56f4e5e23e7b8e09236e94 | 11,755 | py | Python | src/plugins/NextStep/NextStep/__init__.py | rpicard92/petri-net-webgme-app | 3ab1808cd4f1f5bb516be66b39daf7a74b8660c5 | [
"MIT"
] | 1 | 2020-09-09T01:23:04.000Z | 2020-09-09T01:23:04.000Z | src/plugins/NextStep/NextStep/__init__.py | rpicard92/petri-net-webgme-app | 3ab1808cd4f1f5bb516be66b39daf7a74b8660c5 | [
"MIT"
] | null | null | null | src/plugins/NextStep/NextStep/__init__.py | rpicard92/petri-net-webgme-app | 3ab1808cd4f1f5bb516be66b39daf7a74b8660c5 | [
"MIT"
] | null | null | null | """
This is where the implementation of the plugin code goes.
The NextStep-class is imported from both run_plugin.py and run_debug.py
"""
import sys
import logging
import random
from webgme_bindings import PluginBase
# Setup a logger
logger = logging.getLogger('NextStep')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout) # By default it logs to stderr..
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
class NextStep(PluginBase):
def main(self):
core = self.core
root_node = self.root_node
active_node = self.active_node
#global pos_fire
#name = core.get_attribute(active_node, 'name')
#logger.info('ActiveNode at "{0}" has name {1}'.format(core.get_path(active_node), name))
#core.set_attribute(active_node, 'name', 'newName')
#commit_info = self.util.save(root_node, self.commit_hash, 'master', 'Python plugin updated the model')
#logger.info('committed :{0}'.format(commit_info))
pos_fire={}
drains={}
feeds={}
        def string_to_dict(input_string):
            # Parse a marking string like 'color:count;color:count' into
            # {color: int(count)}.
            logger.info('yeah....about that')  # NOTE(review): leftover debug logging
            dict_ret={}
            str_ar=input_string.split(';')
            logger.info('hmmm')  # NOTE(review): leftover debug logging
            for entry in str_ar:
                # Each entry is '<name>:<integer count>'.
                dict_ret[entry.split(':')[0]]=int(entry.split(':')[1])
            logger.info('k')  # NOTE(review): leftover debug logging
            return dict_ret
        def cap_string_to_dict(col_string, cap_string):
            # Parse token counts from col_string, then clamp each color's
            # count to the capacity given in cap_string ('color:cap;...').
            dict_ret=string_to_dict(col_string)
            cap_ar=cap_string.split(';')
            for entry in cap_ar:
                try:
                    # Clamp to the capacity when the token count exceeds it.
                    if dict_ret[entry.split(':')[0]]>int(entry.split(':')[1]):
                        dict_ret[entry.split(':')[0]]=int(entry.split(':')[1])
                except:
                    # Color absent from the tokens (or malformed entry):
                    # record zero tokens for it.
                    dict_ret[entry.split(':')[0]]=0
            return dict_ret
        def dict_add(tok_dict, col_string, cap_string):
            # Merge the capacity-clamped token counts parsed from
            # (col_string, cap_string) into tok_dict, summing per color.
            # Mutates and returns tok_dict.
            col_dict=cap_string_to_dict(col_string, cap_string)
            for entry in col_dict:
                if entry in tok_dict:
                    tok_dict[entry]=tok_dict[entry]+col_dict[entry]
                else:
                    tok_dict[entry]=col_dict[entry]
            return tok_dict
def per_thing(node):
    """Visitor callback run on every model node (via self.util.traverse).

    Classifies connection nodes by meta-type:
    - 'PlaceToTransition': records the transition's enablement data in
      pos_fire (threshold dict + accumulated capped token counts) and
      remembers the source place in drains.
    - any other connection (transition -> place): records the destination
      place and the arc weight in feeds.
    Nodes without 'dst'/'src' pointers fall into the outer except and are
    skipped.

    NOTE(review): mutates the enclosing pos_fire/drains/feeds dicts; this
    works without `nonlocal` because they are only mutated, never rebound.
    """
    logger.info('huge success')
    try:
        dst_node=core.load_pointer(node, 'dst')
        src_node=core.load_pointer(node, 'src')
        dst_path=core.get_path(dst_node)
        src_path=core.get_path(src_node)
        meta_tr=core.get_meta_type(node)
        if core.get_attribute(meta_tr, 'name')=='PlaceToTransition':
            # Data needed to decide whether the transition is enabled:
            # destination thresholds, source tokens, and the arc's cap.
            thresh=core.get_attribute(dst_node, 'Tokens')
            tokens=core.get_attribute(src_node, 'Tokens')
            cap=core.get_attribute(node, 'Tokens')
            logger.info('OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO')
            logger.info(thresh)
            logger.info(tokens)
            logger.info(cap)
            if dst_path in pos_fire:
                logger.info('AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA')
                # Transition already seen: keep its threshold dict and
                # accumulate the capped token counts from this place too.
                pos_fire[dst_path]=[pos_fire[dst_path][0], dict_add(pos_fire[dst_path][1], tokens, cap)]
            else:
                logger.info('BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB')
                # First time this transition is seen: parse its threshold
                # and the (capped) token contribution of this place.
                try:
                    pos_fire[dst_path]=[string_to_dict(thresh), cap_string_to_dict(tokens, cap)]
                except Exception as e:
                    logger.info(e)
            # Remember which place feeds this transition, so tokens can be
            # drained from it when the transition fires.
            if dst_path in drains:
                logger.info('ROUND 2 BBYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY')
                drains[dst_path].append((src_path, cap))
            else:
                logger.info('i did it reddittttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt')
                drains[dst_path]=[(src_path, cap)]
        else:
            # Transition -> place edge: record where tokens flow (and with
            # what weight) when the transition fires.
            try:
                weight=core.get_attribute(node, 'Tokens')
                thresh_path=core.get_path(src_node)  # NOTE(review): unused
                if src_path in feeds:
                    feeds[src_path].append((dst_path, weight))
                else:
                    feeds[src_path]=[(dst_path, weight)]
            except Exception as e:
                logger.info(core.get_path(node))
                logger.info(e)
                logger.info("WHYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY DID THIS HAPPEN")
    except Exception as e:
        # Non-connection nodes have no 'dst'/'src' pointers; skip them.
        logger.info(core.get_path(node))
        logger.info(core.get_attribute(node, 'name'))
        logger.info(e)
        logger.info('NOT A POINTER NODE THINGY')
#end of functions
# reset attribute on network
def reset_addtribute(node, attribute):
    """Clear *attribute* (set it to '') on *node*, but only when the node
    is named 'Network'; otherwise just log and do nothing."""
    if core.get_attribute(node, 'name') == 'Network':
        core.set_attribute(node, attribute, '')
    else:
        logger.info('[INFO]: Not the Network node')
reset_addtribute(active_node, 'StateSpace')
reset_addtribute(active_node, 'IsDeterministic')
self.util.traverse(active_node, per_thing)
enabled=[]
for i in pos_fire:
logger.info(i)
logger.info(pos_fire[i])
logger.info('----------------------------------------------------------------------------------')
for path, dicts in pos_fire.items():
thresh=dicts[0]
vals=dicts[1]
add_this=True
for key in thresh:
if key in vals:
if (vals[key]<thresh[key] and thresh[key]>0) or (vals[key]>= (-1)*thresh[key] and thresh[key]<0):
add_this=False
break
#else this requirement is satisfied
elif thresh[key]>0:
add_this=False
break
if add_this:
enabled.append(path)
logger.info('HI I RAN HERE')
logger.info(len(enabled))
for i in enabled:
logger.info(i)
if len(enabled)==0:
return
meh=random.randint(0, len(enabled)-1)
to_drain=drains[enabled[meh]]
to_feed=feeds[enabled[meh]]
logger.info('NOTICE ME SENPAI!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
logger.info(drains)
logger.info(to_drain)
for d in to_drain:
#logger.info(core.get_path(root_node))
#logger.info(d)
cur=core.load_by_path(root_node, d[0])
token_list=[]
if not d[1]=='':
to_change=core.get_attribute(cur, 'Tokens')
weights={}
if not to_change=='':
for entry in to_change.split(';'):
weights[entry.split(':')[0]]=int(entry.split(':')[1])
for entry in d[1].split(';'):
cur_key, cur_val=entry.split(':')
cur_val=int(cur_val)
if cur_key in weights:
if weights[cur_key]-cur_val>0:
token_list.append(cur_key+':'+str(weights[cur_key]-cur_val))
core.set_attribute(cur, 'Tokens', ';'.join(token_list))
for fe in to_feed:
f=fe[0]
weights={}
str_ar=fe[1].split(';')
logger.info('hmmm')
for entry in str_ar:
weights[entry.split(':')[0]]=int(entry.split(':')[1])
cur=core.load_by_path(root_node, f)
to_change=core.get_attribute(cur, 'Tokens')
if not to_change=='':
str_ar=to_change.split(';')
logger.info('we are at this path: {0}'.format(f))
logger.info('the following is to_change->{0}END'.format(to_change))
for entry in str_ar:
logger.info('the following is entry->{0}END'.format(entry))
if entry.split(':')[0] in weights:
logger.info('the following is what we had->{0}END'.format(weights[entry.split(':')[0]]))
logger.info('the following is what we add->{0}END'.format(int(entry.split(':')[1])))
weights[entry.split(':')[0]]=weights[entry.split(':')[0]]+int(entry.split(':')[1])
else:
weights[entry.split(':')[0]]=int(entry.split(':')[1])
new_string=[]
logger.info('heyo friendo find me pls')
logger.info(to_change)
logger.info('wat')
for col in weights:
logger.info(col)
new_string.append(col+':'+str(weights[col]))
core.set_attribute(cur, 'Tokens', (';').join(new_string))
commit_info = self.util.save(root_node, self.commit_hash, 'master', 'Python plugin updated the model')
| 47.979592 | 335 | 0.527946 | import sys
import logging
import random
from webgme_bindings import PluginBase
logger = logging.getLogger('NextStep')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
class NextStep(PluginBase):
def main(self):
core = self.core
root_node = self.root_node
active_node = self.active_node
pos_fire={}
drains={}
feeds={}
def string_to_dict(input_string):
logger.info('yeah....about that')
dict_ret={}
str_ar=input_string.split(';')
logger.info('hmmm')
for entry in str_ar:
dict_ret[entry.split(':')[0]]=int(entry.split(':')[1])
logger.info('k')
return dict_ret
def cap_string_to_dict(col_string, cap_string):
dict_ret=string_to_dict(col_string)
cap_ar=cap_string.split(';')
for entry in cap_ar:
try:
if dict_ret[entry.split(':')[0]]>int(entry.split(':')[1]):
dict_ret[entry.split(':')[0]]=int(entry.split(':')[1])
except:
dict_ret[entry.split(':')[0]]=0
return dict_ret
def dict_add(tok_dict, col_string, cap_string):
col_dict=cap_string_to_dict(col_string, cap_string)
for entry in col_dict:
if entry in tok_dict:
tok_dict[entry]=tok_dict[entry]+col_dict[entry]
else:
tok_dict[entry]=col_dict[entry]
return tok_dict
def per_thing(node):
logger.info('huge success')
try:
dst_node=core.load_pointer(node, 'dst')
src_node=core.load_pointer(node, 'src')
dst_path=core.get_path(dst_node)
src_path=core.get_path(src_node)
meta_tr=core.get_meta_type(node)
if core.get_attribute(meta_tr, 'name')=='PlaceToTransition':
thresh=core.get_attribute(dst_node, 'Tokens')
tokens=core.get_attribute(src_node, 'Tokens')
cap=core.get_attribute(node, 'Tokens')
logger.info('OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO')
logger.info(thresh)
logger.info(tokens)
logger.info(cap)
if dst_path in pos_fire:
logger.info('AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA')
pos_fire[dst_path]=[pos_fire[dst_path][0], dict_add(pos_fire[dst_path][1], tokens, cap)]
else:
logger.info('BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB')
try:
pos_fire[dst_path]=[string_to_dict(thresh), cap_string_to_dict(tokens, cap)]
except Exception as e:
logger.info(e)
if dst_path in drains:
logger.info('ROUND 2 BBYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY')
drains[dst_path].append((src_path, cap))
else:
logger.info('i did it reddittttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt')
drains[dst_path]=[(src_path, cap)]
else:
try:
weight=core.get_attribute(node, 'Tokens')
thresh_path=core.get_path(src_node)
if src_path in feeds:
feeds[src_path].append((dst_path, weight))
else:
feeds[src_path]=[(dst_path, weight)]
except Exception as e:
logger.info(core.get_path(node))
logger.info(e)
logger.info("WHYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY DID THIS HAPPEN")
except Exception as e:
logger.info(core.get_path(node))
logger.info(core.get_attribute(node, 'name'))
logger.info(e)
logger.info('NOT A POINTER NODE THINGY')
def reset_addtribute(node, attribute):
name = core.get_attribute(node, 'name')
if(name == 'Network'):
core.set_attribute(node, attribute, '')
else:
logger.info('[INFO]: Not the Network node')
reset_addtribute(active_node, 'StateSpace')
reset_addtribute(active_node, 'IsDeterministic')
self.util.traverse(active_node, per_thing)
enabled=[]
for i in pos_fire:
logger.info(i)
logger.info(pos_fire[i])
logger.info('----------------------------------------------------------------------------------')
for path, dicts in pos_fire.items():
thresh=dicts[0]
vals=dicts[1]
add_this=True
for key in thresh:
if key in vals:
if (vals[key]<thresh[key] and thresh[key]>0) or (vals[key]>= (-1)*thresh[key] and thresh[key]<0):
add_this=False
break
elif thresh[key]>0:
add_this=False
break
if add_this:
enabled.append(path)
logger.info('HI I RAN HERE')
logger.info(len(enabled))
for i in enabled:
logger.info(i)
if len(enabled)==0:
return
meh=random.randint(0, len(enabled)-1)
to_drain=drains[enabled[meh]]
to_feed=feeds[enabled[meh]]
logger.info('NOTICE ME SENPAI!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
logger.info(drains)
logger.info(to_drain)
for d in to_drain:
cur=core.load_by_path(root_node, d[0])
token_list=[]
if not d[1]=='':
to_change=core.get_attribute(cur, 'Tokens')
weights={}
if not to_change=='':
for entry in to_change.split(';'):
weights[entry.split(':')[0]]=int(entry.split(':')[1])
for entry in d[1].split(';'):
cur_key, cur_val=entry.split(':')
cur_val=int(cur_val)
if cur_key in weights:
if weights[cur_key]-cur_val>0:
token_list.append(cur_key+':'+str(weights[cur_key]-cur_val))
core.set_attribute(cur, 'Tokens', ';'.join(token_list))
for fe in to_feed:
f=fe[0]
weights={}
str_ar=fe[1].split(';')
logger.info('hmmm')
for entry in str_ar:
weights[entry.split(':')[0]]=int(entry.split(':')[1])
cur=core.load_by_path(root_node, f)
to_change=core.get_attribute(cur, 'Tokens')
if not to_change=='':
str_ar=to_change.split(';')
logger.info('we are at this path: {0}'.format(f))
logger.info('the following is to_change->{0}END'.format(to_change))
for entry in str_ar:
logger.info('the following is entry->{0}END'.format(entry))
if entry.split(':')[0] in weights:
logger.info('the following is what we had->{0}END'.format(weights[entry.split(':')[0]]))
logger.info('the following is what we add->{0}END'.format(int(entry.split(':')[1])))
weights[entry.split(':')[0]]=weights[entry.split(':')[0]]+int(entry.split(':')[1])
else:
weights[entry.split(':')[0]]=int(entry.split(':')[1])
new_string=[]
logger.info('heyo friendo find me pls')
logger.info(to_change)
logger.info('wat')
for col in weights:
logger.info(col)
new_string.append(col+':'+str(weights[col]))
core.set_attribute(cur, 'Tokens', (';').join(new_string))
commit_info = self.util.save(root_node, self.commit_hash, 'master', 'Python plugin updated the model')
| true | true |
1c387b757cb81b71f694b90c41926c678de27943 | 1,656 | py | Python | import.py | hornetmadness/pfsence-voucher-printer | 5dd2a36f7f44480875131ca1724072d4a4444d4a | [
"MIT"
] | 2 | 2020-07-04T23:44:55.000Z | 2021-06-03T20:05:12.000Z | import.py | hornetmadness/pfsence-voucher-printer | 5dd2a36f7f44480875131ca1724072d4a4444d4a | [
"MIT"
] | null | null | null | import.py | hornetmadness/pfsence-voucher-printer | 5dd2a36f7f44480875131ca1724072d4a4444d4a | [
"MIT"
] | null | null | null | import config
import logging
import argparse
import sys
import csv
from db import Vouchers, connect_db
from sqlalchemy.orm import sessionmaker
db=connect_db() #establish connection
Session = sessionmaker(bind=db)
session = Session()
def importCSV(file, time):
    """Bulk-import voucher ids from a CSV file.

    Each non-comment row's first column is taken as the voucher id (`vid`),
    with leading spaces stripped; every voucher gets the same `time`
    (minutes) value.  Rows starting with '#' are comments.  On success the
    vouchers are bulk-saved and committed in one transaction.

    :param file: path to the CSV file
    :param time: minutes assigned to every voucher on this roll
    """
    bulk_insert = []
    csv.register_dialect('myDialect',
                         skipinitialspace=True
                         )
    with open(file) as csvfile:
        reader = csv.reader(csvfile, dialect="myDialect")
        for row in reader:
            # FIX: blank lines yield empty rows from csv.reader, so the
            # old unconditional row[0] access raised IndexError.
            if not row or row[0].startswith("#"):
                continue
            # Build the kwargs per row (the old code reused one shared
            # dict, which is error-prone even though **kwargs copies it).
            bulk_insert.append(Vouchers(vid=row[0].lstrip(' '), time=time))
    if bulk_insert:
        session.bulk_save_objects(bulk_insert)
        session.commit()
        logging.info(f"Inserted {len(bulk_insert)} vouchers")
    else:
        logging.error(f"Failed to parse {file}")
if __name__ == "__main__":
    # Command-line entry point: parse options, configure logging, and
    # kick off the CSV import.
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", help="enable debug output", action="store_true")
    parser.add_argument("--file", help="Provide the path to the CSV", type=str)
    parser.add_argument("--minutes", help="How many minutes are assigned to this roll", type=int)
    args, unknown_args = parser.parse_known_args()

    # Debug mode raises the log level and turns on config/db tracing.
    if args.debug:
        config.debug = True
        config.db_debug = True
        level = logging.DEBUG
    else:
        level = logging.INFO

    logging.getLogger()
    logging.basicConfig(format='%(asctime)s %(message)s', level=level)
    logging.info("args: {}".format(args))

    # Both options are mandatory for an import run.
    if not args.file or not args.minutes:
        logging.error("Both --minutes and --file are requred input for importing")
        sys.exit(1)

    importCSV(args.file, args.minutes)
| 26.709677 | 95 | 0.694444 | import config
import logging
import argparse
import sys
import csv
from db import Vouchers, connect_db
from sqlalchemy.orm import sessionmaker
db=connect_db()
Session = sessionmaker(bind=db)
session = Session()
def importCSV(file, time):
bulkInsert=[]
keep={}
csv.register_dialect('myDialect',
skipinitialspace=True
)
with open(file) as csvfile:
reader = csv.reader(csvfile, dialect="myDialect")
for row in reader:
if row[0].startswith("#"):
continue
keep['vid'] = row[0].lstrip(' ')
keep['time'] = time
bulkInsert.append(Vouchers(**keep))
if bool(bulkInsert):
session.bulk_save_objects(bulkInsert)
session.commit()
logging.info(f"Inserted {len(bulkInsert)} vouchers")
else:
logging.error(f"Failed to parse {file}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--debug", help="enable debug output", action="store_true")
parser.add_argument("--file", help="Provide the path to the CSV", type=str)
parser.add_argument("--minutes", help="How many minutes are assigned to this roll", type=int)
args, unknown_args = parser.parse_known_args()
logging_level = logging.INFO
if args.debug:
config.debug = True
config.db_debug = True
logging_level = logging.DEBUG
logging.getLogger()
logging.basicConfig(format='%(asctime)s %(message)s', level=logging_level)
logging.info("args: {}".format(args))
if not args.file or not args.minutes:
logging.error("Both --minutes and --file are requred input for importing")
sys.exit(1)
importCSV(args.file, args.minutes)
| true | true |
1c387b91c5689545d6a13dcbc60bb9934ed15e83 | 890 | py | Python | model.py | rahulkumar1112/Audio-Classification | 589fccae5eeb9feaf04073b1243c0004759e3255 | [
"MIT"
] | 1 | 2019-06-21T14:43:48.000Z | 2019-06-21T14:43:48.000Z | Example Audio and Deep Learning/model.py | felipegetulio/Artificial-Neural-Networks---Projects | 04d16703ccc2fb10cc1ba92850364ea49b9a5bfa | [
"MIT"
] | null | null | null | Example Audio and Deep Learning/model.py | felipegetulio/Artificial-Neural-Networks---Projects | 04d16703ccc2fb10cc1ba92850364ea49b9a5bfa | [
"MIT"
] | null | null | null | import os
from scipy.io import wavfile
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from keras.layers import Conv2D, MaxPool2D, Flatten, LSTM
from keras.layers import Dropout, Dense, TimeDistributed
from keras.models import Sequential
from keras.utils import to_categorical
from sklearn.utils.class_weight import compute_class_weight
from tqdm import tqdm
from python_speech_features import mfcc
# Load per-file metadata; 'fname' (the wav file name) becomes the index.
df = pd.read_csv('instruments.csv')
df.set_index('fname', inplace=True)
# Compute each clip's duration in seconds from the cleaned wav files
# (number of samples / sample rate).
for f in df.index:
    rate, signal = wavfile.read('clean/'+f)
    df.at[f, 'length'] = signal.shape[0]/rate
classes = list(np.unique(df.label))
# Mean clip length per instrument class, used as the pie-slice sizes.
class_dist = df.groupby(['label'])['length'].mean()
fig, ax = plt.subplots()
ax.set_title('Class Distribution', y=1.08)
ax.pie(class_dist, labels=class_dist.index, autopct='%1.1f%%',
       shadow=False, startangle=90)
ax.axis('equal')
plt.show()
| 29.666667 | 62 | 0.75618 | import os
from scipy.io import wavfile
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from keras.layers import Conv2D, MaxPool2D, Flatten, LSTM
from keras.layers import Dropout, Dense, TimeDistributed
from keras.models import Sequential
from keras.utils import to_categorical
from sklearn.utils.class_weight import compute_class_weight
from tqdm import tqdm
from python_speech_features import mfcc
df = pd.read_csv('instruments.csv')
df.set_index('fname', inplace=True)
for f in df.index:
rate, signal = wavfile.read('clean/'+f)
df.at[f, 'length'] = signal.shape[0]/rate
classes = list(np.unique(df.label))
class_dist = df.groupby(['label'])['length'].mean()
fig, ax = plt.subplots()
ax.set_title('Class Distribution', y=1.08)
ax.pie(class_dist, labels=class_dist.index, autopct='%1.1f%%',
shadow=False, startangle=90)
ax.axis('equal')
plt.show()
| true | true |
1c387d96bc457f3a14396ce1eafbcba911ab775e | 2,317 | py | Python | chapter9-神经网络写诗(CharRNN)/utils.py | 1364354238/PYTORCH_LEARNING | d7ab877512ab41c80b37ab68bd1a42193916f31c | [
"MIT"
] | 137 | 2018-11-13T06:35:49.000Z | 2022-03-07T09:21:31.000Z | chapter9_CharRNN/utils.py | pythonProjectLearn/PytorchLearning | 835ce23656e5816f5fbc9018f85f8613bff8f24c | [
"MIT"
] | null | null | null | chapter9_CharRNN/utils.py | pythonProjectLearn/PytorchLearning | 835ce23656e5816f5fbc9018f85f8613bff8f24c | [
"MIT"
] | 54 | 2018-11-13T09:38:37.000Z | 2022-03-25T03:46:25.000Z | #coding:utf8
import visdom
import torch as t
import time
import torchvision as tv
import numpy as np
class Visualizer():
    '''
    Thin wrapper around the basic visdom operations.  The native visdom
    interface is still reachable through `self.vis.function`.
    '''
    def __init__(self, env='default', **kwargs):
        # visdom is (re-)imported locally so the class only needs it when
        # actually instantiated.
        import visdom
        self.vis = visdom.Visdom(env=env, **kwargs)
        # Per-trace point counter, used as the x coordinate,
        # e.g. {'loss': 23} means the next 'loss' point is the 23rd.
        self.index = {}
        self.log_text = ''
    def reinit(self,env='default',**kwargs):
        '''
        Re-create the visdom connection with a new configuration
        (e.g. to switch environments).  Returns self for chaining.
        '''
        self.vis = visdom.Visdom(env=env,**kwargs)
        return self
    def plot_many(self, d):
        '''
        Plot several scalars at once.
        @params d: dict (name, value), i.e. ('loss', 0.11)
        '''
        for k, v in d.items():
            self.plot(k, v)
    def img_many(self, d):
        # Show several images at once; d maps window name -> tensor.
        for k, v in d.items():
            self.img(k, v)
    def plot(self, name, y):
        '''
        Append one scalar point to the line plot `name`, e.g.
        self.plot('loss',1.00)
        '''
        x = self.index.get(name, 0)
        self.vis.line(Y=np.array([y]), X=np.array([x]),
                      win=name,
                      opts=dict(title=name),
                      # First point creates the window; later points append.
                      update=None if x == 0 else 'append'
                      )
        self.index[name] = x + 1
    def img(self, name, img_):
        '''
        Show a single image tensor, e.g.
        self.img('input_img',t.Tensor(64,64))
        '''
        # Add a channel dimension for 2-D (H, W) inputs.
        if len(img_.size())<3:
            img_ = img_.cpu().unsqueeze(0)
        self.vis.image(img_.cpu(),
                       win=name,
                       opts=dict(title=name)
                       )
    def img_grid_many(self,d):
        # Show several image grids at once; d maps window name -> batch.
        for k, v in d.items():
            self.img_grid(k, v)
    def img_grid(self, name, input_3d):
        '''
        Render a batch of images as one grid image, e.g. input(36,64,64)
        becomes a 6*6 grid where each cell is 64*64.  Values are clamped
        to [0, 1] before display.
        '''
        self.img(name, tv.utils.make_grid(
            input_3d.cpu()[0].unsqueeze(1).clamp(max=1,min=0)))
    def log(self,info,win='log_text'):
        '''
        Append a timestamped entry to the rolling HTML log window, e.g.
        self.log({'loss':1,'lr':0.0001})
        '''
        self.log_text += ('[{time}] {info} <br>'.format(
            time=time.strftime('%m%d_%H%M%S'),\
            info=info))
        self.vis.text(self.log_text,win='log_text')
    def __getattr__(self, name):
        # Fall through to the underlying visdom object for anything this
        # wrapper does not define.
        return getattr(self.vis, name)
| 25.184783 | 63 | 0.475615 |
import visdom
import torch as t
import time
import torchvision as tv
import numpy as np
class Visualizer():
def __init__(self, env='default', **kwargs):
import visdom
self.vis = visdom.Visdom(env=env, **kwargs)
self.index = {}
self.log_text = ''
def reinit(self,env='default',**kwargs):
self.vis = visdom.Visdom(env=env,**kwargs)
return self
def plot_many(self, d):
for k, v in d.items():
self.plot(k, v)
def img_many(self, d):
for k, v in d.items():
self.img(k, v)
def plot(self, name, y):
x = self.index.get(name, 0)
self.vis.line(Y=np.array([y]), X=np.array([x]),
win=name,
opts=dict(title=name),
update=None if x == 0 else 'append'
)
self.index[name] = x + 1
def img(self, name, img_):
if len(img_.size())<3:
img_ = img_.cpu().unsqueeze(0)
self.vis.image(img_.cpu(),
win=name,
opts=dict(title=name)
)
def img_grid_many(self,d):
for k, v in d.items():
self.img_grid(k, v)
def img_grid(self, name, input_3d):
self.img(name, tv.utils.make_grid(
input_3d.cpu()[0].unsqueeze(1).clamp(max=1,min=0)))
def log(self,info,win='log_text'):
self.log_text += ('[{time}] {info} <br>'.format(
time=time.strftime('%m%d_%H%M%S'),\
info=info))
self.vis.text(self.log_text,win='log_text')
def __getattr__(self, name):
return getattr(self.vis, name)
| true | true |
1c387f54b5725f7f26862a550a12f20335f91984 | 2,542 | py | Python | Generate battery report/Generate battery report.py | xinxin2021/my-projects | e2a95dff404c131c9795deea04829514a770e94d | [
"MIT"
] | null | null | null | Generate battery report/Generate battery report.py | xinxin2021/my-projects | e2a95dff404c131c9795deea04829514a770e94d | [
"MIT"
] | null | null | null | Generate battery report/Generate battery report.py | xinxin2021/my-projects | e2a95dff404c131c9795deea04829514a770e94d | [
"MIT"
] | null | null | null | # coding=utf-8
import os
import wx
def isi(path, name):
    """Return True when the directory *path* contains an entry *name*."""
    return name in os.listdir(path)
def isib(l, dx):
    """Return True when no element of *l* occurs in the string *dx*
    (i.e. *dx* is free of every forbidden character in *l*)."""
    return all(item not in dx for item in l)
class MyFrame(wx.Frame):
    """Single-window UI that saves a Windows battery report to C:\\ as an
    HTML file via `powercfg /batteryreport`.

    The user types a base file name; clicking Save validates it (reserved
    name, existing file, forbidden characters) and runs powercfg.
    """

    def __init__(self):
        super().__init__(None, title='Generate battery report', size=(350, 90))
        panel = wx.Panel(parent=self)
        self.icon = wx.Icon('battery icon.ico', wx.BITMAP_TYPE_ICO)
        self.SetIcon(self.icon)
        # FIX: user-facing label typo "Sava" -> "Save".
        self.path = wx.StaticText(panel, label='Save Path:C:\\')
        self.path2 = wx.StaticText(panel, label='.html')
        self.path3 = wx.TextCtrl(panel)
        self.path3.SetValue('batteryreport')
        self.button = wx.Button(panel, label='Save', pos=(100, 50))
        self.Bind(wx.EVT_BUTTON, self.on_click, self.button)
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add(self.path, flag=wx.LEFT)
        hbox.Add(self.path3, flag=wx.LEFT)
        hbox.Add(self.path2, flag=wx.LEFT)
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add(hbox, flag=wx.LEFT)
        vbox.Add(self.button, flag=wx.EXPAND, border=10)
        panel.SetSizer(vbox)

    def on_click(self, event):
        """Validate the chosen name, then generate and save the report."""
        base = self.path3.GetValue()
        name = base + '.html'
        # FIX: compare the raw base name; the original compared *after*
        # appending '.html', so the CON branch could never trigger.
        if base == 'CON':
            # NOTE(review): other Windows reserved device names (PRN, AUX,
            # NUL, COM1..) are not rejected -- confirm whether they should be.
            message = wx.MessageDialog(None, "You can't use CON as the file name!", 'WARNING', wx.OK | wx.ICON_WARNING)
            # FIX: ShowModel() was a typo that raised AttributeError at
            # runtime; the wx API method is ShowModal().
            if message.ShowModal() == wx.ID_OK:
                pass
        elif isi('C:\\', name):
            message = wx.MessageDialog(None, 'This HTML file already exists in disk C!', 'ERROR', wx.OK | wx.ICON_ERROR)
            if message.ShowModal() == wx.ID_OK:
                pass
        elif isib([' ', '/', '\\', '?', ':', '*', '"', '<', '>', '|'], base):
            # Name is clean: run powercfg.  The forbidden-character check
            # above keeps spaces/shell metacharacters out of the command.
            command = 'powercfg /batteryreport /output C:\\' + base + '.html'
            os.popen(command)
            message = wx.MessageDialog(None, 'Saved successfully!', 'INFORMATION', wx.OK | wx.ICON_INFORMATION)
            if message.ShowModal() == wx.ID_OK:
                pass
        else:
            message = wx.MessageDialog(None, "Can't contain any of the following characters: (spaces),/,\\,?,:,*,<,> and |.Otherwise,an exception is thrown.", 'WARNING', wx.OK | wx.ICON_WARNING)
            if message.ShowModal() == wx.ID_OK:
                pass
# wxPython bootstrap: create the application object, show the main
# frame, and enter the blocking event loop.
app = wx.App()
frm = MyFrame()
frm.Show()
app.MainLoop()
| 39.107692 | 192 | 0.560582 |
import os
import wx
def isi(path,name):
file = os.listdir(path)
if name in file:
return True
else:
return False
def isib(l,dx):
for i in range(len(l)):
if l[i] in dx:
return False
return True
class MyFrame(wx.Frame):
def __init__(self):
super().__init__(None, title='Generate battery report',size=(350,90))
panel = wx.Panel(parent=self)
self.icon = wx.Icon('battery icon.ico',wx.BITMAP_TYPE_ICO)
self.SetIcon(self.icon)
self.path = wx.StaticText(panel,label='Sava Path:C:\\')
self.path2 = wx.StaticText(panel,label='.html')
self.path3 = wx.TextCtrl(panel)
self.path3.SetValue('batteryreport')
self.button = wx.Button(panel,label='Save',pos=(100,50))
self.Bind(wx.EVT_BUTTON,self.on_click,self.button)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add(self.path,flag=wx.LEFT)
hbox.Add(self.path3,flag=wx.LEFT)
hbox.Add(self.path2,flag=wx.LEFT)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(hbox,flag=wx.LEFT)
vbox.Add(self.button,flag=wx.EXPAND,border=10)
panel.SetSizer(vbox)
def on_click(self,event):
name = self.path3.GetValue()
name += '.html'
if name == 'CON':
message = wx.MessageDialog(None,"You can't use CON as the file name!",'WARNING',wx.OK | wx.ICON_WARNING)
if message.ShowModel() == wx.ID_OK:
pass
elif isi('C:\\',name):
message = wx.MessageDialog(None,'This HTML file already exists in disk C!','ERROR',wx.OK | wx.ICON_ERROR)
if message.ShowModal() == wx.ID_OK:
pass
elif isib([' ', '/', '\\', '?', ':', '*', '"', '<', '>', '|'],self.path3.GetValue()):
command = 'powercfg /batteryreport /output C:\\'
command += self.path3.GetValue()
command += '.html'
os.popen(command)
message = wx.MessageDialog(None,'Saved successfully!','INFORMATION',wx.OK | wx.ICON_INFORMATION)
if message.ShowModal() == wx.ID_OK:
pass
else:
message = wx.MessageDialog(None,"Can't contain any of the following characters: (spaces),/,\\,?,:,*,<,> and |.Otherwise,an exception is thrown.",'WARNING',wx.OK | wx.ICON_WARNING)
if message.ShowModal() == wx.ID_OK:
pass
app = wx.App()
frm = MyFrame()
frm.Show()
app.MainLoop()
| true | true |
1c387f6af54f5ffd813b646aa95618c3133661bb | 13,234 | py | Python | mc/bookmarks/BookmarksTools.py | zy-sunshine/falkon-pyqt5 | bc2b60aa21c9b136439bd57a11f391d68c736f99 | [
"MIT"
] | 1 | 2021-04-29T05:36:44.000Z | 2021-04-29T05:36:44.000Z | mc/bookmarks/BookmarksTools.py | zy-sunshine/falkon-pyqt5 | bc2b60aa21c9b136439bd57a11f391d68c736f99 | [
"MIT"
] | 1 | 2020-03-28T17:43:18.000Z | 2020-03-28T17:43:18.000Z | mc/bookmarks/BookmarksTools.py | zy-sunshine/falkon-pyqt5 | bc2b60aa21c9b136439bd57a11f391d68c736f99 | [
"MIT"
] | 1 | 2021-01-15T20:09:24.000Z | 2021-01-15T20:09:24.000Z | from PyQt5.QtWidgets import QBoxLayout
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QDialogButtonBox
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QMenu
from PyQt5.QtWidgets import QPushButton
from PyQt5.Qt import pyqtSignal
from PyQt5.Qt import QAction
from PyQt5.QtWidgets import QMessageBox
from PyQt5.Qt import QFontMetrics
from PyQt5.Qt import Qt
from PyQt5.Qt import QUrl
from PyQt5.QtWidgets import QFormLayout
from PyQt5.QtWidgets import QPlainTextEdit
from mc.tools.IconProvider import IconProvider
from mc.tools.EnhancedMenu import Menu, Action
from mc.common.globalvars import gVar
from mc.common import const
from .BookmarkItem import BookmarkItem
class BookmarksFoldersMenu(QMenu):
    """Cascading menu of all bookmark folders (toolbar, menu, unsorted
    roots plus their sub-folders).

    Every folder level offers a "Choose ..." action; picking one emits
    folderSelected(BookmarkItem).
    """
    def __init__(self, parent=None):
        '''
        @param: parent QWidget
        '''
        super().__init__(parent)
        self._selectedFolder = None  # BookmarkItem
        self._init()

    def selectedFolder(self):
        '''
        @return: BookmarkItem
        NOTE(review): never reassigned after __init__, so this always
        returns None here; selection is reported via the folderSelected
        signal instead -- confirm against callers.
        '''
        return self._selectedFolder

    # Q_SIGNALS:
    folderSelected = pyqtSignal(BookmarkItem)

    # private Q_SLOTS:
    def _folderChoosed(self):
        # Slot for the "Choose ..." actions; the chosen folder travels as
        # the triggering action's data().
        act = self.sender()
        if not isinstance(act, QAction):
            return
        folder = act.data()
        if not isinstance(folder, BookmarkItem):
            return
        self.folderSelected.emit(folder)

    def _ADD_MENU(self, name):
        # Look up the root-folder accessor `name` on the Bookmarks object
        # (e.g. 'toolbarFolder') and add a cascading submenu for it.
        bookmarks = gVar.app.bookmarks()
        item = getattr(bookmarks, name)()
        menu = self.addMenu(item.icon(), item.title())
        self._createMenu(menu, item)

    # private:
    def _init(self):
        self._ADD_MENU('toolbarFolder')
        self._ADD_MENU('menuFolder')
        self._ADD_MENU('unsortedFolder')

    def _createMenu(self, menu, parent):
        '''
        Recursively populate `menu`: a "Choose" action for `parent`
        followed by one submenu per child folder.
        @param: menu QMenu
        @param: parent BookmarkItem
        '''
        act = menu.addAction(_('Choose %s') % parent.title())
        act.setData(parent)
        act.triggered.connect(self._folderChoosed)
        menu.addSeparator()
        for child in parent.children():
            # Only folders appear in this menu; URL bookmarks are skipped.
            if not child.isFolder(): continue
            m = menu.addMenu(child.icon(), child.title())
            self._createMenu(m, child)
class BookmarksFoldersButton(QPushButton):
    """Push button that pops up a BookmarksFoldersMenu and displays the
    currently selected target folder (title + icon)."""
    def __init__(self, parent, folder=None):
        '''
        @param: parent QWidget
        @param: folder BookmarkItem - initial selection; when None the
                bookmarks' last-used folder is chosen
        '''
        super().__init__(parent)
        self._menu = BookmarksFoldersMenu(self)  # BookmarksFoldersMenu
        self._selectedFolder = None  # BookmarkItem
        if folder:
            self._selectedFolder = folder
        else:
            self._selectedFolder = gVar.app.bookmarks().lastUsedFolder()
        self._init()
        self._menu.folderSelected.connect(self.setSelectedFolder)

    def selectedFolder(self):
        '''
        @return: BookmarkItem - the folder currently shown on the button
        '''
        return self._selectedFolder

    # Q_SIGNALS:
    selectedFolderChanged = pyqtSignal(BookmarkItem)

    # public Q_SLOTS:
    def setSelectedFolder(self, folder):
        '''
        Store `folder` and update the button's text/icon.
        selectedFolderChanged is emitted only when invoked via a signal
        (self.sender() is set), i.e. on user selection -- not on the
        programmatic call from _init().
        @param: folder BookmarkItem - must be a folder
        '''
        assert(folder)
        assert(folder.isFolder())
        self._selectedFolder = folder
        self.setText(folder.title())
        self.setIcon(folder.icon())
        if self.sender():
            self.selectedFolderChanged.emit(folder)

    def _init(self):
        # Attach the folder menu and display the initial selection.
        self.setMenu(self._menu)
        self.setSelectedFolder(self._selectedFolder)
class BookmarksTools(object):
@classmethod
def addBookmarkDialog(cls, parent, url, title, folder=None):
    '''
    @brief: Show a modal "Add New Bookmark" dialog and create the
            bookmark on accept.
    @param: parent QWidget
    @param: url QUrl - address to bookmark (also used for the window icon)
    @param: title QString - pre-filled bookmark title
    @param: folder BookmarkItem - initially selected target folder
    @return: bool - True when a bookmark was created, False on cancel
             or empty title
    '''
    dialog = QDialog(parent)
    layout = QBoxLayout(QBoxLayout.TopToBottom, dialog)
    label = QLabel(dialog)
    edit = QLineEdit(dialog)
    folderButton = BookmarksFoldersButton(dialog, folder)
    box = QDialogButtonBox(dialog)
    box.addButton(QDialogButtonBox.Ok)
    box.addButton(QDialogButtonBox.Cancel)
    box.rejected.connect(dialog.reject)
    box.accepted.connect(dialog.accept)
    layout.addWidget(label)
    layout.addWidget(edit)
    layout.addWidget(folderButton)
    layout.addWidget(box)
    label.setText(_('Choose name and location of this bookmark.'))
    edit.setText(title)
    edit.setCursorPosition(0)
    dialog.setWindowIcon(IconProvider.iconForUrl(url))
    dialog.setWindowTitle(_('Add New Bookmark'))
    size = dialog.size()
    size.setWidth(350)
    dialog.resize(size)
    # exec_() blocks until the dialog is closed.
    dialog.exec_()
    if dialog.result() == QDialog.Rejected or not edit.text():
        del dialog
        return False
    bookmark = BookmarkItem(BookmarkItem.Url)
    bookmark.setTitle(edit.text())
    bookmark.setUrl(url)
    gVar.app.bookmarks().addBookmark(folderButton.selectedFolder(), bookmark)
    # Drop the Python reference so the Qt dialog can be cleaned up.
    del dialog
    return True
@classmethod
def bookmarkAllTabsDialog(cls, parent, tabWidget, folder=None):
    '''
    Show a modal folder-picker dialog, then bookmark every tab of
    `tabWidget` that has a non-empty URL into the chosen folder.
    @param: parent QWidget
    @param: tabWidget TabWidget
    @param: folder BookmarkItem - initially selected target folder
    @return: bool - True when bookmarks were added, False on cancel
    '''
    assert(tabWidget)
    dialog = QDialog(parent)
    layout = QBoxLayout(QBoxLayout.TopToBottom, dialog)
    label = QLabel(dialog)
    folderButton = BookmarksFoldersButton(dialog, folder)
    box = QDialogButtonBox(dialog)
    box.addButton(QDialogButtonBox.Ok)
    box.addButton(QDialogButtonBox.Cancel)
    box.rejected.connect(dialog.reject)
    box.accepted.connect(dialog.accept)
    layout.addWidget(label)
    layout.addWidget(folderButton)
    layout.addWidget(box)
    label.setText(_('Choose folder for bookmarks:'))
    dialog.setWindowTitle(_('Bookmark All Tabs'))
    size = dialog.size()
    size.setWidth(350)
    dialog.resize(size)
    dialog.exec_()
    if dialog.result() == QDialog.Rejected:
        return False
    # NOTE(review): the meaning of allTabs(False) is not visible here --
    # confirm against TabWidget before relying on it.
    for tab in tabWidget.allTabs(False):
        if tab.url().isEmpty(): continue
        bookmark = BookmarkItem(BookmarkItem.Url)
        bookmark.setTitle(tab.title())
        bookmark.setUrl(tab.url())
        gVar.app.bookmarks().addBookmark(folderButton.selectedFolder(), bookmark)
    del dialog
    return True
@classmethod
def editBookmarkDialog(cls, parent, item):
    '''
    Show a modal editor for `item` and write the values back on accept.
    Folders expose only title + description; URL bookmarks additionally
    get address and keyword fields.
    @param: parent QWidget
    @param: item BookmarkItem - modified in place on accept
    @return: bool - True when the item was updated, False on cancel
    '''
    dialog = QDialog(parent)
    layout = QFormLayout(dialog)
    title = QLineEdit()
    address = QLineEdit()
    keyword = QLineEdit()
    description = QPlainTextEdit()
    box = QDialogButtonBox(dialog)
    box.addButton(QDialogButtonBox.Ok)
    box.addButton(QDialogButtonBox.Cancel)
    box.rejected.connect(dialog.reject)
    box.accepted.connect(dialog.accept)
    layout.addRow(_('Title:'), title)
    title.setText(item.title())
    if not item.isFolder():
        layout.addRow(_('Address:'), address)
        address.setText(item.urlString())
        layout.addRow(_('Keyword:'), keyword)
        keyword.setText(item.keyword())
    layout.addRow(_('Description:'), description)
    description.document().setPlainText(item.description())
    layout.addWidget(box)
    dialog.setWindowIcon(item.icon())
    dialog.setWindowTitle(_('Edit Bookmark'))
    dialog.exec_()
    if dialog.result() == QDialog.Rejected:
        del dialog
        return False
    item.setTitle(title.text())
    if not item.isFolder():
        # fromEncoded: the text field holds an already-encoded URL string.
        item.setUrl(QUrl.fromEncoded(address.text().encode()))
        item.setKeyword(keyword.text())
    item.setDescription(description.toPlainText())
    del dialog
    return True
@classmethod
def openBookmark(cls, window, item):
'''
@param: window BrowserWindow
@param: item BookmarkItem
'''
assert(window)
if not item or not item.isUrl():
return
item.updateVisitCount()
window.loadAddress(item.url())
@classmethod
def openBookmarkInNewTab(cls, window, item):
'''
@param: window BrowserWindow
@param: item BookmarkItem
'''
assert(window)
if not item:
return
if item.isFolder():
cls.openFolderInTabs(window, item)
elif item.isUrl():
item.updateVisitCount()
window.tabWidget().addViewByUrlTitle(item.url(), item.title(),
gVar.appSettings.newTabPosition)
@classmethod
def openBookmarkInNewWindow(cls, window, item):
'''
@param: window BrowserWindow
@param: item BookmarkItem
'''
if not item.isUrl():
return
item.updateVisitCount()
gVar.app.createWindow(const.BW_NewWindow, item.url())
@classmethod
def openBookmarkInNewPrivateWindow(cls, window, item):
'''
@param: window BrowserWindow
@param: item BookmarkItem
'''
if not item.isUrl():
return
item.updateVisitCount()
gVar.app.startPrivateBrowsing(item.url())
@classmethod
def openFolderInTabs(cls, window, folder):
'''
@param: window BrowserWindow
@param: folder BookmarkItem
'''
assert(window)
assert(folder.isFolder())
showWarning = len(folder.children()) > 10
if not showWarning:
for child in folder.children():
if child.isFolder():
showWarning = True
break
if showWarning:
button = QMessageBox.warning(window, _('Confirmation'),
_('Are you sure you want to open all bookmarks from "%s" folder in tabs?') % folder.title(),
QMessageBox.Yes | QMessageBox.No)
if button != QMessageBox.Yes:
return
for child in folder.children():
if child.isUrl():
cls.openBookmarkInNewTab(window, child)
elif child.isFolder():
cls.openFolderInTabs(window, child)
@classmethod
def addActionToMenu(cls, receiver, menu, item):
'''
@param: receiver QObject
@param: menu Menu
@param: item BookmarkItem
'''
assert(menu)
assert(item)
type_ = item.type()
if type_ == BookmarkItem.Url:
cls.addUrlToMenu(receiver, menu, item)
elif type_ == BookmarkItem.Folder:
cls.addFolderToMenu(receiver, menu, item)
elif type_ == BookmarkItem.Separator:
cls.addSeparatorToMenu(menu, item)
@classmethod
def addFolderToMenu(cls, receiver, menu, folder):
'''
@param: receiver QObject
@param: menu Menu
@param: folder BookmarkItem
'''
assert(menu)
assert(folder)
assert(folder.isFolder())
subMenu = Menu(menu)
title = QFontMetrics(subMenu.font()).elidedText(folder.title(), Qt.ElideRight, 250)
subMenu.setTitle(title)
subMenu.setIcon(folder.icon())
cls.addFolderContentsToMenu(receiver, subMenu, folder)
# QAction
act = menu.addMenu(subMenu)
act.setData(folder)
act.setIconVisibleInMenu(True)
@classmethod
def addUrlToMenu(cls, receiver, menu, bookmark):
'''
@param: receiver QObject
@param: menu Menu
@param: bookmark BookmarkItem
'''
assert(menu)
assert(bookmark)
assert(bookmark.isUrl())
act = Action(menu)
title = QFontMetrics(act.font()).elidedText(bookmark.title(), Qt.ElideRight, 250)
act.setText(title)
act.setData(bookmark)
act.setIconVisibleInMenu(True)
act.triggered.connect(receiver._bookmarkActivated)
act.ctrlTriggered.connect(receiver._bookmarkCtrlActivated)
act.shiftTriggered.connect(receiver._bookmarkShiftActivated)
menu.addAction(act)
    @classmethod
    def addSeparatorToMenu(cls, menu, separator):
        '''Append a separator line to *menu*.

        @param: menu Menu
        @param: separator BookmarkItem (only used for the sanity check)
        '''
        assert(menu)
        assert(separator.isSeparator())
        menu.addSeparator()
@classmethod
def addFolderContentsToMenu(cls, receiver, menu, folder):
'''
@param: receiver QObject
@param: menu Menu
@param: folder BookmarkItem
'''
menu.aboutToShow.connect(receiver._menuAboutToShow)
menu.menuMiddleClicked.connect(receiver._menuMiddleClicked)
for child in folder.children():
cls.addActionToMenu(receiver, menu, child)
if menu.isEmpty():
menu.addAction(_('Empty')).setDisabled(True)
#@classmethod
#def migrateBookmarksIfNecessary(cls, bookmarks):
# '''
# @brief: Migration from Sql Bookmarks (returns tree if bookmarks migrated)
# '''
# pass
| 29.540179 | 108 | 0.610473 | from PyQt5.QtWidgets import QBoxLayout
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QDialogButtonBox
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QMenu
from PyQt5.QtWidgets import QPushButton
from PyQt5.Qt import pyqtSignal
from PyQt5.Qt import QAction
from PyQt5.QtWidgets import QMessageBox
from PyQt5.Qt import QFontMetrics
from PyQt5.Qt import Qt
from PyQt5.Qt import QUrl
from PyQt5.QtWidgets import QFormLayout
from PyQt5.QtWidgets import QPlainTextEdit
from mc.tools.IconProvider import IconProvider
from mc.tools.EnhancedMenu import Menu, Action
from mc.common.globalvars import gVar
from mc.common import const
from .BookmarkItem import BookmarkItem
class BookmarksFoldersMenu(QMenu):
def __init__(self, parent=None):
super().__init__(parent)
self._selectedFolder = None
self._init()
def selectedFolder(self):
return self._selectedFolder
folderSelected = pyqtSignal(BookmarkItem)
def _folderChoosed(self):
act = self.sender()
if not isinstance(act, QAction):
return
folder = act.data()
if not isinstance(folder, BookmarkItem):
return
self.folderSelected.emit(folder)
def _ADD_MENU(self, name):
bookmarks = gVar.app.bookmarks()
item = getattr(bookmarks, name)()
menu = self.addMenu(item.icon(), item.title())
self._createMenu(menu, item)
def _init(self):
self._ADD_MENU('toolbarFolder')
self._ADD_MENU('menuFolder')
self._ADD_MENU('unsortedFolder')
def _createMenu(self, menu, parent):
act = menu.addAction(_('Choose %s') % parent.title())
act.setData(parent)
act.triggered.connect(self._folderChoosed)
menu.addSeparator()
for child in parent.children():
if not child.isFolder(): continue
m = menu.addMenu(child.icon(), child.title())
self._createMenu(m, child)
class BookmarksFoldersButton(QPushButton):
def __init__(self, parent, folder=None):
super().__init__(parent)
self._menu = BookmarksFoldersMenu(self)
self._selectedFolder = None
if folder:
self._selectedFolder = folder
else:
self._selectedFolder = gVar.app.bookmarks().lastUsedFolder()
self._init()
self._menu.folderSelected.connect(self.setSelectedFolder)
def selectedFolder(self):
return self._selectedFolder
selectedFolderChanged = pyqtSignal(BookmarkItem)
def setSelectedFolder(self, folder):
assert(folder)
assert(folder.isFolder())
self._selectedFolder = folder
self.setText(folder.title())
self.setIcon(folder.icon())
if self.sender():
self.selectedFolderChanged.emit(folder)
def _init(self):
self.setMenu(self._menu)
self.setSelectedFolder(self._selectedFolder)
class BookmarksTools(object):
@classmethod
def addBookmarkDialog(cls, parent, url, title, folder=None):
dialog = QDialog(parent)
layout = QBoxLayout(QBoxLayout.TopToBottom, dialog)
label = QLabel(dialog)
edit = QLineEdit(dialog)
folderButton = BookmarksFoldersButton(dialog, folder)
box = QDialogButtonBox(dialog)
box.addButton(QDialogButtonBox.Ok)
box.addButton(QDialogButtonBox.Cancel)
box.rejected.connect(dialog.reject)
box.accepted.connect(dialog.accept)
layout.addWidget(label)
layout.addWidget(edit)
layout.addWidget(folderButton)
layout.addWidget(box)
label.setText(_('Choose name and location of this bookmark.'))
edit.setText(title)
edit.setCursorPosition(0)
dialog.setWindowIcon(IconProvider.iconForUrl(url))
dialog.setWindowTitle(_('Add New Bookmark'))
size = dialog.size()
size.setWidth(350)
dialog.resize(size)
dialog.exec_()
if dialog.result() == QDialog.Rejected or not edit.text():
del dialog
return False
bookmark = BookmarkItem(BookmarkItem.Url)
bookmark.setTitle(edit.text())
bookmark.setUrl(url)
gVar.app.bookmarks().addBookmark(folderButton.selectedFolder(), bookmark)
del dialog
return True
@classmethod
def bookmarkAllTabsDialog(cls, parent, tabWidget, folder=None):
assert(tabWidget)
dialog = QDialog(parent)
layout = QBoxLayout(QBoxLayout.TopToBottom, dialog)
label = QLabel(dialog)
folderButton = BookmarksFoldersButton(dialog, folder)
box = QDialogButtonBox(dialog)
box.addButton(QDialogButtonBox.Ok)
box.addButton(QDialogButtonBox.Cancel)
box.rejected.connect(dialog.reject)
box.accepted.connect(dialog.accept)
layout.addWidget(label)
layout.addWidget(folderButton)
layout.addWidget(box)
label.setText(_('Choose folder for bookmarks:'))
dialog.setWindowTitle(_('Bookmark All Tabs'))
size = dialog.size()
size.setWidth(350)
dialog.resize(size)
dialog.exec_()
if dialog.result() == QDialog.Rejected:
return False
for tab in tabWidget.allTabs(False):
if tab.url().isEmpty(): continue
bookmark = BookmarkItem(BookmarkItem.Url)
bookmark.setTitle(tab.title())
bookmark.setUrl(tab.url())
gVar.app.bookmarks().addBookmark(folderButton.selectedFolder(), bookmark)
del dialog
return True
@classmethod
def editBookmarkDialog(cls, parent, item):
dialog = QDialog(parent)
layout = QFormLayout(dialog)
title = QLineEdit()
address = QLineEdit()
keyword = QLineEdit()
description = QPlainTextEdit()
box = QDialogButtonBox(dialog)
box.addButton(QDialogButtonBox.Ok)
box.addButton(QDialogButtonBox.Cancel)
box.rejected.connect(dialog.reject)
box.accepted.connect(dialog.accept)
layout.addRow(_('Title:'), title)
title.setText(item.title())
if not item.isFolder():
layout.addRow(_('Address:'), address)
address.setText(item.urlString())
layout.addRow(_('Keyword:'), keyword)
keyword.setText(item.keyword())
layout.addRow(_('Description:'), description)
description.document().setPlainText(item.description())
layout.addWidget(box)
dialog.setWindowIcon(item.icon())
dialog.setWindowTitle(_('Edit Bookmark'))
dialog.exec_()
if dialog.result() == QDialog.Rejected:
del dialog
return False
item.setTitle(title.text())
if not item.isFolder():
item.setUrl(QUrl.fromEncoded(address.text().encode()))
item.setKeyword(keyword.text())
item.setDescription(description.toPlainText())
del dialog
return True
@classmethod
def openBookmark(cls, window, item):
assert(window)
if not item or not item.isUrl():
return
item.updateVisitCount()
window.loadAddress(item.url())
@classmethod
def openBookmarkInNewTab(cls, window, item):
assert(window)
if not item:
return
if item.isFolder():
cls.openFolderInTabs(window, item)
elif item.isUrl():
item.updateVisitCount()
window.tabWidget().addViewByUrlTitle(item.url(), item.title(),
gVar.appSettings.newTabPosition)
@classmethod
def openBookmarkInNewWindow(cls, window, item):
if not item.isUrl():
return
item.updateVisitCount()
gVar.app.createWindow(const.BW_NewWindow, item.url())
@classmethod
def openBookmarkInNewPrivateWindow(cls, window, item):
if not item.isUrl():
return
item.updateVisitCount()
gVar.app.startPrivateBrowsing(item.url())
@classmethod
def openFolderInTabs(cls, window, folder):
assert(window)
assert(folder.isFolder())
showWarning = len(folder.children()) > 10
if not showWarning:
for child in folder.children():
if child.isFolder():
showWarning = True
break
if showWarning:
button = QMessageBox.warning(window, _('Confirmation'),
_('Are you sure you want to open all bookmarks from "%s" folder in tabs?') % folder.title(),
QMessageBox.Yes | QMessageBox.No)
if button != QMessageBox.Yes:
return
for child in folder.children():
if child.isUrl():
cls.openBookmarkInNewTab(window, child)
elif child.isFolder():
cls.openFolderInTabs(window, child)
@classmethod
def addActionToMenu(cls, receiver, menu, item):
assert(menu)
assert(item)
type_ = item.type()
if type_ == BookmarkItem.Url:
cls.addUrlToMenu(receiver, menu, item)
elif type_ == BookmarkItem.Folder:
cls.addFolderToMenu(receiver, menu, item)
elif type_ == BookmarkItem.Separator:
cls.addSeparatorToMenu(menu, item)
@classmethod
def addFolderToMenu(cls, receiver, menu, folder):
assert(menu)
assert(folder)
assert(folder.isFolder())
subMenu = Menu(menu)
title = QFontMetrics(subMenu.font()).elidedText(folder.title(), Qt.ElideRight, 250)
subMenu.setTitle(title)
subMenu.setIcon(folder.icon())
cls.addFolderContentsToMenu(receiver, subMenu, folder)
act = menu.addMenu(subMenu)
act.setData(folder)
act.setIconVisibleInMenu(True)
@classmethod
def addUrlToMenu(cls, receiver, menu, bookmark):
assert(menu)
assert(bookmark)
assert(bookmark.isUrl())
act = Action(menu)
title = QFontMetrics(act.font()).elidedText(bookmark.title(), Qt.ElideRight, 250)
act.setText(title)
act.setData(bookmark)
act.setIconVisibleInMenu(True)
act.triggered.connect(receiver._bookmarkActivated)
act.ctrlTriggered.connect(receiver._bookmarkCtrlActivated)
act.shiftTriggered.connect(receiver._bookmarkShiftActivated)
menu.addAction(act)
@classmethod
def addSeparatorToMenu(cls, menu, separator):
assert(menu)
assert(separator.isSeparator())
menu.addSeparator()
@classmethod
def addFolderContentsToMenu(cls, receiver, menu, folder):
menu.aboutToShow.connect(receiver._menuAboutToShow)
menu.menuMiddleClicked.connect(receiver._menuMiddleClicked)
for child in folder.children():
cls.addActionToMenu(receiver, menu, child)
if menu.isEmpty():
menu.addAction(_('Empty')).setDisabled(True)
# @brief: Migration from Sql Bookmarks (returns tree if bookmarks migrated)
# '''
| true | true |
1c387f79fe2cfc8681e3cba4788cfe8bebea297d | 13,532 | py | Python | funfact/lang/interpreter/test_type_deduction.py | campsd/FunFact | 477bcf06794f09608240eba992823fae6fde8dad | [
"BSD-3-Clause-LBNL"
] | 37 | 2021-09-22T17:28:35.000Z | 2022-03-07T00:11:17.000Z | funfact/lang/interpreter/test_type_deduction.py | campsd/FunFact | 477bcf06794f09608240eba992823fae6fde8dad | [
"BSD-3-Clause-LBNL"
] | 125 | 2021-11-04T16:50:24.000Z | 2022-03-28T17:54:13.000Z | funfact/lang/interpreter/test_type_deduction.py | campsd/FunFact | 477bcf06794f09608240eba992823fae6fde8dad | [
"BSD-3-Clause-LBNL"
] | 2 | 2021-12-13T07:28:42.000Z | 2021-12-13T07:51:41.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest # noqa: F401
from unittest.mock import MagicMock as M
from .._ast import Primitives as P
from ._type_deduction import TypeDeducer, _add_attr
@pytest.fixture
def intr():
    """Provide a fresh TypeDeducer interpreter for each test."""
    return TypeDeducer()
def as_payload(live_indices, keep_indices, kron_indices, shape):
    """Bundle the four type-deduction attributes into one payload dict.

    The keys mirror the attribute names checked on compiled AST nodes.
    """
    return {
        'live_indices': live_indices,
        'keep_indices': keep_indices,
        'kron_indices': kron_indices,
        'shape': shape,
    }
@pytest.mark.parametrize('test_case', [
(P.literal('value'),
as_payload(None, None, None, ())),
(P.tensor(M(shape='shape')),
as_payload(None, None, None, 'shape')),
(P.index('item', False, False),
as_payload(['item'], [], [], None)),
(P.index('item', True, False),
as_payload(['item'], ['item'], [], None)),
(P.index('item', False, True),
as_payload(['item'], ['item'], ['item'], None)),
(P.indices([
M(**as_payload(['i'], ['j'], ['k'], None)),
M(**as_payload(['p'], ['q'], ['r'], None)),
]),
as_payload(['i', 'p'], ['j', 'q'], ['k', 'r'], None)),
(P.indexed_tensor(
M(shape='shape'),
M(**as_payload(['i'], ['j'], ['k'], None))
),
as_payload(['i'], ['j'], ['k'], 'shape')),
(P.call('fun', M(**as_payload(['i'], ['j'], ['k'], 'shape'))),
as_payload(['i'], ['j'], ['k'], 'shape')),
(P.neg(M(**as_payload(['i'], ['j'], ['k'], 'shape'))),
as_payload(['i'], ['j'], ['k'], 'shape')),
(P.elem(
M(shape=(2, 3)),
M(shape=(2, 3)),
'', ''),
as_payload(None, None, None, (2, 3))),
(P.elem(
M(shape=(2, 3)),
M(shape=(2, 1)),
'', ''),
as_payload(None, None, None, (2, 3))),
(P.elem(
M(shape=(1, 3)),
M(shape=(2, 3)),
'', ''),
as_payload(None, None, None, (2, 3))),
(P.elem(
M(shape=(1, 10)),
M(shape=(3, 1)),
'', ''),
as_payload(None, None, None, (3, 10))),
(P.elem(
M(shape=(2, 3, 1)),
M(shape=(1, 3, 5)),
'', ''),
as_payload(None, None, None, (2, 3, 5))),
(P.elem(
M(shape=(2, 3)),
M(shape=(2, 4)),
'', ''),
SyntaxError),
(P.elem(
M(shape=(2, 3)),
M(shape=(2, 3, 1)),
'', ''),
SyntaxError),
# i,i
(P.ein(
M(**as_payload(['i'], None, None, (5,))),
M(**as_payload(['i'], None, None, (5,))),
'', '', '',
None
),
as_payload([], [], [], ())),
# i,j
(P.ein(
M(**as_payload(['i'], None, None, (5,))),
M(**as_payload(['j'], None, None, (2,))),
'', '', '',
None
),
as_payload(['i', 'j'], [], [], (5, 2))),
# ~i~j,ij
(P.ein(
M(**as_payload(['i', 'j'], ['i', 'j'], None, (1, 10))),
M(**as_payload(['i', 'j'], None, None, (3, 1))),
'', '', '',
None
),
as_payload(['i', 'j'], [], [], (3, 10))),
# ij,jk
(P.ein(
M(**as_payload(['i', 'j'], None, None, (5, 2))),
M(**as_payload(['j', 'k'], None, None, (2, 10))),
'', '', '',
None
),
as_payload(['i', 'k'], [], [], (5, 10))),
# i~j,jk
(P.ein(
M(**as_payload(['i', 'j'], ['j'], None, (5, 2))),
M(**as_payload(['j', 'k'], None, None, (2, 10))),
'', '', '',
None
),
as_payload(['i', 'j', 'k'], [], [], (5, 2, 10))),
# ij,~jk
(P.ein(
M(**as_payload(['i', 'j'], None, None, (5, 2))),
M(**as_payload(['j', 'k'], ['j'], None, (2, 10))),
'', '', '',
None
),
as_payload(['i', 'j', 'k'], [], [], (5, 2, 10))),
# i~j,~jk
(P.ein(
M(**as_payload(['i', 'j'], ['j'], None, (5, 2))),
M(**as_payload(['j', 'k'], ['j'], None, (2, 10))),
'', '', '',
None
),
as_payload(['i', 'j', 'k'], [], [], (5, 2, 10))),
# ij,jk->ki
(P.ein(
M(**as_payload(['i', 'j'], None, None, (5, 2))),
M(**as_payload(['j', 'k'], None, None, (2, 10))),
'', '', '',
M(live_indices=['k', 'i'])
),
as_payload(['k', 'i'], [], [], (10, 5))),
# ij,jk->jki
(P.ein(
M(**as_payload(['i', 'j'], None, None, (5, 2))),
M(**as_payload(['j', 'k'], None, None, (2, 10))),
'', '', '',
M(live_indices=['j', 'k', 'i'])
),
as_payload(['j', 'k', 'i'], [], [], (2, 10, 5))),
# *i,*i
(P.ein(
M(**as_payload(['i'], ['i'], ['i'], (3,))),
M(**as_payload(['i'], ['i'], ['i'], (7,))),
'', '', '',
None
),
as_payload(['i'], [], [], (21,))),
# *i,*i,*i
(P.ein(
P.ein(
M(**as_payload(['i'], ['i'], ['i'], (2,))),
M(**as_payload(['i'], ['i'], ['i'], (3,))),
'', '', '',
None
),
M(**as_payload(['i'], ['i'], ['i'], (5,))),
'', '', '',
None
),
as_payload(['i'], [], [], (30,))),
# *i,*i,i
(P.ein(
P.ein(
M(**as_payload(['i'], ['i'], ['i'], (2,))),
M(**as_payload(['i'], ['i'], ['i'], (3,))),
'', '', '',
None
),
M(**as_payload(['i'], [], [], (6,))),
'', '', '',
None
),
as_payload([], [], [], ())),
# *i,*i,i
(P.ein(
P.ein(
M(**as_payload(['i'], ['i'], ['i'], (2,))),
M(**as_payload(['i'], ['i'], ['i'], (3,))),
'', '', '',
None
),
M(**as_payload(['i'], [], [], (7,))),
'', '', '',
None
),
SyntaxError),
(P.tran(
M(
live_indices=['i', 'j', 'k'],
shape=(2, 3, 4)
),
M(
live_indices=['j', 'k', 'i'],
keep_indices='keep',
kron_indices='kron',
ascii='indices'
)
),
as_payload(['j', 'k', 'i'], 'keep', 'kron', (3, 4, 2))),
])
def test_concrete(test_case, intr):
    """Check type deduction on a concrete AST node.

    Each *test_case* pairs a primitive node with either the expected payload
    attributes (a dict) or the exception type the deducer must raise.
    """
    node, result = test_case
    if isinstance(result, type) and issubclass(result, Exception):
        # Error case: interpreting the node must raise.  (The previous
        # `compiled = intr(node)` binding here was never used.)
        with pytest.raises(result):
            intr(node)
    else:
        compiled = intr(node)
        for key, val in result.items():
            assert getattr(compiled, key) == val
@pytest.mark.parametrize('test_case', [
(
P.abstract_dest(
P.ein(
M(**as_payload(['i', 'j'], None, None, (5, 3))),
M(**as_payload(['j', 'k'], None, None, (3, 4))),
'', '', '',
None
),
M(live_indices=['k', 'i'])
),
M(name_='ein', live_indices=['k', 'i'], shape=(4, 5))
),
(
P.abstract_dest(
P.ein(
M(**as_payload(['i', 'j'], None, None, (5, 3))),
M(**as_payload(['j', 'k'], None, None, (3, 4))),
'', '', '',
None
),
M(live_indices=['k', 'j', 'i'])
),
M(name_='ein', live_indices=['k', 'j', 'i'], shape=(4, 3, 5))
),
(
P.abstract_dest(
P.ein(
M(**as_payload(['i', 'j'], None, None, (5, 3))),
M(**as_payload(['j', 'k'], None, None, (3, 5))),
'', '', '',
None
),
M(live_indices=['k'])
),
SyntaxError
),
(
P.abstract_dest(
P.ein(
M(**as_payload(['i', 'j'], None, None, (5, 3))),
M(**as_payload(['j', 'k'], None, None, (3, 5))),
'', '', '',
None
),
M(live_indices=['k', 'l'])
),
SyntaxError
),
(
P.abstract_dest(
P.ein(
M(**as_payload(['i', 'j'], ['j'], None, (5, 3))),
M(**as_payload(['j', 'k'], None, None, (3, 5))),
'', '', '',
None
),
M(live_indices=['i', 'k'])
),
SyntaxError
),
(
P.abstract_dest(
_add_attr(
P.indexed_tensor(
P.tensor(M(shape=(2, 3))),
P.indices([
P.index('i', False, False),
P.index('j', False, False)
]),
),
indexed=True
),
P.indices([
P.index('j', False, False),
P.index('i', False, False)
])
),
M(name_='tran', live_indices=['j', 'i'], shape=(3, 2))
),
])
def test_abstract_dest(intr, test_case):
    """Check deduction through an abstract_dest node against the expectation.

    *test_case* is (abstract node, expected mock) or (abstract node,
    exception type).
    """
    abstract, concrete = test_case
    expects_error = isinstance(concrete, type) and issubclass(concrete, Exception)
    if expects_error:
        with pytest.raises(concrete):
            intr(abstract)
        return
    compiled = intr(abstract)
    assert compiled.name == concrete.name_
    assert compiled.live_indices == concrete.live_indices
    assert compiled.shape == concrete.shape
@pytest.mark.parametrize('test_case', [
(
P.abstract_index_notation(
M(indexed=False, shape='shape'),
M(**as_payload(['i'], ['j'], ['k'], None))
),
'indexed_tensor',
as_payload(['i'], ['j'], ['k'], 'shape'),
),
(
P.abstract_index_notation(
_add_attr(
P.indexed_tensor(
P.tensor(M(shape='shape')),
P.indices([P.index('i', False, False)]),
),
indexed=True
),
P.indices([P.index('j', False, False)])
),
'indexed_tensor',
as_payload(['j'], [], [], 'shape'),
),
(
P.abstract_index_notation(
_add_attr(
P.indexed_tensor(
P.tensor(M(shape='shape')),
P.indices([P.index('i', False, False)]),
),
indexed=True
),
P.indices([P.index('j', False, False), P.index('k', False, False)])
),
'',
SyntaxError,
),
(
P.abstract_index_notation(
_add_attr(
P.indexed_tensor(
P.tensor(M(shape='shape')),
P.indices([
P.index('i', False, False),
P.index('j', False, False)
]),
),
indexed=True
),
P.indices([
P.index('j', False, False),
P.index('k', False, False)
])
),
'indexed_tensor',
as_payload(['j', 'k'], [], [], 'shape'),
),
])
def test_abstract_index_notation(intr, test_case):
    """Check translation of abstract index notation into concrete primitives.

    *test_case* is (abstract node, expected primitive name, payload dict) or
    (abstract node, anything, exception type).
    """
    abstract, cname, payload = test_case
    if isinstance(payload, type) and issubclass(payload, Exception):
        with pytest.raises(payload):
            intr(abstract)
        return
    compiled = intr(abstract)
    assert compiled.name == cname
    for key, expected in payload.items():
        assert getattr(compiled, key) == expected
@pytest.mark.parametrize('test_case', [
# matmul
(
_add_attr(
P.abstract_binary(
M(indexed=False, shape=(2, 3)),
M(indexed=False, shape=(3, 4)),
'', 'matmul'
),
indexed=None
),
'ein',
(2, 4),
),
(
_add_attr(
P.abstract_binary(
M(indexed=False, shape=(2, 2)),
M(indexed=False, shape=(3, 4)),
'', 'matmul'
),
indexed=None
),
SyntaxError
),
# kron
(
_add_attr(
P.abstract_binary(
M(indexed=False, shape=(3,)),
M(indexed=False, shape=(5,)),
'', 'kron'
),
indexed=None
),
'ein',
(15,),
),
(
_add_attr(
P.abstract_binary(
M(indexed=False, shape=(2, 3)),
M(indexed=False, shape=(5, 7)),
'', 'kron'
),
indexed=None
),
'ein',
(10, 21),
),
# einop
(
_add_attr(
P.abstract_binary(
M(**as_payload(['i', 'j'], [], [], (2, 5))),
M(**as_payload(['j', 'k'], [], [], (5, 7))),
'', 'multiply'
),
indexed=True
),
'ein',
(2, 7),
),
# elementwise
(
_add_attr(
P.abstract_binary(
M(indexed=False, shape=(2, 5)),
M(indexed=False, shape=(2, 5)),
'', 'add'
),
indexed=False
),
'elem',
(2, 5),
),
(
_add_attr(
P.abstract_binary(
M(indexed=False, shape=(2, 5, 1)),
M(indexed=False, shape=(1, 5, 4)),
'', 'subtract'
),
indexed=False
),
'elem',
(2, 5, 4),
),
])
def test_abstract_binary(intr, test_case):
    """Check deduction of abstract binary ops (matmul/kron/einop/elementwise).

    A 3-tuple case carries (node, expected primitive name, expected shape);
    a 2-tuple case carries (node, expected exception type).
    """
    # Branch on tuple length instead of the previous broad try/except around
    # the unpacking, which used exceptions for control flow and could also
    # swallow unrelated errors.
    if len(test_case) == 3:
        abstract, cname, shape = test_case
        compiled = intr(abstract)
        assert compiled.name == cname
        assert compiled.shape == shape
    else:
        abstract, exception = test_case
        with pytest.raises(exception):
            intr(abstract)
| 25.726236 | 79 | 0.381392 |
import pytest
from unittest.mock import MagicMock as M
from .._ast import Primitives as P
from ._type_deduction import TypeDeducer, _add_attr
@pytest.fixture
def intr():
return TypeDeducer()
def as_payload(live_indices, keep_indices, kron_indices, shape):
return dict(
live_indices=live_indices,
keep_indices=keep_indices,
kron_indices=kron_indices,
shape=shape
)
@pytest.mark.parametrize('test_case', [
(P.literal('value'),
as_payload(None, None, None, ())),
(P.tensor(M(shape='shape')),
as_payload(None, None, None, 'shape')),
(P.index('item', False, False),
as_payload(['item'], [], [], None)),
(P.index('item', True, False),
as_payload(['item'], ['item'], [], None)),
(P.index('item', False, True),
as_payload(['item'], ['item'], ['item'], None)),
(P.indices([
M(**as_payload(['i'], ['j'], ['k'], None)),
M(**as_payload(['p'], ['q'], ['r'], None)),
]),
as_payload(['i', 'p'], ['j', 'q'], ['k', 'r'], None)),
(P.indexed_tensor(
M(shape='shape'),
M(**as_payload(['i'], ['j'], ['k'], None))
),
as_payload(['i'], ['j'], ['k'], 'shape')),
(P.call('fun', M(**as_payload(['i'], ['j'], ['k'], 'shape'))),
as_payload(['i'], ['j'], ['k'], 'shape')),
(P.neg(M(**as_payload(['i'], ['j'], ['k'], 'shape'))),
as_payload(['i'], ['j'], ['k'], 'shape')),
(P.elem(
M(shape=(2, 3)),
M(shape=(2, 3)),
'', ''),
as_payload(None, None, None, (2, 3))),
(P.elem(
M(shape=(2, 3)),
M(shape=(2, 1)),
'', ''),
as_payload(None, None, None, (2, 3))),
(P.elem(
M(shape=(1, 3)),
M(shape=(2, 3)),
'', ''),
as_payload(None, None, None, (2, 3))),
(P.elem(
M(shape=(1, 10)),
M(shape=(3, 1)),
'', ''),
as_payload(None, None, None, (3, 10))),
(P.elem(
M(shape=(2, 3, 1)),
M(shape=(1, 3, 5)),
'', ''),
as_payload(None, None, None, (2, 3, 5))),
(P.elem(
M(shape=(2, 3)),
M(shape=(2, 4)),
'', ''),
SyntaxError),
(P.elem(
M(shape=(2, 3)),
M(shape=(2, 3, 1)),
'', ''),
SyntaxError),
(P.ein(
M(**as_payload(['i'], None, None, (5,))),
M(**as_payload(['i'], None, None, (5,))),
'', '', '',
None
),
as_payload([], [], [], ())),
(P.ein(
M(**as_payload(['i'], None, None, (5,))),
M(**as_payload(['j'], None, None, (2,))),
'', '', '',
None
),
as_payload(['i', 'j'], [], [], (5, 2))),
(P.ein(
M(**as_payload(['i', 'j'], ['i', 'j'], None, (1, 10))),
M(**as_payload(['i', 'j'], None, None, (3, 1))),
'', '', '',
None
),
as_payload(['i', 'j'], [], [], (3, 10))),
(P.ein(
M(**as_payload(['i', 'j'], None, None, (5, 2))),
M(**as_payload(['j', 'k'], None, None, (2, 10))),
'', '', '',
None
),
as_payload(['i', 'k'], [], [], (5, 10))),
(P.ein(
M(**as_payload(['i', 'j'], ['j'], None, (5, 2))),
M(**as_payload(['j', 'k'], None, None, (2, 10))),
'', '', '',
None
),
as_payload(['i', 'j', 'k'], [], [], (5, 2, 10))),
(P.ein(
M(**as_payload(['i', 'j'], None, None, (5, 2))),
M(**as_payload(['j', 'k'], ['j'], None, (2, 10))),
'', '', '',
None
),
as_payload(['i', 'j', 'k'], [], [], (5, 2, 10))),
(P.ein(
M(**as_payload(['i', 'j'], ['j'], None, (5, 2))),
M(**as_payload(['j', 'k'], ['j'], None, (2, 10))),
'', '', '',
None
),
as_payload(['i', 'j', 'k'], [], [], (5, 2, 10))),
(P.ein(
M(**as_payload(['i', 'j'], None, None, (5, 2))),
M(**as_payload(['j', 'k'], None, None, (2, 10))),
'', '', '',
M(live_indices=['k', 'i'])
),
as_payload(['k', 'i'], [], [], (10, 5))),
(P.ein(
M(**as_payload(['i', 'j'], None, None, (5, 2))),
M(**as_payload(['j', 'k'], None, None, (2, 10))),
'', '', '',
M(live_indices=['j', 'k', 'i'])
),
as_payload(['j', 'k', 'i'], [], [], (2, 10, 5))),
(P.ein(
M(**as_payload(['i'], ['i'], ['i'], (3,))),
M(**as_payload(['i'], ['i'], ['i'], (7,))),
'', '', '',
None
),
as_payload(['i'], [], [], (21,))),
(P.ein(
P.ein(
M(**as_payload(['i'], ['i'], ['i'], (2,))),
M(**as_payload(['i'], ['i'], ['i'], (3,))),
'', '', '',
None
),
M(**as_payload(['i'], ['i'], ['i'], (5,))),
'', '', '',
None
),
as_payload(['i'], [], [], (30,))),
(P.ein(
P.ein(
M(**as_payload(['i'], ['i'], ['i'], (2,))),
M(**as_payload(['i'], ['i'], ['i'], (3,))),
'', '', '',
None
),
M(**as_payload(['i'], [], [], (6,))),
'', '', '',
None
),
as_payload([], [], [], ())),
(P.ein(
P.ein(
M(**as_payload(['i'], ['i'], ['i'], (2,))),
M(**as_payload(['i'], ['i'], ['i'], (3,))),
'', '', '',
None
),
M(**as_payload(['i'], [], [], (7,))),
'', '', '',
None
),
SyntaxError),
(P.tran(
M(
live_indices=['i', 'j', 'k'],
shape=(2, 3, 4)
),
M(
live_indices=['j', 'k', 'i'],
keep_indices='keep',
kron_indices='kron',
ascii='indices'
)
),
as_payload(['j', 'k', 'i'], 'keep', 'kron', (3, 4, 2))),
])
def test_concrete(test_case, intr):
node, result = test_case
if isinstance(result, type) and issubclass(result, Exception):
with pytest.raises(result):
compiled = intr(node)
else:
compiled = intr(node)
for key, val in result.items():
assert getattr(compiled, key) == val
@pytest.mark.parametrize('test_case', [
(
P.abstract_dest(
P.ein(
M(**as_payload(['i', 'j'], None, None, (5, 3))),
M(**as_payload(['j', 'k'], None, None, (3, 4))),
'', '', '',
None
),
M(live_indices=['k', 'i'])
),
M(name_='ein', live_indices=['k', 'i'], shape=(4, 5))
),
(
P.abstract_dest(
P.ein(
M(**as_payload(['i', 'j'], None, None, (5, 3))),
M(**as_payload(['j', 'k'], None, None, (3, 4))),
'', '', '',
None
),
M(live_indices=['k', 'j', 'i'])
),
M(name_='ein', live_indices=['k', 'j', 'i'], shape=(4, 3, 5))
),
(
P.abstract_dest(
P.ein(
M(**as_payload(['i', 'j'], None, None, (5, 3))),
M(**as_payload(['j', 'k'], None, None, (3, 5))),
'', '', '',
None
),
M(live_indices=['k'])
),
SyntaxError
),
(
P.abstract_dest(
P.ein(
M(**as_payload(['i', 'j'], None, None, (5, 3))),
M(**as_payload(['j', 'k'], None, None, (3, 5))),
'', '', '',
None
),
M(live_indices=['k', 'l'])
),
SyntaxError
),
(
P.abstract_dest(
P.ein(
M(**as_payload(['i', 'j'], ['j'], None, (5, 3))),
M(**as_payload(['j', 'k'], None, None, (3, 5))),
'', '', '',
None
),
M(live_indices=['i', 'k'])
),
SyntaxError
),
(
P.abstract_dest(
_add_attr(
P.indexed_tensor(
P.tensor(M(shape=(2, 3))),
P.indices([
P.index('i', False, False),
P.index('j', False, False)
]),
),
indexed=True
),
P.indices([
P.index('j', False, False),
P.index('i', False, False)
])
),
M(name_='tran', live_indices=['j', 'i'], shape=(3, 2))
),
])
def test_abstract_dest(intr, test_case):
abstract, concrete = test_case
if isinstance(concrete, type) and issubclass(concrete, Exception):
with pytest.raises(concrete):
intr(abstract)
else:
compiled = intr(abstract)
assert compiled.name == concrete.name_
assert compiled.live_indices == concrete.live_indices
assert compiled.shape == concrete.shape
@pytest.mark.parametrize('test_case', [
(
P.abstract_index_notation(
M(indexed=False, shape='shape'),
M(**as_payload(['i'], ['j'], ['k'], None))
),
'indexed_tensor',
as_payload(['i'], ['j'], ['k'], 'shape'),
),
(
P.abstract_index_notation(
_add_attr(
P.indexed_tensor(
P.tensor(M(shape='shape')),
P.indices([P.index('i', False, False)]),
),
indexed=True
),
P.indices([P.index('j', False, False)])
),
'indexed_tensor',
as_payload(['j'], [], [], 'shape'),
),
(
P.abstract_index_notation(
_add_attr(
P.indexed_tensor(
P.tensor(M(shape='shape')),
P.indices([P.index('i', False, False)]),
),
indexed=True
),
P.indices([P.index('j', False, False), P.index('k', False, False)])
),
'',
SyntaxError,
),
(
P.abstract_index_notation(
_add_attr(
P.indexed_tensor(
P.tensor(M(shape='shape')),
P.indices([
P.index('i', False, False),
P.index('j', False, False)
]),
),
indexed=True
),
P.indices([
P.index('j', False, False),
P.index('k', False, False)
])
),
'indexed_tensor',
as_payload(['j', 'k'], [], [], 'shape'),
),
])
def test_abstract_index_notation(intr, test_case):
abstract, cname, payload = test_case
if isinstance(payload, type) and issubclass(payload, Exception):
with pytest.raises(payload):
intr(abstract)
else:
compiled = intr(abstract)
assert compiled.name == cname
for key, val in payload.items():
assert getattr(compiled, key) == val
@pytest.mark.parametrize('test_case', [
(
_add_attr(
P.abstract_binary(
M(indexed=False, shape=(2, 3)),
M(indexed=False, shape=(3, 4)),
'', 'matmul'
),
indexed=None
),
'ein',
(2, 4),
),
(
_add_attr(
P.abstract_binary(
M(indexed=False, shape=(2, 2)),
M(indexed=False, shape=(3, 4)),
'', 'matmul'
),
indexed=None
),
SyntaxError
),
(
_add_attr(
P.abstract_binary(
M(indexed=False, shape=(3,)),
M(indexed=False, shape=(5,)),
'', 'kron'
),
indexed=None
),
'ein',
(15,),
),
(
_add_attr(
P.abstract_binary(
M(indexed=False, shape=(2, 3)),
M(indexed=False, shape=(5, 7)),
'', 'kron'
),
indexed=None
),
'ein',
(10, 21),
),
(
_add_attr(
P.abstract_binary(
M(**as_payload(['i', 'j'], [], [], (2, 5))),
M(**as_payload(['j', 'k'], [], [], (5, 7))),
'', 'multiply'
),
indexed=True
),
'ein',
(2, 7),
),
(
_add_attr(
P.abstract_binary(
M(indexed=False, shape=(2, 5)),
M(indexed=False, shape=(2, 5)),
'', 'add'
),
indexed=False
),
'elem',
(2, 5),
),
(
_add_attr(
P.abstract_binary(
M(indexed=False, shape=(2, 5, 1)),
M(indexed=False, shape=(1, 5, 4)),
'', 'subtract'
),
indexed=False
),
'elem',
(2, 5, 4),
),
])
def test_abstract_binary(intr, test_case):
try:
abstract, cname, shape = test_case
expect_exception = False
except Exception:
abstract, exception = test_case
expect_exception = True
if expect_exception:
with pytest.raises(exception):
intr(abstract)
else:
compiled = intr(abstract)
assert compiled.name == cname
assert compiled.shape == shape
| true | true |
1c387fd94b6bfb20418bc9254eb731521eb4b65f | 10,660 | py | Python | vision/databunch.py | srujandeshpande/DashAI | 85581abc17ff3a8bf03a1e8b6aae7a06e5ab0dc8 | [
"Apache-2.0"
] | 7 | 2020-08-26T09:36:54.000Z | 2021-12-21T15:46:32.000Z | vision/databunch.py | JoeRishon/DashAI | 68d1b01088a65e2062d96b47b083fa8f5c82b27c | [
"Apache-2.0"
] | 16 | 2020-08-25T18:44:45.000Z | 2022-03-25T19:12:36.000Z | vision/databunch.py | JoeRishon/DashAI | 68d1b01088a65e2062d96b47b083fa8f5c82b27c | [
"Apache-2.0"
] | 10 | 2020-09-30T19:27:17.000Z | 2021-04-04T14:50:31.000Z | from fastai.vision import *
from fastai.vision.gan import GANItemList
from core.databunch import DashDatabunch
from pathlib import Path
class DashVisionDatabunch:
    """Builds a fastai vision DataBunch from a DashAI request dictionary."""

    @staticmethod
    def create_vision_databunch(response):
        """Create a vision databunch: itemlist -> split -> label -> transform.

        `response` is the full request dict; the transform/size settings
        live under response['vision']['transform'].
        """
        src = DashVisionDatabunch.get_itemlist(response)
        src = DashDatabunch.split_databunch(response, src)
        src = DashDatabunch.label_databunch(response, src)
        tra = DashVisionDatabunch.create_transform(response['vision']['transform'])
        src = src.transform(tra, tfm_y=False, size=response['vision']['transform']['size'])
        return DashDatabunch.create_databunch(response, src)

    @staticmethod
    def get_itemlist(response):
        """Return the ItemList subclass appropriate for the requested subtask.

        Falls through to None for unknown subtask/method combinations,
        matching the original behaviour.
        """
        vision = response["vision"]
        if vision["subtask"] == "object-detection":
            return ObjectItemList.from_folder(path=vision["input"]["from_folder"]["path"])
        if vision["subtask"] == 'gan':
            # NOTE(review): vision["subtask"] compared equal to the *string*
            # 'gan' just above, so indexing it with ['gan'] below raises
            # TypeError.  Kept as in the original -- verify the request
            # schema; noise_sz probably lives elsewhere in the dict.
            return GANItemList.from_folder(
                path=vision['input']['from_folder']['path'],
                noise_sz=vision['subtask']['gan']['noise_sz'])
        if vision["subtask"] == "segmentation":
            return SegmentationItemList.from_folder(path=vision["input"]["from_folder"]["path"])
        if vision["input"]["method"] == "from_folder":
            return ImageList.from_folder(**vision["input"]["from_folder"])
        if vision["input"]["method"] == "from_csv":
            return ImageList.from_csv(**vision["input"]["from_csv"])
        return None

    @staticmethod
    def _basic_transforms(section):
        """Build a get_transforms() result from section['basic_transforms'].

        Mirrors the original code exactly: several locals are only bound
        when their config value is truthy, so a missing/zero value raises
        NameError at the get_transforms() call (pre-existing behaviour).
        """
        bt = section['basic_transforms']
        if bt['do_flip']:
            do_flip = bool(bt['do_flip'])
        flip_vert = bool(bt['flip_vert'])
        if bt['max_rotate']:
            max_rotate = bt['max_rotate']
        if bt['max_zoom']:
            max_zoom = bt['max_zoom']
        if bt['max_lighting']:
            max_lighting = bt['max_lighting']
        if bt['max_warp']:
            max_warp = bt['max_warp']
        if bt['p_affine']:
            p_affine = bt['p_affine']
        if bt['p_lighting']:
            p_lighting = bt['p_lighting']
        return get_transforms(do_flip, flip_vert, max_rotate, max_zoom,
                              max_lighting, max_warp, p_affine, p_lighting)

    @staticmethod
    def _zoom_crop(section):
        """Build a zoom_crop() transform list from section['zoom_crop']."""
        zc = section['zoom_crop']
        return zoom_crop(scale=zc['scale'], do_rand=zc['do_rand'], p=zc['p'])

    @staticmethod
    def _manual_tfms(p, chosen):
        """Build a list of individually selected transforms.

        p: the 'manual' parameter dict for one split (train or valid).
        chosen: container of transform names the user selected.
        """
        tras = list()
        if 'brightness' in chosen:
            tras.append(brightness(change=p['brightness']['change']))
        if 'contrast' in chosen:
            # low/high bounds arrive as separate keys in the request
            tras.append(contrast(scale=(p['contrast']['l_scale'], p['contrast']['h_scale'])))
        if 'crop' in chosen:
            tras.append(crop(size=p['crop']['size'], row_pct=p['crop']['row_pct'], col_pct=p['crop']['col_pct']))
        if 'crop_pad' in chosen:
            # NOTE(review): row_pct/col_pct read from p['crop'] (not
            # p['crop_pad']) exactly as in the original -- confirm intent.
            tras.append(crop_pad(size=p['crop_pad']['size'], padding_mode=p['crop_pad']['padding_mode'],
                                 row_pct=p['crop']['row_pct'], col_pct=p['crop']['col_pct']))
        if 'flip_lr' in chosen:
            tras.append(flip_lr())
        if 'flip_affine' in chosen:
            tras.append(flip_affine())
        if 'jitter' in chosen:
            tras.append(jitter(magnitude=p['jitter']['magnitude']))
        if 'pad' in chosen:
            # padding should be smaller than the input size
            tras.append(pad(padding=p['pad']['padding'], mode=p['pad']['mode']))
        if 'rotate' in chosen:
            tras.append(rotate(degrees=p['rotate']['degrees']))
        if 'skew' in chosen:
            tras.append(skew(direction=(p['skew']['l_direction'], p['skew']['h_direction']),
                             invert=p['skew']['invert'], magnitude=p['skew']['magnitude']))
        if 'squish' in chosen:
            tras.append(squish(scale=p['squish']['scale'], row_pct=p['squish']['row_pct'], col_pct=p['squish']['col_pct']))
        if 'symmetric_wrap' in chosen:
            tras.append(symmetric_warp(magnitude=p['symmetric_wrap']['magnitude']))
        if 'tilt' in chosen:
            tras.append(tilt(magnitude=p['tilt']['magnitude'],
                             direction=(p['tilt']['l_direction'], p['tilt']['h_direction'])))
        if 'zoom' in chosen:
            tras.append(zoom(scale=p['zoom']['scale'], row_pct=p['zoom']['row_pct'], col_pct=p['zoom']['col_pct']))
        if 'cutout' in chosen:
            tras.append(cutout(length=(p['cutout']['l_length'], p['cutout']['h_length']),
                               n_holes=(p['cutout']['l_n_holes'], p['cutout']['h_n_holes'])))
        return tras

    @staticmethod
    def create_transform(response):
        """Build train/valid transform lists from the transform sub-dict.

        Preserves the original control flow: a 'basic_transforms' or
        'zoom_crop' selection returns immediately for whichever split is
        processed first; only the 'manual' path produces the
        (train, valid) tuple returned at the end.
        """
        tfms_1 = tfms_2 = None  # defined even when a split is absent
        if response['train']:
            response_tr = response['train']
            if response['chosen_aug_train'] == 'basic_transforms':
                return DashVisionDatabunch._basic_transforms(response_tr)
            if response['chosen_aug_train'] == 'zoom_crop':
                return DashVisionDatabunch._zoom_crop(response_tr)
            if response['chosen_aug_train'] == 'manual':
                tfms_1 = DashVisionDatabunch._manual_tfms(
                    response_tr['manual'], response['manual_train'])
        if response['valid']:
            response_va = response['valid']
            if response['chosen_aug_valid'] == 'basic_transforms':
                return DashVisionDatabunch._basic_transforms(response_va)
            if response['chosen_aug_valid'] == 'zoom_crop':
                return DashVisionDatabunch._zoom_crop(response_va)
            if response['chosen_aug_valid'] == 'manual':
                # BUG FIX: the original read the *train* manual params
                # (response_tr['manual']) when building the valid
                # transforms, and raised NameError when no train section
                # had been provided.
                tfms_2 = DashVisionDatabunch._manual_tfms(
                    response_va['manual'], response['manual_valid'])
        return (tfms_1, tfms_2)
from fastai.vision.gan import GANItemList
from core.databunch import DashDatabunch
from pathlib import Path
class DashVisionDatabunch:
def create_vision_databunch(response):
path = Path('./')
src = DashVisionDatabunch.get_itemlist(response)
src = DashDatabunch.split_databunch(response, src)
src = DashDatabunch.label_databunch(response, src)
tra = DashVisionDatabunch.create_transform(response['vision']['transform'])
src = src.transform(tra, tfm_y=False,size=response['vision']['transform']['size'])
return DashDatabunch.create_databunch(response, src)
@staticmethod
def get_itemlist(response):
if response["vision"]["subtask"] == "object-detection":
return ObjectItemList.from_folder(path = response["vision"]["input"]["from_folder"]["path"])
if response["vision"]['subtask'] == 'gan':
return GANItemList.from_folder(
path=response["vision"]['input']['from_folder']['path'],
noise_sz=response["vision"]['subtask']['gan']['noise_sz'])
if response["vision"]["subtask"] == "segmentation":
return SegmentationItemList.from_folder(path=response["vision"]["input"]["from_folder"]["path"])
if response["vision"]["input"]["method"] == "from_folder":
return ImageList.from_folder(**response["vision"]["input"]["from_folder"])
if response["vision"]["input"]["method"] == "from_csv":
return ImageList.from_csv(**response["vision"]["input"]["from_csv"])
@staticmethod
def create_transform(response):
if(response['train']):
response_tr=response['train']
if(response['chosen_aug_train']=='basic_transforms'):
if(response_tr['basic_transforms']['do_flip']):
do_flip=bool(response_tr['basic_transforms']['do_flip'])
flip_vert=bool(response_tr['basic_transforms']['flip_vert'])
if(response_tr['basic_transforms']['max_rotate']):
max_rotate=response_tr['basic_transforms']['max_rotate']
if(response_tr['basic_transforms']['max_zoom']):
max_zoom=response_tr['basic_transforms']['max_zoom']
if(response_tr['basic_transforms']['max_lighting']):
max_lighting=response_tr['basic_transforms']['max_lighting']
if(response_tr['basic_transforms']['max_warp']):
max_warp=response_tr['basic_transforms']['max_warp']
if(response_tr['basic_transforms']['p_affine']):
p_affine=response_tr['basic_transforms']['p_affine']
if(response_tr['basic_transforms']['p_lighting']):
p_lighting=response_tr['basic_transforms']['p_lighting']
tfms_1 = get_transforms(do_flip,flip_vert,max_rotate,max_zoom,max_lighting,max_warp,p_affine,p_lighting)
return tfms_1
if(response['chosen_aug_train']=='zoom_crop'):
tfms_1 = zoom_crop(scale=response_tr['zoom_crop']['scale'], do_rand=response_tr['zoom_crop']['do_rand'],p=response_tr['zoom_crop']['p'])
return tfms_1
if(response['chosen_aug_train']=='manual'):
tras=list()
p=response_tr['manual']
if('brightness' in response['manual_train']):
tras.append(brightness(change=p['brightness']['change']))
if('contrast' in response['manual_train']):
tras.append(contrast(scale=(p['contrast']['l_scale'],p['contrast']['h_scale'])))
if('crop' in response['manual_train']):
tras.append(crop(size=p['crop']['size'],row_pct=p['crop']['row_pct'],col_pct=p['crop']['col_pct']))
if('crop_pad' in response['manual_train']):
tras.append(crop_pad(size=p['crop_pad']['size'],padding_mode=p['crop_pad']['padding_mode'],row_pct=p['crop']['row_pct'],col_pct=p['crop']['col_pct']))
if('flip_lr' in response['manual_train']):
tras.append(flip_lr())
if('flip_affine' in response['manual_train']):
tras.append(flip_affine())
if('jitter' in response['manual_train']):
tras.append(jitter(magnitude=p['jitter']['magnitude']))
if('pad' in response['manual_train']):
tras.append(pad(padding=p['pad']['padding'],mode=p['pad']['mode']))
if('rotate' in response['manual_train']):
tras.append(rotate(degrees=p['rotate']['degrees']))
if('skew' in response['manual_train']):
tras.append(skew(direction=(p['skew']['l_direction'],p['skew']['h_direction']),invert=p['skew']['invert'],magnitude=p['skew']['magnitude']))
if('squish' in response['manual_train']):
tras.append(squish(scale=p['squish']['scale'],row_pct=p['squish']['row_pct'],col_pct=p['squish']['col_pct']))
if('symmetric_wrap' in response['manual_train']):
tras.append(symmetric_warp(magnitude=p['symmetric_wrap']['magnitude']))
if('tilt' in response['manual_train']):
tras.append(tilt(magnitude=p['tilt']['magnitude'],direction=(p['tilt']['l_direction'],p['tilt']['h_direction'])))
if('zoom' in response['manual_train']):
tras.append(zoom(scale=p['zoom']['scale'],row_pct=p['zoom']['row_pct'],col_pct=p['zoom']['col_pct']))
if('cutout' in response['manual_train']):
tras.append(cutout(length=(p['cutout']['l_length'],p['cutout']['h_length']),n_holes=(p['cutout']['l_n_holes'],p['cutout']['h_n_holes'])))
tfms_1= tras
if(response['valid']):
response_va=response['valid']
if(response['chosen_aug_valid']=='basic_transforms'):
if(response_va['basic_transforms']['do_flip']):
do_flip=bool(response_va['basic_transforms']['do_flip'])
flip_vert=bool(response_va['basic_transforms']['flip_vert'])
if(response_va['basic_transforms']['max_rotate']):
max_rotate=response_va['basic_transforms']['max_rotate']
if(response_va['basic_transforms']['max_zoom']):
max_zoom=response_va['basic_transforms']['max_zoom']
if(response_va['basic_transforms']['max_lighting']):
max_lighting=response_va['basic_transforms']['max_lighting']
if(response_va['basic_transforms']['max_warp']):
max_warp=response_va['basic_transforms']['max_warp']
if(response_va['basic_transforms']['p_affine']):
p_affine=response_va['basic_transforms']['p_affine']
if(response_va['basic_transforms']['p_lighting']):
p_lighting=response_va['basic_transforms']['p_lighting']
tfms_2 = get_transforms(do_flip,flip_vert,max_rotate,max_zoom,max_lighting,max_warp,p_affine,p_lighting)
return tfms_2
if(response['chosen_aug_valid']=='zoom_crop'):
tfms_2 = zoom_crop(scale=response_va['zoom_crop']['scale'], do_rand=response_va['zoom_crop']['do_rand'],p=response_va['zoom_crop']['p'])
return tfms_2
if(response['chosen_aug_valid']=='manual'):
tras=list()
p=response_tr['manual']
if('brightness' in response['manual_valid']):
tras.append(brightness(change=p['brightness']['change']))
if('contrast' in response['manual_valid']):
tras.append(contrast(scale=(p['contrast']['l_scale'],p['contrast']['h_scale'])))
if('crop' in response['manual_valid']):
tras.append(crop(size=p['crop']['size'],row_pct=p['crop']['row_pct'],col_pct=p['crop']['col_pct']))
if('crop_pad' in response['manual_valid']):
tras.append(crop_pad(size=p['crop_pad']['size'],padding_mode=p['crop_pad']['padding_mode'],row_pct=p['crop']['row_pct'],col_pct=p['crop']['col_pct']))
if('flip_lr' in response['manual_valid']):
tras.append(flip_lr())
if('flip_affine' in response['manual_valid']):
tras.append(flip_affine())
if('jitter' in response['manual_valid']):
tras.append(jitter(magnitude=p['jitter']['magnitude']))
if('pad' in response['manual_valid']):
tras.append(pad(padding=p['pad']['padding'],mode=p['pad']['mode']))
if('rotate' in response['manual_valid']):
tras.append(rotate(degrees=p['rotate']['degrees']))
if('skew' in response['manual_valid']):
tras.append(skew(direction=(p['skew']['l_direction'],p['skew']['h_direction']),invert=p['skew']['invert'],magnitude=p['skew']['magnitude']))
if('squish' in response['manual_valid']):
tras.append(squish(scale=p['squish']['scale'],row_pct=p['squish']['row_pct'],col_pct=p['squish']['col_pct']))
if('symmetric_wrap' in response['manual_valid']):
tras.append(symmetric_warp(magnitude=p['symmetric_wrap']['magnitude']))
if('tilt' in response['manual_valid']):
tras.append(tilt(magnitude=p['tilt']['magnitude'],direction=(p['tilt']['l_direction'],p['tilt']['h_direction'])))
if('zoom' in response['manual_valid']):
tras.append(zoom(scale=p['zoom']['scale'],row_pct=p['zoom']['row_pct'],col_pct=p['zoom']['col_pct']))
if('cutout' in response['manual_valid']):
tras.append(cutout(length=(p['cutout']['l_length'],p['cutout']['h_length']),n_holes=(p['cutout']['l_n_holes'],p['cutout']['h_n_holes'])))
tfms_2= tras
return (tfms_1,tfms_2) | true | true |
1c387ffffca64bd1c952d2445da2b5c5569fbcfc | 2,406 | py | Python | data/p4VQE/R1/benchmark/startQiskit61.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R1/benchmark/startQiskit61.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R1/benchmark/startQiskit61.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=3
# total number=10
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n: int) -> QuantumCircuit:
    """Build the benchmark circuit on ``n`` qubits.

    Relies on the module-level globals ``E`` (weighted edge list),
    ``V`` (node array), ``gamma`` and ``beta`` set in ``__main__``
    before this is called.
    """
    input_qubit = QuantumRegister(n, "qc")
    prog = QuantumCircuit(input_qubit)
    # Hadamard on every qubit.  The original hard-coded qubits 0-3
    # despite taking ``n`` as a parameter; generalized to range(n)
    # (identical for the n=4 call in __main__).
    for i in range(n):
        prog.h(input_qubit[i])
    for edge in E:
        k = edge[0]
        l = edge[1]
        # NOTE(review): the controlled-phase uses k-1/l-1 while the
        # single-qubit phases use k/l directly -- mixed indexing kept
        # from the original; confirm intended qubit numbering.
        prog.cp(-2 * gamma, input_qubit[k - 1], input_qubit[l - 1])
        prog.p(gamma, k)
        prog.p(gamma, l)
    prog.rx(2 * beta, range(len(V)))
    # Trailing gates appended by the benchmark generator (numbers 5-9
    # in the original's mutation markers).
    prog.cx(input_qubit[1], input_qubit[0])
    prog.h(input_qubit[0])
    prog.cz(input_qubit[1], input_qubit[0])
    prog.h(input_qubit[0])
    return prog
if __name__ == '__main__':
    # Problem instance: a 4-node weighted graph.
    n = 4
    V = np.arange(0, n, 1)
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)
    # Grid-search the objective F1 over (gamma, beta) in [0, pi) and
    # keep the first maximizing pair (presumably the analytic p=1 QAOA
    # expectation for this graph -- formula taken as given).
    step_size = 0.1
    a_gamma = np.arange(0, np.pi, step_size)
    a_beta = np.arange(0, np.pi, step_size)
    a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
    F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
        1 + np.cos(4 * a_gamma) ** 2)
    result = np.where(F1 == np.amax(F1))
    a = list(zip(result[0], result[1]))[0]
    gamma = a[0] * step_size
    beta = a[1] * step_size
    # Build the circuit, transpile it for the FakeYorktown backend
    # layout, then run on the qasm simulator.
    prog = make_circuit(4)
    sample_shot =5200
    writefile = open("../data/startQiskit61.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    backend = BasicAer.get_backend('qasm_simulator')
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.measure_all()
    prog = circuit1
    info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
    # Dump measurement counts, transpiled depth and the circuit diagram.
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| 27.033708 | 118 | 0.632585 |
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.cx(input_qubit[1],input_qubit[0])
prog.h(input_qubit[0])
prog.cz(input_qubit[1],input_qubit[0])
prog.h(input_qubit[0])
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5200
writefile = open("../data/startQiskit61.csv", "w")
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| true | true |
1c388050475bd1bdb984e23752da29323d62e673 | 57,210 | py | Python | test/engine/test_pool.py | hbusul/sqlalchemy | cd58836d3e19489d5203c02f7cc5f2f2d7c82a20 | [
"MIT"
] | null | null | null | test/engine/test_pool.py | hbusul/sqlalchemy | cd58836d3e19489d5203c02f7cc5f2f2d7c82a20 | [
"MIT"
] | null | null | null | test/engine/test_pool.py | hbusul/sqlalchemy | cd58836d3e19489d5203c02f7cc5f2f2d7c82a20 | [
"MIT"
] | null | null | null | import collections
import random
import threading
import time
import weakref
import sqlalchemy as tsa
from sqlalchemy import event
from sqlalchemy import pool
from sqlalchemy import select
from sqlalchemy import testing
from sqlalchemy.engine import default
from sqlalchemy.pool.impl import _AsyncConnDialect
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_context_ok
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_none
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_not_none
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing.mock import ANY
from sqlalchemy.testing.mock import call
from sqlalchemy.testing.mock import Mock
from sqlalchemy.testing.mock import patch
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.testing.util import lazy_gc
join_timeout = 10
def MockDBAPI():  # noqa
    """Return a Mock impersonating a DBAPI module.

    The result exposes ``connect()``, ``shutdown(value)`` and an
    ``is_shutdown`` flag; connections expose ``cursor()``, ``close()``
    and a ``closed`` attribute.
    """

    def make_cursor():
        return Mock()

    def connect(*arg, **kw):
        def mark_closed():
            connection.closed = True

        # mock seems like it might have an issue logging call_count
        # correctly under threading, not sure; the close() side effect
        # keeps the `closed` flag reliable regardless.
        connection = Mock(
            cursor=Mock(side_effect=make_cursor),
            close=Mock(side_effect=mark_closed),
            closed=False,
        )
        return connection

    def shutdown(value):
        # While "shut down", connect() raises instead of returning.
        effect = Exception("connect failed") if value else connect
        db.connect = Mock(side_effect=effect)
        db.is_shutdown = value

    db = Mock(
        connect=Mock(side_effect=connect), shutdown=shutdown, is_shutdown=False
    )
    return db
class PoolTestBase(fixtures.TestBase):
    """Shared setup/teardown plus QueuePool fixtures for the pool tests."""

    def setup_test(self):
        pool.clear_managers()
        self._teardown_conns = []

    def teardown_test(self):
        # Close whatever registered connections are still alive.
        for wr in self._teardown_conns:
            conn = wr()
            if conn:
                conn.close()

    @classmethod
    def teardown_test_class(cls):
        pool.clear_managers()

    def _with_teardown(self, connection):
        # Track via weakref so the test itself still controls lifetime.
        self._teardown_conns.append(weakref.ref(connection))
        return connection

    def _queuepool_fixture(self, **kw):
        dbapi, queue_pool = self._queuepool_dbapi_fixture(**kw)
        return queue_pool

    def _queuepool_dbapi_fixture(self, **kw):
        dbapi = MockDBAPI()
        wants_asyncio = kw.pop("_is_asyncio", False)

        def creator():
            return dbapi.connect("foo.db")

        queue_pool = pool.QueuePool(creator=creator, **kw)
        if wants_asyncio:
            queue_pool._is_asyncio = True
            queue_pool._dialect = _AsyncConnDialect()
        return dbapi, queue_pool
class PoolTest(PoolTestBase):
    """Basic behaviour of pool implementations and _ConnectionRecord."""

    @testing.fails_on(
        "+pyodbc", "pyodbc cursor doesn't implement tuple __eq__"
    )
    @testing.fails_on("+pg8000", "returns [1], not (1,)")
    def test_cursor_iterable(self):
        # a raw DBAPI cursor is iterable, yielding row tuples
        conn = testing.db.raw_connection()
        cursor = conn.cursor()
        cursor.execute(str(select(1).compile(testing.db)))
        expected = [(1,)]
        for row in cursor:
            eq_(row, expected.pop(0))
    def test_no_connect_on_recreate(self):
        # dispose()/recreate() must never invoke the creator themselves
        def creator():
            raise Exception("no creates allowed")
        for cls in (
            pool.SingletonThreadPool,
            pool.StaticPool,
            pool.QueuePool,
            pool.NullPool,
            pool.AssertionPool,
        ):
            p = cls(creator=creator)
            p.dispose()
            p2 = p.recreate()
            assert p2.__class__ is cls
            mock_dbapi = MockDBAPI()
            p = cls(creator=mock_dbapi.connect)
            conn = p.connect()
            conn.close()
            # with the creator now failing, dispose/recreate must not
            # attempt a fresh connection
            mock_dbapi.connect.side_effect = Exception("error!")
            p.dispose()
            p.recreate()
    def test_info(self):
        """.info persists across checkin, is cleared on invalidate and
        follows the DBAPI connection on detach."""
        p = self._queuepool_fixture(pool_size=1, max_overflow=0)
        c = p.connect()
        self.assert_(not c.info)
        self.assert_(c.info is c._connection_record.info)
        c.info["foo"] = "bar"
        c.close()
        del c
        # same DBAPI connection checked out again: info survives checkin
        c = p.connect()
        self.assert_("foo" in c.info)
        c.invalidate()
        # invalidation replaces the connection, discarding its info
        c = p.connect()
        self.assert_("foo" not in c.info)
        c.info["foo2"] = "bar2"
        c.detach()
        self.assert_("foo2" in c.info)
        c2 = p.connect()
        is_not(c.connection, c2.connection)
        assert not c2.info
        assert "foo2" in c.info
    def test_rec_info(self):
        """.record_info belongs to the pool slot: it survives both checkin
        and invalidate, but is dropped on detach."""
        p = self._queuepool_fixture(pool_size=1, max_overflow=0)
        c = p.connect()
        self.assert_(not c.record_info)
        self.assert_(c.record_info is c._connection_record.record_info)
        c.record_info["foo"] = "bar"
        c.close()
        del c
        c = p.connect()
        self.assert_("foo" in c.record_info)
        c.invalidate()
        c = p.connect()
        self.assert_("foo" in c.record_info)
        c.record_info["foo2"] = "bar2"
        c.detach()
        # detach severs the fairy from its record entirely
        is_(c.record_info, None)
        is_(c._connection_record, None)
        c2 = p.connect()
        assert c2.record_info
        assert "foo2" in c2.record_info
    def test_rec_unconnected(self):
        # test production of a _ConnectionRecord with an
        # initially unconnected state.
        dbapi = MockDBAPI()
        p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
        r1 = pool._ConnectionRecord(p1, connect=False)
        assert not r1.connection
        c1 = r1.get_connection()
        is_(c1, r1.connection)
    def test_rec_close_reopen(self):
        # test that _ConnectionRecord.close() allows
        # the record to be reusable
        dbapi = MockDBAPI()
        p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
        r1 = pool._ConnectionRecord(p1)
        c1 = r1.connection
        c2 = r1.get_connection()
        is_(c1, c2)
        r1.close()
        assert not r1.connection
        eq_(c1.mock_calls, [call.close()])
        # a fresh DBAPI connection is produced on the next get
        c2 = r1.get_connection()
        is_not(c1, c2)
        is_(c2, r1.connection)
        eq_(c2.mock_calls, [])
    @testing.combinations(
        (
            pool.QueuePool,
            dict(pool_size=8, max_overflow=10, timeout=25, use_lifo=True),
        ),
        (pool.QueuePool, {}),
        (pool.NullPool, {}),
        (pool.SingletonThreadPool, {}),
        (pool.StaticPool, {}),
        (pool.AssertionPool, {}),
    )
    def test_recreate_state(self, pool_cls, pool_args):
        """recreate() must carry over all configurational state."""
        creator = object()
        pool_args["pre_ping"] = True
        pool_args["reset_on_return"] = "commit"
        pool_args["recycle"] = 35
        pool_args["logging_name"] = "somepool"
        pool_args["dialect"] = default.DefaultDialect()
        pool_args["echo"] = "debug"
        p1 = pool_cls(creator=creator, **pool_args)
        cls_keys = dir(pool_cls)
        d1 = dict(p1.__dict__)
        p2 = p1.recreate()
        d2 = dict(p2.__dict__)
        # strip class-level attributes; compare instance state only
        for k in cls_keys:
            d1.pop(k, None)
            d2.pop(k, None)
        # these per-instance objects are expected to differ by identity
        for k in (
            "_invoke_creator",
            "_pool",
            "_overflow_lock",
            "_fairy",
            "_conn",
            "logger",
        ):
            if k in d2:
                d2[k] = mock.ANY
        eq_(d1, d2)
        eq_(p1.echo, p2.echo)
        is_(p1._dialect, p2._dialect)
        if "use_lifo" in pool_args:
            eq_(p1._pool.use_lifo, p2._pool.use_lifo)
class PoolDialectTest(PoolTestBase):
    """Each pool implementation must route rollback/commit/close through
    its dialect's do_* hooks; these tests record the call sequence."""

    def _dialect(self):
        # `events` collects one token per dialect-level operation.
        events = []

        class RecordingDialect(object):
            is_async = False

            def do_rollback(self, dbapi_connection):
                events.append("R")
                dbapi_connection.rollback()

            def do_commit(self, dbapi_connection):
                events.append("C")
                dbapi_connection.commit()

            def do_close(self, dbapi_connection):
                events.append("CL")
                dbapi_connection.close()

        return RecordingDialect(), events

    def _do_test(self, pool_cls, assertion):
        mock_dbapi = MockDBAPI()
        dialect, events = self._dialect()
        p = pool_cls(creator=mock_dbapi.connect)
        p._dialect = dialect
        # checkout/checkin cycle, dispose + recreate, then another cycle
        conn = p.connect()
        conn.close()
        p.dispose()
        p.recreate()
        conn = p.connect()
        conn.close()
        eq_(events, assertion)

    def test_queue_pool(self):
        self._do_test(pool.QueuePool, ["R", "CL", "R"])

    def test_assertion_pool(self):
        self._do_test(pool.AssertionPool, ["R", "CL", "R"])

    def test_singleton_pool(self):
        self._do_test(pool.SingletonThreadPool, ["R", "CL", "R"])

    def test_null_pool(self):
        # NullPool closes on every checkin, hence the extra "CL"
        self._do_test(pool.NullPool, ["R", "CL", "R", "CL"])

    def test_static_pool(self):
        self._do_test(pool.StaticPool, ["R", "CL", "R"])
class PoolEventsTest(PoolTestBase):
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append("first_connect")
event.listen(p, "first_connect", first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append("connect")
event.listen(p, "connect", connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append("checkout")
event.listen(p, "checkout", checkout)
return p, canary
def _checkin_event_fixture(self, _is_asyncio=False):
p = self._queuepool_fixture(_is_asyncio=_is_asyncio)
canary = []
@event.listens_for(p, "checkin")
def checkin(*arg, **kw):
canary.append("checkin")
@event.listens_for(p, "close_detached")
def close_detached(*arg, **kw):
canary.append("close_detached")
@event.listens_for(p, "detach")
def detach(*arg, **kw):
canary.append("detach")
return p, canary
def _reset_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def reset(*arg, **kw):
canary.append("reset")
event.listen(p, "reset", reset)
return p, canary
def _invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "invalidate", canary)
return p, canary
def _soft_invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "soft_invalidate", canary)
return p, canary
def _close_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close", canary)
return p, canary
def _detach_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "detach", canary)
return p, canary
def _close_detached_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close_detached", canary)
return p, canary
def test_close(self):
p, canary = self._close_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.close()
eq_(canary.mock_calls, [])
p.dispose()
eq_(canary.mock_calls, [call(connection, rec)])
    def test_detach(self):
        """"detach" fires with (connection, record) on fairy.detach()."""
        p, canary = self._detach_event_fixture()
        c1 = p.connect()
        connection = c1.connection
        rec = c1._connection_record
        c1.detach()
        eq_(canary.mock_calls, [call(connection, rec)])
    def test_detach_close(self):
        """"close_detached" fires when a detached connection is closed."""
        p, canary = self._close_detached_event_fixture()
        c1 = p.connect()
        connection = c1.connection
        c1.detach()
        c1.close()
        eq_(canary.mock_calls, [call(connection)])
    def test_first_connect_event(self):
        """"first_connect" fires on the first checkout."""
        p, canary = self._first_connect_event_fixture()
        p.connect()
        eq_(canary, ["first_connect"])
    def test_first_connect_event_fires_once(self):
        """"first_connect" fires only once per pool."""
        p, canary = self._first_connect_event_fixture()
        p.connect()
        p.connect()
        eq_(canary, ["first_connect"])
    def test_first_connect_on_previously_recreated(self):
        """A pool recreated before first use fires its own "first_connect"."""
        p, canary = self._first_connect_event_fixture()
        p2 = p.recreate()
        p.connect()
        p2.connect()
        eq_(canary, ["first_connect", "first_connect"])
    def test_first_connect_on_subsequently_recreated(self):
        """A pool recreated after first use still fires "first_connect"."""
        p, canary = self._first_connect_event_fixture()
        p.connect()
        p2 = p.recreate()
        p2.connect()
        eq_(canary, ["first_connect", "first_connect"])
    def test_connect_event(self):
        """"connect" fires when a new DBAPI connection is created."""
        p, canary = self._connect_event_fixture()
        p.connect()
        eq_(canary, ["connect"])
    def test_connect_insert_event(self):
        """insert=True prepends a listener ahead of previously added ones."""
        p = self._queuepool_fixture()
        canary = []
        def connect_one(*arg, **kw):
            canary.append("connect_one")
        def connect_two(*arg, **kw):
            canary.append("connect_two")
        def connect_three(*arg, **kw):
            canary.append("connect_three")
        event.listen(p, "connect", connect_one)
        event.listen(p, "connect", connect_two, insert=True)
        event.listen(p, "connect", connect_three)
        p.connect()
        # connect_two was inserted at the head of the listener list
        eq_(canary, ["connect_two", "connect_one", "connect_three"])
    def test_connect_event_fires_subsequent(self):
        """"connect" fires for each newly created DBAPI connection."""
        p, canary = self._connect_event_fixture()
        c1 = p.connect()  # noqa
        c2 = p.connect()  # noqa
        eq_(canary, ["connect", "connect"])
    def test_connect_on_previously_recreated(self):
        """Listeners carry over to a pool recreated before first use."""
        p, canary = self._connect_event_fixture()
        p2 = p.recreate()
        p.connect()
        p2.connect()
        eq_(canary, ["connect", "connect"])
    def test_connect_on_subsequently_recreated(self):
        """Listeners carry over to a pool recreated after first use."""
        p, canary = self._connect_event_fixture()
        p.connect()
        p2 = p.recreate()
        p2.connect()
        eq_(canary, ["connect", "connect"])
    def test_checkout_event(self):
        """"checkout" fires on connect()."""
        p, canary = self._checkout_event_fixture()
        p.connect()
        eq_(canary, ["checkout"])
    def test_checkout_event_fires_subsequent(self):
        """"checkout" fires for every checkout, not just the first."""
        p, canary = self._checkout_event_fixture()
        p.connect()
        p.connect()
        eq_(canary, ["checkout", "checkout"])
    def test_checkout_event_on_subsequently_recreated(self):
        """"checkout" listeners carry over to a recreated pool."""
        p, canary = self._checkout_event_fixture()
        p.connect()
        p2 = p.recreate()
        p2.connect()
        eq_(canary, ["checkout", "checkout"])
    def test_checkin_event(self):
        """"checkin" fires when the fairy is closed, not at checkout."""
        p, canary = self._checkin_event_fixture()
        c1 = p.connect()
        eq_(canary, [])
        c1.close()
        eq_(canary, ["checkin"])
    def test_reset_event(self):
        """"reset" fires when the fairy is returned to the pool."""
        p, canary = self._reset_event_fixture()
        c1 = p.connect()
        eq_(canary, [])
        c1.close()
        eq_(canary, ["reset"])
    def test_soft_invalidate_event_no_exception(self):
        """"soft_invalidate" fires with exception arg None when no error given."""
        p, canary = self._soft_invalidate_event_fixture()
        c1 = p.connect()
        c1.close()
        assert not canary.called
        c1 = p.connect()
        dbapi_con = c1.connection
        c1.invalidate(soft=True)
        # positional args are (dbapi_connection, record, exception)
        assert canary.call_args_list[0][0][0] is dbapi_con
        assert canary.call_args_list[0][0][2] is None
    def test_soft_invalidate_event_exception(self):
        """"soft_invalidate" passes through the supplied exception object."""
        p, canary = self._soft_invalidate_event_fixture()
        c1 = p.connect()
        c1.close()
        assert not canary.called
        c1 = p.connect()
        dbapi_con = c1.connection
        exc = Exception("hi")
        c1.invalidate(exc, soft=True)
        # positional args are (dbapi_connection, record, exception)
        assert canary.call_args_list[0][0][0] is dbapi_con
        assert canary.call_args_list[0][0][2] is exc
    def test_invalidate_event_no_exception(self):
        """"invalidate" fires with exception arg None when no error given."""
        p, canary = self._invalidate_event_fixture()
        c1 = p.connect()
        c1.close()
        assert not canary.called
        c1 = p.connect()
        dbapi_con = c1.connection
        c1.invalidate()
        # positional args are (dbapi_connection, record, exception)
        assert canary.call_args_list[0][0][0] is dbapi_con
        assert canary.call_args_list[0][0][2] is None
    def test_invalidate_event_exception(self):
        """"invalidate" passes through the supplied exception object."""
        p, canary = self._invalidate_event_fixture()
        c1 = p.connect()
        c1.close()
        assert not canary.called
        c1 = p.connect()
        dbapi_con = c1.connection
        exc = Exception("hi")
        c1.invalidate(exc)
        # positional args are (dbapi_connection, record, exception)
        assert canary.call_args_list[0][0][0] is dbapi_con
        assert canary.call_args_list[0][0][2] is exc
    @testing.combinations((True, testing.requires.python3), (False,))
    def test_checkin_event_gc(self, detach_gced):
        """GC of an un-closed fairy: sync pools check the connection back in;
        asyncio pools (detach_gced=True) detach and abandon it instead."""
        p, canary = self._checkin_event_fixture(_is_asyncio=detach_gced)
        c1 = p.connect()
        dbapi_connection = weakref.ref(c1.connection)
        eq_(canary, [])
        del c1
        lazy_gc()
        if detach_gced:
            # "close_detached" is not called because for asyncio the
            # connection is just lost.
            eq_(canary, ["detach"])
        else:
            eq_(canary, ["checkin"])
        gc_collect()
        if detach_gced:
            # abandoned connection is garbage collected
            is_none(dbapi_connection())
        else:
            # checked-in connection stays alive inside the pool
            is_not_none(dbapi_connection())
    def test_checkin_event_on_subsequently_recreated(self):
        """"checkin" listeners carry over to a recreated pool."""
        p, canary = self._checkin_event_fixture()
        c1 = p.connect()
        p2 = p.recreate()
        c2 = p2.connect()
        eq_(canary, [])
        c1.close()
        eq_(canary, ["checkin"])
        c2.close()
        eq_(canary, ["checkin", "checkin"])
    def test_listen_targets_scope(self):
        """Listeners may target the Pool class, a pool instance, an engine
        instance, or the Engine class; all fire for one connect."""
        canary = []
        def listen_one(*args):
            canary.append("listen_one")
        def listen_two(*args):
            canary.append("listen_two")
        def listen_three(*args):
            canary.append("listen_three")
        def listen_four(*args):
            canary.append("listen_four")
        engine = testing_engine(testing.db.url)
        event.listen(pool.Pool, "connect", listen_one)
        event.listen(engine.pool, "connect", listen_two)
        event.listen(engine, "connect", listen_three)
        event.listen(engine.__class__, "connect", listen_four)
        engine.execute(select(1)).close()
        eq_(
            canary, ["listen_one", "listen_four", "listen_two", "listen_three"]
        )
    def test_listen_targets_per_subclass(self):
        """test that listen() called on a subclass remains specific to
        that subclass."""
        canary = []
        def listen_one(*args):
            canary.append("listen_one")
        def listen_two(*args):
            canary.append("listen_two")
        def listen_three(*args):
            canary.append("listen_three")
        event.listen(pool.Pool, "connect", listen_one)
        event.listen(pool.QueuePool, "connect", listen_two)
        event.listen(pool.SingletonThreadPool, "connect", listen_three)
        p1 = pool.QueuePool(creator=MockDBAPI().connect)
        p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
        # base-class listeners propagate; sibling subclass listeners do not
        assert listen_one in p1.dispatch.connect
        assert listen_two in p1.dispatch.connect
        assert listen_three not in p1.dispatch.connect
        assert listen_one in p2.dispatch.connect
        assert listen_two not in p2.dispatch.connect
        assert listen_three in p2.dispatch.connect
        p1.connect()
        eq_(canary, ["listen_one", "listen_two"])
        p2.connect()
        eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
    def test_connect_event_fails_invalidates(self):
        """A failing "connect" listener aborts the checkout; once the
        listener succeeds again, later listeners run normally."""
        # closure flag toggled below to make listen_one raise on demand
        fail = False
        def listen_one(conn, rec):
            if fail:
                raise Exception("it failed")
        def listen_two(conn, rec):
            rec.info["important_flag"] = True
        p1 = pool.QueuePool(
            creator=MockDBAPI().connect, pool_size=1, max_overflow=0
        )
        event.listen(p1, "connect", listen_one)
        event.listen(p1, "connect", listen_two)
        conn = p1.connect()
        eq_(conn.info["important_flag"], True)
        conn.invalidate()
        conn.close()
        fail = True
        assert_raises(Exception, p1.connect)
        fail = False
        conn = p1.connect()
        eq_(conn.info["important_flag"], True)
        conn.close()
    def teardown_test(self):
        """Clear class-level Pool listeners installed by these tests."""
        # TODO: need to get remove() functionality
        # going
        pool.Pool.dispatch._clear()
class PoolFirstConnectSyncTest(PoolTestBase):
    # test [ticket:2964]
    @testing.requires.timing_intensive
    def test_sync(self):
        """Concurrent checkouts must all wait for a slow "first_connect"
        listener to finish before any "connect"/checkout proceeds."""
        pool = self._queuepool_fixture(pool_size=3, max_overflow=0)
        evt = Mock()
        @event.listens_for(pool, "first_connect")
        def slow_first_connect(dbapi_con, rec):
            time.sleep(1)
            evt.first_connect()
        @event.listens_for(pool, "connect")
        def on_connect(dbapi_con, rec):
            evt.connect()
        def checkout():
            for j in range(2):
                c1 = pool.connect()
                time.sleep(0.02)
                c1.close()
                time.sleep(0.02)
        threads = []
        # what we're trying to do here is have concurrent use of
        # all three pooled connections at once, and the thing we want
        # to test is that first_connect() finishes completely before
        # any of the connections get returned.  so first_connect()
        # sleeps for one second, then pings the mock.  the threads should
        # not have made it to the "checkout() event for that one second.
        for i in range(5):
            th = threading.Thread(target=checkout)
            th.start()
            threads.append(th)
        for th in threads:
            th.join(join_timeout)
        # there is a very unlikely condition observed in CI on windows
        # where even though we have five threads above all calling upon the
        # pool, we didn't get concurrent use of all three connections, two
        # connections were enough.  so here we purposely just check out
        # all three at once just to get a consistent test result.
        make_sure_all_three_are_connected = [pool.connect() for i in range(3)]
        for conn in make_sure_all_three_are_connected:
            conn.close()
        # first_connect must strictly precede every connect
        eq_(
            evt.mock_calls,
            [
                call.first_connect(),
                call.connect(),
                call.connect(),
                call.connect(),
            ],
        )
class QueuePoolTest(PoolTestBase):
    """Exercise QueuePool: size/overflow accounting, timeouts, thread
    contention, recycle/invalidate behavior, and checkout ordering."""
    def test_queuepool_del(self):
        """Accounting when checked-out fairies are dereferenced (GC path)."""
        self._do_testqueuepool(useclose=False)
    def test_queuepool_close(self):
        """Accounting when checked-out fairies are explicitly closed."""
        self._do_testqueuepool(useclose=True)
    def _do_testqueuepool(self, useclose=False):
        """Walk checkouts past pool_size and verify the
        (size, checkedin, overflow, checkedout) tuple at each step."""
        p = self._queuepool_fixture(pool_size=3, max_overflow=-1)
        def status(pool):
            return (
                pool.size(),
                pool.checkedin(),
                pool.overflow(),
                pool.checkedout(),
            )
        c1 = p.connect()
        self.assert_(status(p) == (3, 0, -2, 1))
        c2 = p.connect()
        self.assert_(status(p) == (3, 0, -1, 2))
        c3 = p.connect()
        self.assert_(status(p) == (3, 0, 0, 3))
        c4 = p.connect()
        self.assert_(status(p) == (3, 0, 1, 4))
        c5 = p.connect()
        self.assert_(status(p) == (3, 0, 2, 5))
        c6 = p.connect()
        self.assert_(status(p) == (3, 0, 3, 6))
        if useclose:
            c4.close()
            c3.close()
            c2.close()
        else:
            c4 = c3 = c2 = None
            lazy_gc()
        eq_(status(p), (3, 3, 3, 3))
        if useclose:
            c1.close()
            c5.close()
            c6.close()
        else:
            c1 = c5 = c6 = None
            lazy_gc()
        self.assert_(status(p) == (3, 3, 0, 0))
        c1 = p.connect()
        c2 = p.connect()
        self.assert_(status(p) == (3, 1, 0, 2), status(p))
        if useclose:
            c2.close()
        else:
            c2 = None
            lazy_gc()
        self.assert_(status(p) == (3, 2, 0, 1))
        c1.close()
    def test_timeout_accessor(self):
        """Pool.timeout() reflects the configured timeout."""
        expected_timeout = 123
        p = self._queuepool_fixture(timeout=expected_timeout)
        eq_(p.timeout(), expected_timeout)
    @testing.requires.timing_intensive
    def test_timeout(self):
        """An exhausted pool raises TimeoutError after ~timeout seconds."""
        p = self._queuepool_fixture(pool_size=3, max_overflow=0, timeout=2)
        c1 = p.connect()  # noqa
        c2 = p.connect()  # noqa
        c3 = p.connect()  # noqa
        now = time.time()
        assert_raises(tsa.exc.TimeoutError, p.connect)
        assert int(time.time() - now) == 2
    @testing.requires.timing_intensive
    def test_timeout_subsecond_precision(self):
        """Fractional timeout values are honored, not rounded."""
        p = self._queuepool_fixture(pool_size=1, max_overflow=0, timeout=0.5)
        c1 = p.connect()  # noqa
        with expect_raises(tsa.exc.TimeoutError):
            now = time.time()
            c2 = p.connect()  # noqa
        # Python timing is not very accurate, the time diff should be very
        # close to 0.5s but we give 200ms of slack.
        assert 0.3 <= time.time() - now <= 0.7, "Pool timeout not respected"
    @testing.requires.threading_with_mock
    @testing.requires.timing_intensive
    def test_timeout_race(self):
        # test a race condition where the initial connecting threads all race
        # to queue.Empty, then block on the mutex.  each thread consumes a
        # connection as they go in.  when the limit is reached, the remaining
        # threads go in, and get TimeoutError; even though they never got to
        # wait for the timeout on queue.get().  the fix involves checking the
        # timeout again within the mutex, and if so, unlocking and throwing
        # them back to the start of do_get()
        dbapi = MockDBAPI()
        p = pool.QueuePool(
            creator=lambda: dbapi.connect(delay=0.05),
            pool_size=2,
            max_overflow=1,
            timeout=3,
        )
        timeouts = []
        def checkout():
            for x in range(1):
                now = time.time()
                try:
                    c1 = p.connect()
                except tsa.exc.TimeoutError:
                    timeouts.append(time.time() - now)
                    continue
                time.sleep(4)
                c1.close()
        threads = []
        for i in range(10):
            th = threading.Thread(target=checkout)
            th.start()
            threads.append(th)
        for th in threads:
            th.join(join_timeout)
        assert len(timeouts) > 0
        for t in timeouts:
            assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
            # normally, the timeout should under 4 seconds,
            # but on a loaded down buildbot it can go up.
            assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
    def _test_overflow(self, thread_count, max_overflow):
        """Hammer the pool from thread_count threads and assert observed
        overflow never exceeds max_overflow."""
        reaper = testing.engines.ConnectionKiller()
        dbapi = MockDBAPI()
        mutex = threading.Lock()
        def creator():
            time.sleep(0.05)
            with mutex:
                return dbapi.connect()
        p = pool.QueuePool(
            creator=creator, pool_size=3, timeout=2, max_overflow=max_overflow
        )
        reaper.add_pool(p)
        peaks = []
        def whammy():
            for i in range(10):
                try:
                    con = p.connect()
                    time.sleep(0.005)
                    peaks.append(p.overflow())
                    con.close()
                    del con
                except tsa.exc.TimeoutError:
                    pass
        threads = []
        for i in range(thread_count):
            th = threading.Thread(target=whammy)
            th.start()
            threads.append(th)
        for th in threads:
            th.join(join_timeout)
        self.assert_(max(peaks) <= max_overflow)
        reaper.assert_all_closed()
    def test_overflow_reset_on_failed_connect(self):
        """A failed connect attempt must not leak an overflow slot."""
        dbapi = Mock()
        def failing_dbapi():
            raise Exception("connection failed")
        creator = dbapi.connect
        def create():
            return creator()
        p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
        c1 = self._with_teardown(p.connect())  # noqa
        c2 = self._with_teardown(p.connect())  # noqa
        c3 = self._with_teardown(p.connect())  # noqa
        eq_(p._overflow, 1)
        creator = failing_dbapi
        assert_raises(Exception, p.connect)
        eq_(p._overflow, 1)
    @testing.requires.threading_with_mock
    @testing.requires.timing_intensive
    def test_hanging_connect_within_overflow(self):
        """test that a single connect() call which is hanging
        does not block other connections from proceeding."""
        dbapi = Mock()
        mutex = threading.Lock()
        def hanging_dbapi():
            time.sleep(2)
            with mutex:
                return dbapi.connect()
        def fast_dbapi():
            with mutex:
                return dbapi.connect()
        # thread-local so each thread picks its own connector below
        creator = threading.local()
        def create():
            return creator.mock_connector()
        def run_test(name, pool, should_hang):
            if should_hang:
                creator.mock_connector = hanging_dbapi
            else:
                creator.mock_connector = fast_dbapi
            conn = pool.connect()
            conn.operation(name)
            time.sleep(1)
            conn.close()
        p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
        threads = [
            threading.Thread(target=run_test, args=("success_one", p, False)),
            threading.Thread(target=run_test, args=("success_two", p, False)),
            threading.Thread(target=run_test, args=("overflow_one", p, True)),
            threading.Thread(target=run_test, args=("overflow_two", p, False)),
            threading.Thread(
                target=run_test, args=("overflow_three", p, False)
            ),
        ]
        for t in threads:
            t.start()
            time.sleep(0.2)
        for t in threads:
            t.join(timeout=join_timeout)
        # the hanging connect ("overflow_one") completes last
        eq_(
            dbapi.connect().operation.mock_calls,
            [
                call("success_one"),
                call("success_two"),
                call("overflow_two"),
                call("overflow_three"),
                call("overflow_one"),
            ],
        )
    @testing.requires.threading_with_mock
    @testing.requires.timing_intensive
    def test_waiters_handled(self):
        """test that threads waiting for connections are
        handled when the pool is replaced.
        """
        mutex = threading.Lock()
        dbapi = MockDBAPI()
        def creator():
            with mutex:
                return dbapi.connect()
        success = []
        for timeout in (None, 30):
            for max_overflow in (0, -1, 3):
                p = pool.QueuePool(
                    creator=creator,
                    pool_size=2,
                    timeout=timeout,
                    max_overflow=max_overflow,
                )
                def waiter(p, timeout, max_overflow):
                    success_key = (timeout, max_overflow)
                    conn = p.connect()
                    success.append(success_key)
                    time.sleep(0.1)
                    conn.close()
                c1 = p.connect()  # noqa
                c2 = p.connect()
                threads = []
                for i in range(2):
                    t = threading.Thread(
                        target=waiter, args=(p, timeout, max_overflow)
                    )
                    t.daemon = True
                    t.start()
                    threads.append(t)
                # this sleep makes sure that the
                # two waiter threads hit upon wait()
                # inside the queue, before we invalidate the other
                # two conns
                time.sleep(0.2)
                p._invalidate(c2)
                for t in threads:
                    t.join(join_timeout)
        # 2 waiters x (2 timeouts x 3 overflow settings) = 12 successes
        eq_(len(success), 12, "successes: %s" % success)
    def test_connrec_invalidated_within_checkout_no_race(self):
        """Test that a concurrent ConnectionRecord.invalidate() which
        occurs after the ConnectionFairy has called
        _ConnectionRecord.checkout()
        but before the ConnectionFairy tests "fairy.connection is None"
        will not result in an InvalidRequestError.
        This use case assumes that a listener on the checkout() event
        will be raising DisconnectionError so that a reconnect attempt
        may occur.
        """
        dbapi = MockDBAPI()
        def creator():
            return dbapi.connect()
        p = pool.QueuePool(creator=creator, pool_size=1, max_overflow=0)
        conn = p.connect()
        conn.close()
        _existing_checkout = pool._ConnectionRecord.checkout
        @classmethod
        def _decorate_existing_checkout(cls, *arg, **kw):
            # invalidate the record immediately after checkout to
            # simulate the race window
            fairy = _existing_checkout(*arg, **kw)
            connrec = fairy._connection_record
            connrec.invalidate()
            return fairy
        with patch(
            "sqlalchemy.pool._ConnectionRecord.checkout",
            _decorate_existing_checkout,
        ):
            conn = p.connect()
            is_(conn._connection_record.connection, None)
        conn.close()
    @testing.requires.threading_with_mock
    @testing.requires.timing_intensive
    def test_notify_waiters(self):
        """Invalidating a checked-out connection wakes blocked waiters."""
        dbapi = MockDBAPI()
        canary = []
        def creator():
            canary.append(1)
            return dbapi.connect()
        p1 = pool.QueuePool(
            creator=creator, pool_size=1, timeout=None, max_overflow=0
        )
        def waiter(p):
            conn = p.connect()
            canary.append(2)
            time.sleep(0.5)
            conn.close()
        c1 = p1.connect()
        threads = []
        for i in range(5):
            t = threading.Thread(target=waiter, args=(p1,))
            t.start()
            threads.append(t)
        time.sleep(0.5)
        eq_(canary, [1])
        # this also calls invalidate()
        # on c1
        p1._invalidate(c1)
        for t in threads:
            t.join(join_timeout)
        eq_(canary, [1, 1, 2, 2, 2, 2, 2])
    def test_dispose_closes_pooled(self):
        """dispose() closes checked-in connections; checked-out ones
        survive and return to the pool afterwards."""
        dbapi = MockDBAPI()
        p = pool.QueuePool(
            creator=dbapi.connect, pool_size=2, timeout=None, max_overflow=0
        )
        c1 = p.connect()
        c2 = p.connect()
        c1_con = c1.connection
        c2_con = c2.connection
        c1.close()
        eq_(c1_con.close.call_count, 0)
        eq_(c2_con.close.call_count, 0)
        p.dispose()
        eq_(c1_con.close.call_count, 1)
        eq_(c2_con.close.call_count, 0)
        # currently, if a ConnectionFairy is closed
        # after the pool has been disposed, there's no
        # flag that states it should be invalidated
        # immediately - it just gets returned to the
        # pool normally...
        c2.close()
        eq_(c1_con.close.call_count, 1)
        eq_(c2_con.close.call_count, 0)
        # ...and that's the one we'll get back next.
        c3 = p.connect()
        assert c3.connection is c2_con
    @testing.requires.threading_with_mock
    @testing.requires.timing_intensive
    def test_no_overflow(self):
        self._test_overflow(40, 0)
    @testing.requires.threading_with_mock
    @testing.requires.timing_intensive
    def test_max_overflow(self):
        self._test_overflow(40, 5)
    def test_overflow_no_gc(self):
        """Overflow connections are closed on checkin even when the
        DBAPI connections are kept strongly referenced."""
        p = self._queuepool_fixture(pool_size=2, max_overflow=2)
        # disable weakref collection of the
        # underlying connections
        strong_refs = set()
        def _conn():
            c = p.connect()
            strong_refs.add(c.connection)
            return c
        for j in range(5):
            # open 4 conns at a time.  each time this
            # will yield two pooled connections + two
            # overflow connections.
            conns = [_conn() for i in range(4)]
            for c in conns:
                c.close()
        # doing that for a total of 5 times yields
        # ten overflow connections closed plus the
        # two pooled connections unclosed.
        eq_(
            set([c.close.call_count for c in strong_refs]),
            set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0]),
        )
    def test_recycle(self):
        """Connections older than `recycle` seconds are replaced."""
        with patch("sqlalchemy.pool.base.time.time") as mock:
            mock.return_value = 10000
            p = self._queuepool_fixture(
                pool_size=1, max_overflow=0, recycle=30
            )
            c1 = p.connect()
            c_ref = weakref.ref(c1.connection)
            c1.close()
            mock.return_value = 10001
            c2 = p.connect()
            # within the recycle window: same DBAPI connection
            is_(c2.connection, c_ref())
            c2.close()
            mock.return_value = 10035
            c3 = p.connect()
            # past the recycle window: fresh DBAPI connection
            is_not(c3.connection, c_ref())
    @testing.requires.timing_intensive
    def test_recycle_on_invalidate(self):
        """Pool-level _invalidate() causes subsequent checkouts to
        reconnect."""
        p = self._queuepool_fixture(pool_size=1, max_overflow=0)
        c1 = p.connect()
        c_ref = weakref.ref(c1.connection)
        c1.close()
        c2 = p.connect()
        is_(c2.connection, c_ref())
        c2_rec = c2._connection_record
        p._invalidate(c2)
        assert c2_rec.connection is None
        c2.close()
        time.sleep(0.5)
        c3 = p.connect()
        is_not(c3.connection, c_ref())
    @testing.requires.timing_intensive
    def test_recycle_on_soft_invalidate(self):
        """Soft invalidation keeps the record but swaps in a fresh
        DBAPI connection on the next checkout."""
        p = self._queuepool_fixture(pool_size=1, max_overflow=0)
        c1 = p.connect()
        c_ref = weakref.ref(c1.connection)
        c1.close()
        c2 = p.connect()
        is_(c2.connection, c_ref())
        c2_rec = c2._connection_record
        # ensure pool invalidate time will be later than starttime
        # for ConnectionRecord objects above
        time.sleep(0.1)
        c2.invalidate(soft=True)
        is_(c2_rec.connection, c2.connection)
        c2.close()
        c3 = p.connect()
        is_not(c3.connection, c_ref())
        is_(c3._connection_record, c2_rec)
        is_(c2_rec.connection, c3.connection)
    def _no_wr_finalize(self):
        """Patch _finalize_fairy to fail if invoked as a weakref callback,
        i.e. the fairy should always be finalized explicitly."""
        finalize_fairy = pool._finalize_fairy
        def assert_no_wr_callback(
            connection, connection_record, pool, ref, echo, fairy=None
        ):
            if fairy is None:
                raise AssertionError(
                    "finalize fairy was called as a weakref callback"
                )
            return finalize_fairy(
                connection, connection_record, pool, ref, echo, fairy
            )
        return patch.object(pool, "_finalize_fairy", assert_no_wr_callback)
    def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
        # p is QueuePool with size=1, max_overflow=2,
        # and one connection in the pool that will need to
        # reconnect when next used (either due to recycle or invalidate)
        with self._no_wr_finalize():
            eq_(p.checkedout(), 0)
            eq_(p._overflow, 0)
            dbapi.shutdown(True)
            assert_raises_context_ok(Exception, p.connect)
            eq_(p._overflow, 0)
            eq_(p.checkedout(), 0)  # and not 1
            dbapi.shutdown(False)
            c1 = self._with_teardown(p.connect())  # noqa
            assert p._pool.empty()  # poolsize is one, so we're empty OK
            c2 = self._with_teardown(p.connect())  # noqa
            eq_(p._overflow, 1)  # and not 2
            # this hangs if p._overflow is 2
            c3 = self._with_teardown(p.connect())
            c3.close()
    def test_error_on_pooled_reconnect_cleanup_invalidate(self):
        """Reconnect bookkeeping stays correct after invalidate()."""
        dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
        c1 = p.connect()
        c1.invalidate()
        c1.close()
        self._assert_cleanup_on_pooled_reconnect(dbapi, p)
    @testing.requires.timing_intensive
    def test_error_on_pooled_reconnect_cleanup_recycle(self):
        """Reconnect bookkeeping stays correct after a recycle timeout."""
        dbapi, p = self._queuepool_dbapi_fixture(
            pool_size=1, max_overflow=2, recycle=1
        )
        c1 = p.connect()
        c1.close()
        time.sleep(1.5)
        self._assert_cleanup_on_pooled_reconnect(dbapi, p)
    @testing.requires.timing_intensive
    def test_connect_handler_not_called_for_recycled(self):
        """test [ticket:3497]"""
        dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
        canary = Mock()
        c1 = p.connect()
        c2 = p.connect()
        c1.close()
        c2.close()
        dbapi.shutdown(True)
        # ensure pool invalidate time will be later than starttime
        # for ConnectionRecord objects above
        time.sleep(0.1)
        bad = p.connect()
        p._invalidate(bad)
        bad.close()
        assert p._invalidate_time
        event.listen(p, "connect", canary.connect)
        event.listen(p, "checkout", canary.checkout)
        assert_raises(Exception, p.connect)
        # prune dead records so the next checkout reconnects cleanly
        p._pool.queue = collections.deque(
            [c for c in p._pool.queue if c.connection is not None]
        )
        dbapi.shutdown(False)
        c = p.connect()
        c.close()
        eq_(
            canary.mock_calls,
            [call.connect(ANY, ANY), call.checkout(ANY, ANY, ANY)],
        )
    @testing.requires.timing_intensive
    def test_connect_checkout_handler_always_gets_info(self):
        """test [ticket:3497]"""
        dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
        c1 = p.connect()
        c2 = p.connect()
        c1.close()
        c2.close()
        dbapi.shutdown(True)
        # ensure pool invalidate time will be later than starttime
        # for ConnectionRecord objects above
        time.sleep(0.1)
        bad = p.connect()
        p._invalidate(bad)
        bad.close()
        assert p._invalidate_time
        @event.listens_for(p, "connect")
        def connect(conn, conn_rec):
            conn_rec.info["x"] = True
        @event.listens_for(p, "checkout")
        def checkout(conn, conn_rec, conn_f):
            assert "x" in conn_rec.info
        assert_raises(Exception, p.connect)
        # prune dead records so the next checkout reconnects cleanly
        p._pool.queue = collections.deque(
            [c for c in p._pool.queue if c.connection is not None]
        )
        dbapi.shutdown(False)
        c = p.connect()
        c.close()
    def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self):
        """Cleanup is correct when a checkout listener raises
        DisconnectionError while the DBAPI is down."""
        dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
        c1 = p.connect()
        c1.close()
        @event.listens_for(p, "checkout")
        def handle_checkout_event(dbapi_con, con_record, con_proxy):
            if dbapi.is_shutdown:
                raise tsa.exc.DisconnectionError()
        self._assert_cleanup_on_pooled_reconnect(dbapi, p)
    @testing.combinations((True, testing.requires.python3), (False,))
    def test_userspace_disconnectionerror_weakref_finalizer(self, detach_gced):
        """GC of a fairy after a user-raised DisconnectionError: sync pools
        reset and return the connection; asyncio pools abandon it."""
        # NOTE(review): local name "pool" shadows the sqlalchemy pool module
        # within this method
        dbapi, pool = self._queuepool_dbapi_fixture(
            pool_size=1, max_overflow=2, _is_asyncio=detach_gced
        )
        if detach_gced:
            pool._dialect.is_async = True
        @event.listens_for(pool, "checkout")
        def handle_checkout_event(dbapi_con, con_record, con_proxy):
            if getattr(dbapi_con, "boom") == "yes":
                raise tsa.exc.DisconnectionError()
        conn = pool.connect()
        old_dbapi_conn = conn.connection
        conn.close()
        eq_(old_dbapi_conn.mock_calls, [call.rollback()])
        old_dbapi_conn.boom = "yes"
        conn = pool.connect()
        dbapi_conn = conn.connection
        del conn
        gc_collect()
        if detach_gced:
            # new connection was detached + abandoned on return
            eq_(dbapi_conn.mock_calls, [])
        else:
            # new connection reset and returned to pool
            eq_(dbapi_conn.mock_calls, [call.rollback()])
        # old connection was just closed - did not get an
        # erroneous reset on return
        eq_(old_dbapi_conn.mock_calls, [call.rollback(), call.close()])
    @testing.requires.timing_intensive
    def test_recycle_pool_no_race(self):
        """Concurrent invalidation with slow-closing connections must not
        recreate the pool more than once."""
        def slow_close():
            slow_closing_connection._slow_close()
            time.sleep(0.5)
        slow_closing_connection = Mock()
        slow_closing_connection.connect.return_value.close = slow_close
        class Error(Exception):
            pass
        dialect = Mock()
        dialect.is_disconnect = lambda *arg, **kw: True
        dialect.dbapi.Error = Error
        pools = []
        class TrackQueuePool(pool.QueuePool):
            def __init__(self, *arg, **kw):
                pools.append(self)
                super(TrackQueuePool, self).__init__(*arg, **kw)
        def creator():
            return slow_closing_connection.connect()
        p1 = TrackQueuePool(creator=creator, pool_size=20)
        from sqlalchemy import create_engine
        eng = create_engine(testing.db.url, pool=p1, _initialize=False)
        eng.dialect = dialect
        # 15 total connections
        conns = [eng.connect() for i in range(15)]
        # return 8 back to the pool
        for conn in conns[3:10]:
            conn.close()
        def attempt(conn):
            time.sleep(random.random())
            try:
                conn._handle_dbapi_exception(
                    Error(), "statement", {}, Mock(), Mock()
                )
            except tsa.exc.DBAPIError:
                pass
        # run an error + invalidate operation on the remaining 7 open
        # connections
        threads = []
        for conn in conns:
            t = threading.Thread(target=attempt, args=(conn,))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
        # return all 15 connections to the pool
        for conn in conns:
            conn.close()
        # re-open 15 total connections
        conns = [eng.connect() for i in range(15)]
        # 15 connections have been fully closed due to invalidate
        assert slow_closing_connection._slow_close.call_count == 15
        # 15 initial connections + 15 reconnections
        assert slow_closing_connection.connect.call_count == 30
        assert len(pools) <= 2, len(pools)
    def test_invalidate(self):
        """invalidate() forces a new DBAPI connection on next checkout."""
        p = self._queuepool_fixture(pool_size=1, max_overflow=0)
        c1 = p.connect()
        c_id = c1.connection.id
        c1.close()
        c1 = None
        c1 = p.connect()
        assert c1.connection.id == c_id
        c1.invalidate()
        c1 = None
        c1 = p.connect()
        assert c1.connection.id != c_id
    def test_recreate(self):
        """recreate() copies size, reset_on_return and max_overflow."""
        p = self._queuepool_fixture(
            reset_on_return=None, pool_size=1, max_overflow=0
        )
        p2 = p.recreate()
        assert p2.size() == 1
        assert p2._reset_on_return is pool.reset_none
        assert p2._max_overflow == 0
    def test_reconnect(self):
        """tests reconnect operations at the pool level. SA's
        engine/dialect includes another layer of reconnect support for
        'database was lost' errors."""
        dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
        c1 = p.connect()
        c_id = c1.connection.id
        c1.close()
        c1 = None
        c1 = p.connect()
        assert c1.connection.id == c_id
        dbapi.raise_error = True
        c1.invalidate()
        c1 = None
        c1 = p.connect()
        assert c1.connection.id != c_id
    def test_detach(self):
        """A detached connection is no longer pool-managed; closing it
        closes the DBAPI connection directly."""
        dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
        c1 = p.connect()
        c1.detach()
        c2 = p.connect()  # noqa
        eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])
        c1_con = c1.connection
        assert c1_con is not None
        eq_(c1_con.close.call_count, 0)
        c1.close()
        eq_(c1_con.close.call_count, 1)
    def test_detach_via_invalidate(self):
        """invalidate() closes the old DBAPI connection immediately and the
        next checkout gets a fresh one."""
        dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
        c1 = p.connect()
        c1_con = c1.connection
        c1.invalidate()
        assert c1.connection is None
        eq_(c1_con.close.call_count, 1)
        c2 = p.connect()
        assert c2.connection is not c1_con
        c2_con = c2.connection
        c2.close()
        eq_(c2_con.close.call_count, 0)
    def test_no_double_checkin(self):
        """Checking in the same record twice emits a warning."""
        p = self._queuepool_fixture(pool_size=1)
        c1 = p.connect()
        rec = c1._connection_record
        c1.close()
        assert_raises_message(
            Warning, "Double checkin attempted on %s" % rec, rec.checkin
        )
    def test_lifo(self):
        """use_lifo=True: most recently checked-in connection is reused."""
        c1, c2, c3 = Mock(), Mock(), Mock()
        connections = [c1, c2, c3]
        def creator():
            return connections.pop(0)
        p = pool.QueuePool(creator, use_lifo=True)
        pc1 = p.connect()
        pc2 = p.connect()
        pc3 = p.connect()
        pc1.close()
        pc2.close()
        pc3.close()
        for i in range(5):
            pc1 = p.connect()
            is_(pc1.connection, c3)
            pc1.close()
            pc1 = p.connect()
            is_(pc1.connection, c3)
            pc2 = p.connect()
            is_(pc2.connection, c2)
            pc2.close()
            pc3 = p.connect()
            is_(pc3.connection, c2)
            pc2 = p.connect()
            is_(pc2.connection, c1)
            pc2.close()
            pc3.close()
            pc1.close()
    def test_fifo(self):
        """Default FIFO: least recently checked-in connection is reused."""
        c1, c2, c3 = Mock(), Mock(), Mock()
        connections = [c1, c2, c3]
        def creator():
            return connections.pop(0)
        p = pool.QueuePool(creator)
        pc1 = p.connect()
        pc2 = p.connect()
        pc3 = p.connect()
        pc1.close()
        pc2.close()
        pc3.close()
        pc1 = p.connect()
        is_(pc1.connection, c1)
        pc1.close()
        pc1 = p.connect()
        is_(pc1.connection, c2)
        pc2 = p.connect()
        is_(pc2.connection, c3)
        pc2.close()
        pc3 = p.connect()
        is_(pc3.connection, c1)
        pc2 = p.connect()
        is_(pc2.connection, c3)
        pc2.close()
        pc3.close()
        pc1.close()
class ResetOnReturnTest(PoolTestBase):
    """Verify the reset_on_return modes: rollback, commit, or no-op."""
    def _fixture(self, **kw):
        """Return (mock dbapi, QueuePool) so tests can inspect
        rollback()/commit() calls on the mock connection."""
        dbapi = Mock()
        return (
            dbapi,
            pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw),
        )
    def test_plain_rollback(self):
        """reset_on_return="rollback" rolls back on checkin, never commits."""
        dbapi, p = self._fixture(reset_on_return="rollback")
        c1 = p.connect()
        c1.close()
        assert dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called
    def test_plain_commit(self):
        """reset_on_return="commit" commits on checkin, never rolls back."""
        dbapi, p = self._fixture(reset_on_return="commit")
        c1 = p.connect()
        c1.close()
        assert not dbapi.connect().rollback.called
        assert dbapi.connect().commit.called
    def test_plain_none(self):
        """reset_on_return=None performs no reset on checkin."""
        dbapi, p = self._fixture(reset_on_return=None)
        c1 = p.connect()
        c1.close()
        assert not dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called
class SingletonThreadPoolTest(PoolTestBase):
    """Tests for SingletonThreadPool, which keeps one connection per
    thread (used for SQLite ":memory:" databases)."""
    @testing.requires.threading_with_mock
    def test_cleanup(self):
        self._test_cleanup(False)
    # TODO: the SingletonThreadPool cleanup method
    # has an unfixed race condition within the "cleanup" system that
    # leads to this test being off by one connection under load; in any
    # case, this connection will be closed once it is garbage collected.
    # this pool is not a production-level pool and is only used for the
    # SQLite "memory" connection, and is not very useful under actual
    # multi-threaded conditions
    # @testing.requires.threading_with_mock
    # def test_cleanup_no_gc(self):
    #     self._test_cleanup(True)
    def _test_cleanup(self, strong_refs):
        """test that the pool's connections are OK after cleanup() has
        been called."""
        dbapi = MockDBAPI()
        lock = threading.Lock()
        def creator():
            # the mock iterator isn't threadsafe...
            with lock:
                return dbapi.connect()
        p = pool.SingletonThreadPool(creator=creator, pool_size=3)
        if strong_refs:
            sr = set()
            def _conn():
                c = p.connect()
                sr.add(c.connection)
                return c
        else:
            def _conn():
                return p.connect()
        def checkout():
            for x in range(10):
                c = _conn()
                assert c
                c.cursor()
                c.close()
                time.sleep(0.01)
        threads = []
        for i in range(10):
            th = threading.Thread(target=checkout)
            th.start()
            threads.append(th)
        for th in threads:
            th.join(join_timeout)
        # cleanup() should trim the per-thread connections down to
        # roughly pool_size (see race-condition TODO above)
        lp = len(p._all_conns)
        is_true(3 <= lp <= 4)
        if strong_refs:
            still_opened = len([c for c in sr if not c.close.call_count])
            eq_(still_opened, 3)
    def test_no_rollback_from_nested_connections(self):
        """Nested checkouts on one thread share a connection; only the
        outermost close() rolls it back."""
        dbapi = MockDBAPI()
        lock = threading.Lock()
        def creator():
            # the mock iterator isn't threadsafe...
            with lock:
                return dbapi.connect()
        p = pool.SingletonThreadPool(creator=creator, pool_size=3)
        c1 = p.connect()
        mock_conn = c1.connection
        c2 = p.connect()
        is_(c1, c2)
        c2.close()
        eq_(mock_conn.mock_calls, [])
        c1.close()
        eq_(mock_conn.mock_calls, [call.rollback()])
class AssertionPoolTest(PoolTestBase):
    """AssertionPool permits only one connection checked out at a time."""

    def test_connect_error(self):
        """A second simultaneous checkout raises AssertionError."""
        mock_dbapi = MockDBAPI()
        asserting_pool = pool.AssertionPool(
            creator=lambda: mock_dbapi.connect("foo.db")
        )
        held = asserting_pool.connect()  # noqa
        assert_raises(AssertionError, asserting_pool.connect)

    def test_connect_multiple(self):
        """Serial checkout/checkin cycles succeed; overlap does not."""
        mock_dbapi = MockDBAPI()
        asserting_pool = pool.AssertionPool(
            creator=lambda: mock_dbapi.connect("foo.db")
        )
        for _ in range(2):
            conn = asserting_pool.connect()
            conn.close()
        held = asserting_pool.connect()  # noqa
        assert_raises(AssertionError, asserting_pool.connect)
class NullPoolTest(PoolTestBase):
    """Tests for NullPool, which opens a fresh DBAPI connection for every
    checkout and never actually pools."""

    def test_reconnect(self):
        """After invalidation, the next checkout opens a new connection."""
        mock_dbapi = MockDBAPI()
        null_pool = pool.NullPool(creator=lambda: mock_dbapi.connect("foo.db"))
        conn = null_pool.connect()
        conn.close()
        conn = None
        conn = null_pool.connect()
        conn.invalidate()
        conn = None
        # re-checkout after invalidate forces a second connect() call
        conn = null_pool.connect()
        mock_dbapi.connect.assert_has_calls(
            [call("foo.db"), call("foo.db")], any_order=True
        )
class StaticPoolTest(PoolTestBase):
    """Tests for StaticPool, which hands out exactly one connection for
    the lifetime of the pool."""

    def _make_pool(self):
        # StaticPool backed by a fresh mock DBAPI
        mock_dbapi = MockDBAPI()

        def make_conn():
            return mock_dbapi.connect("foo.db")

        return pool.StaticPool(make_conn)

    def test_recreate(self):
        """recreate() carries the same creator over to the new pool."""
        static_pool = self._make_pool()
        clone = static_pool.recreate()
        assert static_pool._creator is clone._creator

    def test_connect(self):
        """Every checkout returns the same underlying DBAPI connection."""
        static_pool = self._make_pool()
        first = static_pool.connect()
        underlying = first.connection
        first.close()
        second = static_pool.connect()
        is_(underlying, second.connection)
class CreatorCompatibilityTest(PoolTestBase):
    """Tests that a pool's ``_creator`` works both in its one-argument
    (connection-record) form and its zero-argument form, including when
    the creator is monkeypatched between the two after the fact."""

    def test_creator_callable_outside_noarg(self):
        """The pool's creator can be invoked directly with no arguments."""
        e = testing_engine()
        creator = e.pool._creator
        # bind the connection before closing: the previous try/finally
        # form raised NameError from ``conn.close()`` in the finally
        # block if creator() itself failed, masking the real error
        conn = creator()
        conn.close()

    def test_creator_callable_outside_witharg(self):
        """The pool's creator also accepts a connection-record argument."""
        e = testing_engine()
        creator = e.pool._creator
        # same NameError-masking fix as test_creator_callable_outside_noarg
        conn = creator(Mock())
        conn.close()

    def test_creator_patching_arg_to_noarg(self):
        """Patching ``_creator`` from the two-arg to the zero-arg form is
        detected dynamically by the pool's 'should_wrap_creator' logic."""
        e = testing_engine()
        creator = e.pool._creator
        # the creator is the two-arg form
        conn = creator(Mock())
        conn.close()

        def mock_create():
            return creator()

        conn = e.connect()
        conn.invalidate()
        conn.close()
        # test that the 'should_wrap_creator' status
        # will dynamically switch if the _creator is monkeypatched.
        # patch it with a zero-arg form
        with patch.object(e.pool, "_creator", mock_create):
            conn = e.connect()
            conn.invalidate()
            conn.close()
        conn = e.connect()
        conn.close()
import collections
import random
import threading
import time
import weakref
import sqlalchemy as tsa
from sqlalchemy import event
from sqlalchemy import pool
from sqlalchemy import select
from sqlalchemy import testing
from sqlalchemy.engine import default
from sqlalchemy.pool.impl import _AsyncConnDialect
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_context_ok
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_none
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_not_none
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing.mock import ANY
from sqlalchemy.testing.mock import call
from sqlalchemy.testing.mock import Mock
from sqlalchemy.testing.mock import patch
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.testing.util import lazy_gc
# maximum number of seconds to wait when joining worker threads in tests
join_timeout = 10
def MockDBAPI():
    """Return a Mock standing in for a DBAPI module.

    The mock exposes ``connect()``, producing mock connections whose
    ``close()`` flips their ``closed`` flag, plus a ``shutdown(value)``
    helper that makes subsequent ``connect()`` calls raise (value truthy)
    or succeed again (value falsy), simulating a database outage.  The
    outage state is mirrored on the ``is_shutdown`` attribute.
    """

    def _make_cursor():
        return Mock()

    def _do_connect(*arg, **kw):
        connection = Mock(
            cursor=Mock(side_effect=_make_cursor),
            closed=False,
        )

        def _do_close():
            connection.closed = True

        connection.close = Mock(side_effect=_do_close)
        return connection

    def _shutdown(value):
        # swap connect()'s side effect depending on the outage state
        if value:
            db.connect = Mock(side_effect=Exception("connect failed"))
        else:
            db.connect = Mock(side_effect=_do_connect)
        db.is_shutdown = value

    db = Mock(
        connect=Mock(side_effect=_do_connect),
        shutdown=_shutdown,
        is_shutdown=False,
    )
    return db
class PoolTestBase(fixtures.TestBase):
    """Common setup/teardown and fixture helpers for the pool tests."""

    def setup_test(self):
        # start each test with no pool managers and an empty registry of
        # connections to close at teardown
        pool.clear_managers()
        self._teardown_conns = []
    def teardown_test(self):
        # close any connections registered via _with_teardown that are
        # still alive (the weakrefs may already have been collected)
        for ref in self._teardown_conns:
            conn = ref()
            if conn:
                conn.close()
    @classmethod
    def teardown_test_class(cls):
        pool.clear_managers()
    def _with_teardown(self, connection):
        # register a connection (held by weakref) for closing at teardown
        self._teardown_conns.append(weakref.ref(connection))
        return connection
    def _queuepool_fixture(self, **kw):
        # convenience form of _queuepool_dbapi_fixture returning only the pool
        dbapi, pool = self._queuepool_dbapi_fixture(**kw)
        return pool
    def _queuepool_dbapi_fixture(self, **kw):
        # returns (mock dbapi, QueuePool); the _is_asyncio flag marks the
        # pool as asyncio-style, which changes GC/finalization behavior
        dbapi = MockDBAPI()
        _is_asyncio = kw.pop("_is_asyncio", False)
        p = pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw)
        if _is_asyncio:
            p._is_asyncio = True
            p._dialect = _AsyncConnDialect()
        return dbapi, p
class PoolTest(PoolTestBase):
    """General pool behavior: cursor iteration, recreate(), the per-fairy
    ``info`` / per-record ``record_info`` dictionaries, and internal
    ``_ConnectionRecord`` mechanics."""

    @testing.fails_on(
        "+pyodbc", "pyodbc cursor doesn't implement tuple __eq__"
    )
    @testing.fails_on("+pg8000", "returns [1], not (1,)")
    def test_cursor_iterable(self):
        # a raw DBAPI cursor from the pool should be directly iterable
        conn = testing.db.raw_connection()
        cursor = conn.cursor()
        cursor.execute(str(select(1).compile(testing.db)))
        expected = [(1,)]
        for row in cursor:
            eq_(row, expected.pop(0))
    def test_no_connect_on_recreate(self):
        # recreate() must never invoke the creator itself, for every
        # pool implementation
        def creator():
            raise Exception("no creates allowed")
        for cls in (
            pool.SingletonThreadPool,
            pool.StaticPool,
            pool.QueuePool,
            pool.NullPool,
            pool.AssertionPool,
        ):
            p = cls(creator=creator)
            p.dispose()
            p2 = p.recreate()
            assert p2.__class__ is cls
            mock_dbapi = MockDBAPI()
            p = cls(creator=mock_dbapi.connect)
            conn = p.connect()
            conn.close()
            mock_dbapi.connect.side_effect = Exception("error!")
            p.dispose()
            p.recreate()
    def test_info(self):
        # .info survives a normal checkin but is discarded on invalidate;
        # detach() keeps the fairy's copy independent of the record
        p = self._queuepool_fixture(pool_size=1, max_overflow=0)
        c = p.connect()
        self.assert_(not c.info)
        self.assert_(c.info is c._connection_record.info)
        c.info["foo"] = "bar"
        c.close()
        del c
        c = p.connect()
        self.assert_("foo" in c.info)
        c.invalidate()
        c = p.connect()
        self.assert_("foo" not in c.info)
        c.info["foo2"] = "bar2"
        c.detach()
        self.assert_("foo2" in c.info)
        c2 = p.connect()
        is_not(c.connection, c2.connection)
        assert not c2.info
        assert "foo2" in c.info
    def test_rec_info(self):
        # .record_info persists across invalidation (unlike .info) and is
        # severed from the fairy entirely on detach()
        p = self._queuepool_fixture(pool_size=1, max_overflow=0)
        c = p.connect()
        self.assert_(not c.record_info)
        self.assert_(c.record_info is c._connection_record.record_info)
        c.record_info["foo"] = "bar"
        c.close()
        del c
        c = p.connect()
        self.assert_("foo" in c.record_info)
        c.invalidate()
        c = p.connect()
        self.assert_("foo" in c.record_info)
        c.record_info["foo2"] = "bar2"
        c.detach()
        is_(c.record_info, None)
        is_(c._connection_record, None)
        c2 = p.connect()
        assert c2.record_info
        assert "foo2" in c2.record_info
    def test_rec_unconnected(self):
        # test production of a _ConnectionRecord with an
        # initially unconnected state.
        dbapi = MockDBAPI()
        p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
        r1 = pool._ConnectionRecord(p1, connect=False)
        assert not r1.connection
        c1 = r1.get_connection()
        is_(c1, r1.connection)
    def test_rec_close_reopen(self):
        # test that _ConnectionRecord.close() allows
        # the record to be reusable
        dbapi = MockDBAPI()
        p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
        r1 = pool._ConnectionRecord(p1)
        c1 = r1.connection
        c2 = r1.get_connection()
        is_(c1, c2)
        r1.close()
        assert not r1.connection
        eq_(c1.mock_calls, [call.close()])
        c2 = r1.get_connection()
        is_not(c1, c2)
        is_(c2, r1.connection)
        eq_(c2.mock_calls, [])
    @testing.combinations(
        (
            pool.QueuePool,
            dict(pool_size=8, max_overflow=10, timeout=25, use_lifo=True),
        ),
        (pool.QueuePool, {}),
        (pool.NullPool, {}),
        (pool.SingletonThreadPool, {}),
        (pool.StaticPool, {}),
        (pool.AssertionPool, {}),
    )
    def test_recreate_state(self, pool_cls, pool_args):
        # recreate() should copy all constructor state; compare instance
        # __dict__s after masking class attributes and per-instance
        # internals that are expected to differ
        creator = object()
        pool_args["pre_ping"] = True
        pool_args["reset_on_return"] = "commit"
        pool_args["recycle"] = 35
        pool_args["logging_name"] = "somepool"
        pool_args["dialect"] = default.DefaultDialect()
        pool_args["echo"] = "debug"
        p1 = pool_cls(creator=creator, **pool_args)
        cls_keys = dir(pool_cls)
        d1 = dict(p1.__dict__)
        p2 = p1.recreate()
        d2 = dict(p2.__dict__)
        for k in cls_keys:
            d1.pop(k, None)
            d2.pop(k, None)
        for k in (
            "_invoke_creator",
            "_pool",
            "_overflow_lock",
            "_fairy",
            "_conn",
            "logger",
        ):
            if k in d2:
                d2[k] = mock.ANY
        eq_(d1, d2)
        eq_(p1.echo, p2.echo)
        is_(p1._dialect, p2._dialect)
        if "use_lifo" in pool_args:
            eq_(p1._pool.use_lifo, p2._pool.use_lifo)
class PoolDialectTest(PoolTestBase):
    """Verify each pool implementation routes rollback/commit/close
    through the dialect object attached to the pool."""

    def _dialect(self):
        # records "R"/"C"/"CL" for rollback/commit/close calls, in order
        events = []

        class PoolDialect(object):
            is_async = False

            def do_rollback(self, dbapi_connection):
                events.append("R")
                dbapi_connection.rollback()

            def do_commit(self, dbapi_connection):
                events.append("C")
                dbapi_connection.commit()

            def do_close(self, dbapi_connection):
                events.append("CL")
                dbapi_connection.close()

        return PoolDialect(), events

    def _do_test(self, pool_cls, assertion):
        dialect, events = self._dialect()
        p = pool_cls(creator=MockDBAPI().connect)
        p._dialect = dialect
        # one checkout/checkin, dispose + recreate, then another cycle;
        # the per-pool event sequence must match `assertion`
        checked_out = p.connect()
        checked_out.close()
        p.dispose()
        p.recreate()
        checked_out = p.connect()
        checked_out.close()
        eq_(events, assertion)

    def test_queue_pool(self):
        self._do_test(pool.QueuePool, ["R", "CL", "R"])

    def test_assertion_pool(self):
        self._do_test(pool.AssertionPool, ["R", "CL", "R"])

    def test_singleton_pool(self):
        self._do_test(pool.SingletonThreadPool, ["R", "CL", "R"])

    def test_null_pool(self):
        # NullPool closes on every checkin, hence the trailing "CL"
        self._do_test(pool.NullPool, ["R", "CL", "R", "CL"])

    def test_static_pool(self):
        self._do_test(pool.StaticPool, ["R", "CL", "R"])
class PoolEventsTest(PoolTestBase):
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append("first_connect")
event.listen(p, "first_connect", first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append("connect")
event.listen(p, "connect", connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append("checkout")
event.listen(p, "checkout", checkout)
return p, canary
def _checkin_event_fixture(self, _is_asyncio=False):
p = self._queuepool_fixture(_is_asyncio=_is_asyncio)
canary = []
@event.listens_for(p, "checkin")
def checkin(*arg, **kw):
canary.append("checkin")
@event.listens_for(p, "close_detached")
def close_detached(*arg, **kw):
canary.append("close_detached")
@event.listens_for(p, "detach")
def detach(*arg, **kw):
canary.append("detach")
return p, canary
def _reset_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def reset(*arg, **kw):
canary.append("reset")
event.listen(p, "reset", reset)
return p, canary
def _invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "invalidate", canary)
return p, canary
def _soft_invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "soft_invalidate", canary)
return p, canary
def _close_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close", canary)
return p, canary
def _detach_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "detach", canary)
return p, canary
def _close_detached_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close_detached", canary)
return p, canary
def test_close(self):
p, canary = self._close_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.close()
eq_(canary.mock_calls, [])
p.dispose()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach(self):
p, canary = self._detach_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.detach()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach_close(self):
p, canary = self._close_detached_event_fixture()
c1 = p.connect()
connection = c1.connection
c1.detach()
c1.close()
eq_(canary.mock_calls, [call(connection)])
def test_first_connect_event(self):
p, canary = self._first_connect_event_fixture()
p.connect()
eq_(canary, ["first_connect"])
def test_first_connect_event_fires_once(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p.connect()
eq_(canary, ["first_connect"])
def test_first_connect_on_previously_recreated(self):
p, canary = self._first_connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ["first_connect", "first_connect"])
def test_first_connect_on_subsequently_recreated(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["first_connect", "first_connect"])
def test_connect_event(self):
p, canary = self._connect_event_fixture()
p.connect()
eq_(canary, ["connect"])
def test_connect_insert_event(self):
p = self._queuepool_fixture()
canary = []
def connect_one(*arg, **kw):
canary.append("connect_one")
def connect_two(*arg, **kw):
canary.append("connect_two")
def connect_three(*arg, **kw):
canary.append("connect_three")
event.listen(p, "connect", connect_one)
event.listen(p, "connect", connect_two, insert=True)
event.listen(p, "connect", connect_three)
p.connect()
eq_(canary, ["connect_two", "connect_one", "connect_three"])
def test_connect_event_fires_subsequent(self):
p, canary = self._connect_event_fixture()
c1 = p.connect() # noqa
c2 = p.connect() # noqa
eq_(canary, ["connect", "connect"])
def test_connect_on_previously_recreated(self):
p, canary = self._connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ["connect", "connect"])
def test_connect_on_subsequently_recreated(self):
p, canary = self._connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["connect", "connect"])
def test_checkout_event(self):
p, canary = self._checkout_event_fixture()
p.connect()
eq_(canary, ["checkout"])
def test_checkout_event_fires_subsequent(self):
p, canary = self._checkout_event_fixture()
p.connect()
p.connect()
eq_(canary, ["checkout", "checkout"])
def test_checkout_event_on_subsequently_recreated(self):
p, canary = self._checkout_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["checkout", "checkout"])
def test_checkin_event(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["checkin"])
def test_reset_event(self):
p, canary = self._reset_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["reset"])
def test_soft_invalidate_event_no_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate(soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_soft_invalidate_event_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc, soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
def test_invalidate_event_no_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate()
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_invalidate_event_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
@testing.combinations((True, testing.requires.python3), (False,))
def test_checkin_event_gc(self, detach_gced):
p, canary = self._checkin_event_fixture(_is_asyncio=detach_gced)
c1 = p.connect()
dbapi_connection = weakref.ref(c1.connection)
eq_(canary, [])
del c1
lazy_gc()
if detach_gced:
# "close_detached" is not called because for asyncio the
# connection is just lost.
eq_(canary, ["detach"])
else:
eq_(canary, ["checkin"])
gc_collect()
if detach_gced:
is_none(dbapi_connection())
else:
is_not_none(dbapi_connection())
def test_checkin_event_on_subsequently_recreated(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["checkin"])
c2.close()
eq_(canary, ["checkin", "checkin"])
def test_listen_targets_scope(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
def listen_four(*args):
canary.append("listen_four")
engine = testing_engine(testing.db.url)
event.listen(pool.Pool, "connect", listen_one)
event.listen(engine.pool, "connect", listen_two)
event.listen(engine, "connect", listen_three)
event.listen(engine.__class__, "connect", listen_four)
engine.execute(select(1)).close()
eq_(
canary, ["listen_one", "listen_four", "listen_two", "listen_three"]
)
def test_listen_targets_per_subclass(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
event.listen(pool.Pool, "connect", listen_one)
event.listen(pool.QueuePool, "connect", listen_two)
event.listen(pool.SingletonThreadPool, "connect", listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
assert listen_one in p1.dispatch.connect
assert listen_two in p1.dispatch.connect
assert listen_three not in p1.dispatch.connect
assert listen_one in p2.dispatch.connect
assert listen_two not in p2.dispatch.connect
assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
p2.connect()
eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
def test_connect_event_fails_invalidates(self):
fail = False
def listen_one(conn, rec):
if fail:
raise Exception("it failed")
def listen_two(conn, rec):
rec.info["important_flag"] = True
p1 = pool.QueuePool(
creator=MockDBAPI().connect, pool_size=1, max_overflow=0
)
event.listen(p1, "connect", listen_one)
event.listen(p1, "connect", listen_two)
conn = p1.connect()
eq_(conn.info["important_flag"], True)
conn.invalidate()
conn.close()
fail = True
assert_raises(Exception, p1.connect)
fail = False
conn = p1.connect()
eq_(conn.info["important_flag"], True)
conn.close()
def teardown_test(self):
# TODO: need to get remove() functionality
# going
pool.Pool.dispatch._clear()
class PoolFirstConnectSyncTest(PoolTestBase):
    """Ensure the 'first_connect' event blocks concurrent checkouts until
    it completes, so no thread sees a partially-initialized pool."""

    # test [ticket:2964]
    @testing.requires.timing_intensive
    def test_sync(self):
        pool = self._queuepool_fixture(pool_size=3, max_overflow=0)
        evt = Mock()
        @event.listens_for(pool, "first_connect")
        def slow_first_connect(dbapi_con, rec):
            # deliberately slow, so other threads pile up behind it
            time.sleep(1)
            evt.first_connect()
        @event.listens_for(pool, "connect")
        def on_connect(dbapi_con, rec):
            evt.connect()
        def checkout():
            for j in range(2):
                c1 = pool.connect()
                time.sleep(0.02)
                c1.close()
                time.sleep(0.02)
        threads = []
        # what we're trying to do here is have concurrent use of
        # the pool from several threads while first_connect is running
        for i in range(5):
            th = threading.Thread(target=checkout)
            th.start()
            threads.append(th)
        for th in threads:
            th.join(join_timeout)
        # there is a very unlikely condition observed in CI on windows
        # where even though we have five threads above all calling upon the
        # pool, we didn't get concurrent use of all three connections, two
        # connections were enough. so here we purposely just check out
        # all three at once just to get a consistent test result.
        make_sure_all_three_are_connected = [pool.connect() for i in range(3)]
        for conn in make_sure_all_three_are_connected:
            conn.close()
        # exactly one first_connect, and one connect per pooled connection
        eq_(
            evt.mock_calls,
            [
                call.first_connect(),
                call.connect(),
                call.connect(),
                call.connect(),
            ],
        )
class QueuePoolTest(PoolTestBase):
    def test_queuepool_del(self):
        # exercise checkin via garbage collection (no explicit close())
        self._do_testqueuepool(useclose=False)
    def test_queuepool_close(self):
        # exercise checkin via explicit close()
        self._do_testqueuepool(useclose=True)
    def _do_testqueuepool(self, useclose=False):
        """Walk a size-3, unlimited-overflow QueuePool through checkouts
        and checkins, asserting the exact (size, checkedin, overflow,
        checkedout) status tuple at each step; ``useclose`` selects
        explicit close() vs. checkin-by-garbage-collection."""
        p = self._queuepool_fixture(pool_size=3, max_overflow=-1)
        def status(pool):
            return (
                pool.size(),
                pool.checkedin(),
                pool.overflow(),
                pool.checkedout(),
            )
        # overflow starts at -pool_size + 1 with the first checkout and
        # climbs by one per checkout
        c1 = p.connect()
        self.assert_(status(p) == (3, 0, -2, 1))
        c2 = p.connect()
        self.assert_(status(p) == (3, 0, -1, 2))
        c3 = p.connect()
        self.assert_(status(p) == (3, 0, 0, 3))
        c4 = p.connect()
        self.assert_(status(p) == (3, 0, 1, 4))
        c5 = p.connect()
        self.assert_(status(p) == (3, 0, 2, 5))
        c6 = p.connect()
        self.assert_(status(p) == (3, 0, 3, 6))
        if useclose:
            c4.close()
            c3.close()
            c2.close()
        else:
            # drop references and force weakref-based checkin
            c4 = c3 = c2 = None
            lazy_gc()
        eq_(status(p), (3, 3, 3, 3))
        if useclose:
            c1.close()
            c5.close()
            c6.close()
        else:
            c1 = c5 = c6 = None
            lazy_gc()
        # with everything returned, overflow connections are discarded
        self.assert_(status(p) == (3, 3, 0, 0))
        c1 = p.connect()
        c2 = p.connect()
        self.assert_(status(p) == (3, 1, 0, 2), status(p))
        if useclose:
            c2.close()
        else:
            c2 = None
            lazy_gc()
        self.assert_(status(p) == (3, 2, 0, 1))
        c1.close()
def test_timeout_accessor(self):
expected_timeout = 123
p = self._queuepool_fixture(timeout=expected_timeout)
eq_(p.timeout(), expected_timeout)
@testing.requires.timing_intensive
def test_timeout(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=0, timeout=2)
c1 = p.connect() # noqa
c2 = p.connect() # noqa
c3 = p.connect() # noqa
now = time.time()
assert_raises(tsa.exc.TimeoutError, p.connect)
assert int(time.time() - now) == 2
@testing.requires.timing_intensive
def test_timeout_subsecond_precision(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0, timeout=0.5)
c1 = p.connect() # noqa
with expect_raises(tsa.exc.TimeoutError):
now = time.time()
c2 = p.connect() # noqa
# Python timing is not very accurate, the time diff should be very
# close to 0.5s but we give 200ms of slack.
assert 0.3 <= time.time() - now <= 0.7, "Pool timeout not respected"
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_timeout_race(self):
# test a race condition where the initial connecting threads all race
# to queue.Empty, then block on the mutex. each thread consumes a
# connection as they go in. when the limit is reached, the remaining
# threads go in, and get TimeoutError; even though they never got to
# wait for the timeout on queue.get(). the fix involves checking the
# timeout again within the mutex, and if so, unlocking and throwing
# them back to the start of do_get()
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=lambda: dbapi.connect(delay=0.05),
pool_size=2,
max_overflow=1,
timeout=3,
)
timeouts = []
def checkout():
for x in range(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
# normally, the timeout should under 4 seconds,
# but on a loaded down buildbot it can go up.
assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
reaper = testing.engines.ConnectionKiller()
dbapi = MockDBAPI()
mutex = threading.Lock()
def creator():
time.sleep(0.05)
with mutex:
return dbapi.connect()
p = pool.QueuePool(
creator=creator, pool_size=3, timeout=2, max_overflow=max_overflow
)
reaper.add_pool(p)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(0.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in range(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
self.assert_(max(peaks) <= max_overflow)
reaper.assert_all_closed()
def test_overflow_reset_on_failed_connect(self):
dbapi = Mock()
def failing_dbapi():
raise Exception("connection failed")
creator = dbapi.connect
def create():
return creator()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
c1 = self._with_teardown(p.connect()) # noqa
c2 = self._with_teardown(p.connect()) # noqa
c3 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1)
creator = failing_dbapi
assert_raises(Exception, p.connect)
eq_(p._overflow, 1)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_hanging_connect_within_overflow(self):
dbapi = Mock()
mutex = threading.Lock()
def hanging_dbapi():
time.sleep(2)
with mutex:
return dbapi.connect()
def fast_dbapi():
with mutex:
return dbapi.connect()
creator = threading.local()
def create():
return creator.mock_connector()
def run_test(name, pool, should_hang):
if should_hang:
creator.mock_connector = hanging_dbapi
else:
creator.mock_connector = fast_dbapi
conn = pool.connect()
conn.operation(name)
time.sleep(1)
conn.close()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
threads = [
threading.Thread(target=run_test, args=("success_one", p, False)),
threading.Thread(target=run_test, args=("success_two", p, False)),
threading.Thread(target=run_test, args=("overflow_one", p, True)),
threading.Thread(target=run_test, args=("overflow_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_three", p, False)
),
]
for t in threads:
t.start()
time.sleep(0.2)
for t in threads:
t.join(timeout=join_timeout)
eq_(
dbapi.connect().operation.mock_calls,
[
call("success_one"),
call("success_two"),
call("overflow_two"),
call("overflow_three"),
call("overflow_one"),
],
)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_waiters_handled(self):
mutex = threading.Lock()
dbapi = MockDBAPI()
def creator():
with mutex:
return dbapi.connect()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(
creator=creator,
pool_size=2,
timeout=timeout,
max_overflow=max_overflow,
)
def waiter(p, timeout, max_overflow):
success_key = (timeout, max_overflow)
conn = p.connect()
success.append(success_key)
time.sleep(0.1)
conn.close()
c1 = p.connect() # noqa
c2 = p.connect()
threads = []
for i in range(2):
t = threading.Thread(
target=waiter, args=(p, timeout, max_overflow)
)
t.daemon = True
t.start()
threads.append(t)
# this sleep makes sure that the
# two waiter threads hit upon wait()
# inside the queue, before we invalidate the other
# two conns
time.sleep(0.2)
p._invalidate(c2)
for t in threads:
t.join(join_timeout)
eq_(len(success), 12, "successes: %s" % success)
def test_connrec_invalidated_within_checkout_no_race(self):
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
p = pool.QueuePool(creator=creator, pool_size=1, max_overflow=0)
conn = p.connect()
conn.close()
_existing_checkout = pool._ConnectionRecord.checkout
@classmethod
def _decorate_existing_checkout(cls, *arg, **kw):
fairy = _existing_checkout(*arg, **kw)
connrec = fairy._connection_record
connrec.invalidate()
return fairy
with patch(
"sqlalchemy.pool._ConnectionRecord.checkout",
_decorate_existing_checkout,
):
conn = p.connect()
is_(conn._connection_record.connection, None)
conn.close()
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator():
canary.append(1)
return dbapi.connect()
p1 = pool.QueuePool(
creator=creator, pool_size=1, timeout=None, max_overflow=0
)
def waiter(p):
conn = p.connect()
canary.append(2)
time.sleep(0.5)
conn.close()
c1 = p1.connect()
threads = []
for i in range(5):
t = threading.Thread(target=waiter, args=(p1,))
t.start()
threads.append(t)
time.sleep(0.5)
eq_(canary, [1])
# this also calls invalidate()
# on c1
p1._invalidate(c1)
for t in threads:
t.join(join_timeout)
eq_(canary, [1, 1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=dbapi.connect, pool_size=2, timeout=None, max_overflow=0
)
c1 = p.connect()
c2 = p.connect()
c1_con = c1.connection
c2_con = c2.connection
c1.close()
eq_(c1_con.close.call_count, 0)
eq_(c2_con.close.call_count, 0)
p.dispose()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.connection is c2_con
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_no_overflow(self):
self._test_overflow(40, 0)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_overflow_no_gc(self):
p = self._queuepool_fixture(pool_size=2, max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.connection)
return c
for j in range(5):
# open 4 conns at a time. each time this
# will yield two pooled connections + two
# overflow connections.
conns = [_conn() for i in range(4)]
for c in conns:
c.close()
# doing that for a total of 5 times yields
# ten overflow connections closed plus the
# two pooled connections unclosed.
eq_(
set([c.close.call_count for c in strong_refs]),
set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0]),
)
def test_recycle(self):
with patch("sqlalchemy.pool.base.time.time") as mock:
mock.return_value = 10000
p = self._queuepool_fixture(
pool_size=1, max_overflow=0, recycle=30
)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
mock.return_value = 10001
c2 = p.connect()
is_(c2.connection, c_ref())
c2.close()
mock.return_value = 10035
c3 = p.connect()
is_not(c3.connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
c2 = p.connect()
is_(c2.connection, c_ref())
c2_rec = c2._connection_record
p._invalidate(c2)
assert c2_rec.connection is None
c2.close()
time.sleep(0.5)
c3 = p.connect()
is_not(c3.connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_soft_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
c2 = p.connect()
is_(c2.connection, c_ref())
c2_rec = c2._connection_record
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
c2.invalidate(soft=True)
is_(c2_rec.connection, c2.connection)
c2.close()
c3 = p.connect()
is_not(c3.connection, c_ref())
is_(c3._connection_record, c2_rec)
is_(c2_rec.connection, c3.connection)
def _no_wr_finalize(self):
finalize_fairy = pool._finalize_fairy
def assert_no_wr_callback(
connection, connection_record, pool, ref, echo, fairy=None
):
if fairy is None:
raise AssertionError(
"finalize fairy was called as a weakref callback"
)
return finalize_fairy(
connection, connection_record, pool, ref, echo, fairy
)
return patch.object(pool, "_finalize_fairy", assert_no_wr_callback)
def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
# p is QueuePool with size=1, max_overflow=2,
# and one connection in the pool that will need to
# reconnect when next used (either due to recycle or invalidate)
with self._no_wr_finalize():
eq_(p.checkedout(), 0)
eq_(p._overflow, 0)
dbapi.shutdown(True)
assert_raises_context_ok(Exception, p.connect)
eq_(p._overflow, 0)
eq_(p.checkedout(), 0) # and not 1
dbapi.shutdown(False)
c1 = self._with_teardown(p.connect()) # noqa
assert p._pool.empty() # poolsize is one, so we're empty OK
c2 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1) # and not 2
# this hangs if p._overflow is 2
c3 = self._with_teardown(p.connect())
c3.close()
def test_error_on_pooled_reconnect_cleanup_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.invalidate()
c1.close()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_error_on_pooled_reconnect_cleanup_recycle(self):
dbapi, p = self._queuepool_dbapi_fixture(
pool_size=1, max_overflow=2, recycle=1
)
c1 = p.connect()
c1.close()
time.sleep(1.5)
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_connect_handler_not_called_for_recycled(self):
    """The 'connect' event is emitted once per new DBAPI connection,
    not again when an invalidated record reconnects."""
    dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
    canary = Mock()

    c1 = p.connect()
    c2 = p.connect()

    c1.close()
    c2.close()

    dbapi.shutdown(True)

    # ensure pool invalidate time will be later than starttime
    # for ConnectionRecord objects above
    time.sleep(0.1)

    bad = p.connect()
    p._invalidate(bad)
    bad.close()
    assert p._invalidate_time

    event.listen(p, "connect", canary.connect)
    event.listen(p, "checkout", canary.checkout)

    assert_raises(Exception, p.connect)

    # prune records whose connection died so the next checkout
    # performs a fresh connect
    p._pool.queue = collections.deque(
        [c for c in p._pool.queue if c.connection is not None]
    )

    dbapi.shutdown(False)

    c = p.connect()
    c.close()

    # exactly one connect + one checkout event
    eq_(
        canary.mock_calls,
        [call.connect(ANY, ANY), call.checkout(ANY, ANY, ANY)],
    )
@testing.requires.timing_intensive
def test_connect_checkout_handler_always_gets_info(self):
    """Info set by the 'connect' handler must be visible to the
    'checkout' handler even across invalidation/reconnect."""
    dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)

    c1 = p.connect()
    c2 = p.connect()

    c1.close()
    c2.close()

    dbapi.shutdown(True)

    # ensure pool invalidate time will be later than starttime
    # for ConnectionRecord objects above
    time.sleep(0.1)

    bad = p.connect()
    p._invalidate(bad)
    bad.close()
    assert p._invalidate_time

    @event.listens_for(p, "connect")
    def connect(conn, conn_rec):
        conn_rec.info["x"] = True

    @event.listens_for(p, "checkout")
    def checkout(conn, conn_rec, conn_f):
        # the key written at connect time must always be present
        assert "x" in conn_rec.info

    assert_raises(Exception, p.connect)

    # prune dead records so the next checkout reconnects
    p._pool.queue = collections.deque(
        [c for c in p._pool.queue if c.connection is not None]
    )

    dbapi.shutdown(False)

    c = p.connect()
    c.close()
def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self):
    # A DisconnectionError raised from a user-level checkout handler
    # must be cleaned up the same way as a low-level reconnect failure.
    dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)

    c1 = p.connect()
    c1.close()

    @event.listens_for(p, "checkout")
    def handle_checkout_event(dbapi_con, con_record, con_proxy):
        if dbapi.is_shutdown:
            raise tsa.exc.DisconnectionError()

    self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.combinations((True, testing.requires.python3), (False,))
def test_userspace_disconnectionerror_weakref_finalizer(self, detach_gced):
    # When the pool is in asyncio mode (detach_gced=True), a checked-out
    # connection that gets garbage-collected is detached and abandoned
    # rather than reset and returned to the pool.
    dbapi, pool = self._queuepool_dbapi_fixture(
        pool_size=1, max_overflow=2, _is_asyncio=detach_gced
    )

    if detach_gced:
        pool._dialect.is_async = True

    @event.listens_for(pool, "checkout")
    def handle_checkout_event(dbapi_con, con_record, con_proxy):
        # simulate a userspace-detected disconnect
        if getattr(dbapi_con, "boom") == "yes":
            raise tsa.exc.DisconnectionError()

    conn = pool.connect()
    old_dbapi_conn = conn.connection
    conn.close()

    # normal checkin performed a reset (rollback) only
    eq_(old_dbapi_conn.mock_calls, [call.rollback()])

    old_dbapi_conn.boom = "yes"

    conn = pool.connect()
    dbapi_conn = conn.connection

    # simulate a checked-out connection being garbage collected
    del conn
    gc_collect()

    if detach_gced:
        # new connection was detached + abandoned on return
        eq_(dbapi_conn.mock_calls, [])
    else:
        # new connection reset and returned to pool
        eq_(dbapi_conn.mock_calls, [call.rollback()])

    # old connection was just closed - did not get an
    # erroneous reset on return
    eq_(old_dbapi_conn.mock_calls, [call.rollback(), call.close()])
@testing.requires.timing_intensive
def test_recycle_pool_no_race(self):
    # Hammer the pool from many threads with error/invalidate cycles
    # while close() is artificially slow; assert that the pool recycles
    # exactly once and does not race into creating extra pools.
    def slow_close():
        slow_closing_connection._slow_close()
        time.sleep(0.5)

    slow_closing_connection = Mock()
    slow_closing_connection.connect.return_value.close = slow_close

    class Error(Exception):
        pass

    dialect = Mock()
    dialect.is_disconnect = lambda *arg, **kw: True
    dialect.dbapi.Error = Error
    pools = []

    class TrackQueuePool(pool.QueuePool):
        # record every pool instance created, so we can assert
        # the recycle created at most one replacement pool
        def __init__(self, *arg, **kw):
            pools.append(self)
            super(TrackQueuePool, self).__init__(*arg, **kw)

    def creator():
        return slow_closing_connection.connect()

    p1 = TrackQueuePool(creator=creator, pool_size=20)

    from sqlalchemy import create_engine

    eng = create_engine(testing.db.url, pool=p1, _initialize=False)
    eng.dialect = dialect

    # 15 total connections
    conns = [eng.connect() for i in range(15)]

    # return some back to the pool (conns[3:10] -> 7 connections)
    for conn in conns[3:10]:
        conn.close()

    def attempt(conn):
        time.sleep(random.random())
        try:
            conn._handle_dbapi_exception(
                Error(), "statement", {}, Mock(), Mock()
            )
        except tsa.exc.DBAPIError:
            pass

    # run an error + invalidate operation on the remaining 7 open
    # connections
    threads = []
    for conn in conns:
        t = threading.Thread(target=attempt, args=(conn,))
        t.start()
        threads.append(t)

    for t in threads:
        t.join()

    # return all 15 connections to the pool
    for conn in conns:
        conn.close()

    # re-open 15 total connections
    conns = [eng.connect() for i in range(15)]

    # 15 connections have been fully closed due to invalidate
    assert slow_closing_connection._slow_close.call_count == 15

    # 15 initial connections + 15 reconnections
    assert slow_closing_connection.connect.call_count == 30
    assert len(pools) <= 2, len(pools)
def test_invalidate(self):
    """A checked-in connection is replaced after invalidate()."""
    p = self._queuepool_fixture(pool_size=1, max_overflow=0)
    fairy = p.connect()
    original_id = fairy.connection.id
    fairy.close()
    fairy = None

    # A plain checkin/checkout cycle hands back the same DBAPI
    # connection.
    fairy = p.connect()
    assert fairy.connection.id == original_id

    # Invalidation discards it, so the next checkout sees a new one.
    fairy.invalidate()
    fairy = None
    fairy = p.connect()
    assert fairy.connection.id != original_id
def test_recreate(self):
    """recreate() copies the pool's configuration onto a fresh pool."""
    source = self._queuepool_fixture(
        reset_on_return=None, pool_size=1, max_overflow=0
    )
    clone = source.recreate()
    eq_(clone.size(), 1)
    is_(clone._reset_on_return, pool.reset_none)
    eq_(clone._max_overflow, 0)
def test_reconnect(self):
    # Even if the DBAPI raises during the invalidated connection's
    # close, a subsequent checkout still yields a new connection.
    dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
    c1 = p.connect()
    c_id = c1.connection.id
    c1.close()
    c1 = None

    c1 = p.connect()
    assert c1.connection.id == c_id
    # make the DBAPI error on the next operation
    dbapi.raise_error = True
    c1.invalidate()
    c1 = None

    c1 = p.connect()
    assert c1.connection.id != c_id
def test_detach(self):
    # A detached connection leaves the pool entirely: the pool opens a
    # replacement, and closing the detached fairy closes its DBAPI
    # connection directly.
    dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)

    c1 = p.connect()
    c1.detach()

    c2 = p.connect()  # noqa

    # two real DBAPI connects: original + replacement
    eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])

    c1_con = c1.connection
    assert c1_con is not None
    eq_(c1_con.close.call_count, 0)
    c1.close()
    eq_(c1_con.close.call_count, 1)
def test_detach_via_invalidate(self):
    # invalidate() closes the underlying DBAPI connection immediately
    # and the record reconnects with a brand-new one.
    dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)

    c1 = p.connect()
    c1_con = c1.connection
    c1.invalidate()
    assert c1.connection is None
    eq_(c1_con.close.call_count, 1)

    c2 = p.connect()
    assert c2.connection is not c1_con
    c2_con = c2.connection

    # normal checkin does not close the live connection
    c2.close()
    eq_(c2_con.close.call_count, 0)
def test_no_double_checkin(self):
    # Checking the same record in twice must emit a warning rather
    # than corrupting pool state.
    p = self._queuepool_fixture(pool_size=1)

    c1 = p.connect()
    rec = c1._connection_record
    c1.close()
    assert_raises_message(
        Warning, "Double checkin attempted on %s" % rec, rec.checkin
    )
def test_lifo(self):
    # With use_lifo=True the pool behaves as a stack: the most
    # recently checked-in connection is handed out first.
    c1, c2, c3 = Mock(), Mock(), Mock()
    connections = [c1, c2, c3]

    def creator():
        return connections.pop(0)

    p = pool.QueuePool(creator, use_lifo=True)

    pc1 = p.connect()
    pc2 = p.connect()
    pc3 = p.connect()

    pc1.close()
    pc2.close()
    pc3.close()

    # repeated single checkout always yields the last one in (c3)
    for i in range(5):
        pc1 = p.connect()
        is_(pc1.connection, c3)

        pc1.close()

    pc1 = p.connect()
    is_(pc1.connection, c3)

    pc2 = p.connect()
    is_(pc2.connection, c2)
    pc2.close()

    pc3 = p.connect()
    is_(pc3.connection, c2)

    pc2 = p.connect()
    is_(pc2.connection, c1)

    pc2.close()
    pc3.close()
    pc1.close()
def test_fifo(self):
    # Default (FIFO) ordering: the oldest checked-in connection is
    # handed out first.
    c1, c2, c3 = Mock(), Mock(), Mock()
    connections = [c1, c2, c3]

    def creator():
        return connections.pop(0)

    p = pool.QueuePool(creator)

    pc1 = p.connect()
    pc2 = p.connect()
    pc3 = p.connect()

    pc1.close()
    pc2.close()
    pc3.close()

    pc1 = p.connect()
    is_(pc1.connection, c1)

    pc1.close()

    pc1 = p.connect()
    is_(pc1.connection, c2)

    pc2 = p.connect()
    is_(pc2.connection, c3)
    pc2.close()

    pc3 = p.connect()
    is_(pc3.connection, c1)

    pc2 = p.connect()
    is_(pc2.connection, c3)

    pc2.close()
    pc3.close()
    pc1.close()
class ResetOnReturnTest(PoolTestBase):
    """Exercises the QueuePool ``reset_on_return`` modes."""

    def _fixture(self, **kw):
        """Return a (mock DBAPI, QueuePool) pair wired together."""
        mock_dbapi = Mock()
        queue_pool = pool.QueuePool(
            creator=lambda: mock_dbapi.connect("foo.db"), **kw
        )
        return mock_dbapi, queue_pool

    def test_plain_rollback(self):
        dbapi, p = self._fixture(reset_on_return="rollback")

        conn = p.connect()
        conn.close()
        # checkin rolled back, and only rolled back
        assert dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called

    def test_plain_commit(self):
        dbapi, p = self._fixture(reset_on_return="commit")

        conn = p.connect()
        conn.close()
        # checkin committed instead of rolling back
        assert not dbapi.connect().rollback.called
        assert dbapi.connect().commit.called

    def test_plain_none(self):
        dbapi, p = self._fixture(reset_on_return=None)

        conn = p.connect()
        conn.close()
        # no reset of any kind on checkin
        assert not dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called
class SingletonThreadPoolTest(PoolTestBase):
    """Tests for SingletonThreadPool, the per-thread pool used for
    SQLite memory databases."""

    @testing.requires.threading_with_mock
    def test_cleanup(self):
        self._test_cleanup(False)

    # TODO: the SingletonThreadPool cleanup method
    # has an unfixed race condition within the "cleanup" system that
    # leads to this test being off by one connection under load; in any
    # case, this connection will be closed once it is garbage collected.
    # this pool is not a production-level pool and is only used for the
    # SQLite "memory" connection, and is not very useful under actual
    # multi-threaded conditions
    # @testing.requires.threading_with_mock
    # def test_cleanup_no_gc(self):
    #    self._test_cleanup(True)

    def _test_cleanup(self, strong_refs):
        # Run many threads through a size-3 pool and verify the pool
        # prunes itself down to approximately its configured size.
        dbapi = MockDBAPI()

        lock = threading.Lock()

        def creator():
            # the mock iterator isn't threadsafe...
            with lock:
                return dbapi.connect()

        p = pool.SingletonThreadPool(creator=creator, pool_size=3)

        if strong_refs:
            # hold strong references so connections can't be gc'ed,
            # letting us count how many remain open
            sr = set()

            def _conn():
                c = p.connect()
                sr.add(c.connection)
                return c

        else:

            def _conn():
                return p.connect()

        def checkout():
            for x in range(10):
                c = _conn()
                assert c
                c.cursor()
                c.close()
                time.sleep(0.01)

        threads = []
        for i in range(10):
            th = threading.Thread(target=checkout)
            th.start()
            threads.append(th)
        for th in threads:
            th.join(join_timeout)

        # 3 is the target; 4 tolerated due to the race noted above
        lp = len(p._all_conns)
        is_true(3 <= lp <= 4)

        if strong_refs:
            still_opened = len([c for c in sr if not c.close.call_count])
            eq_(still_opened, 3)

    def test_no_rollback_from_nested_connections(self):
        # The same thread checking out twice gets the same fairy; only
        # the outermost close() performs the reset/rollback.
        dbapi = MockDBAPI()

        lock = threading.Lock()

        def creator():
            # the mock iterator isn't threadsafe...
            with lock:
                return dbapi.connect()

        p = pool.SingletonThreadPool(creator=creator, pool_size=3)

        c1 = p.connect()
        mock_conn = c1.connection

        c2 = p.connect()
        is_(c1, c2)

        c2.close()

        eq_(mock_conn.mock_calls, [])

        c1.close()

        eq_(mock_conn.mock_calls, [call.rollback()])
class AssertionPoolTest(PoolTestBase):
    """AssertionPool allows at most one checked-out connection at a time."""

    def test_connect_error(self):
        dbapi = MockDBAPI()
        p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
        held = p.connect()  # noqa
        # a second simultaneous checkout must fail
        assert_raises(AssertionError, p.connect)

    def test_connect_multiple(self):
        dbapi = MockDBAPI()
        p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
        # serial checkouts are fine as long as each is returned first
        for _ in range(2):
            conn = p.connect()
            conn.close()

        held = p.connect()  # noqa
        assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
    """NullPool performs no pooling: every checkout opens a fresh
    DBAPI connection."""

    def test_reconnect(self):
        dbapi = MockDBAPI()
        p = pool.NullPool(creator=lambda: dbapi.connect("foo.db"))
        conn = p.connect()
        conn.close()
        conn = None

        conn = p.connect()
        conn.invalidate()
        conn = None

        conn = p.connect()

        # at least two distinct DBAPI connect calls were made
        dbapi.connect.assert_has_calls(
            [call("foo.db"), call("foo.db")], any_order=True
        )
class StaticPoolTest(PoolTestBase):
    """StaticPool hands out one shared DBAPI connection forever."""

    def test_recreate(self):
        dbapi = MockDBAPI()

        def make_conn():
            return dbapi.connect("foo.db")

        original = pool.StaticPool(make_conn)
        clone = original.recreate()
        # the recreated pool reuses the same creator
        is_(original._creator, clone._creator)

    def test_connect(self):
        dbapi = MockDBAPI()

        def make_conn():
            return dbapi.connect("foo.db")

        p = pool.StaticPool(make_conn)
        first = p.connect()
        raw = first.connection
        first.close()

        # subsequent checkout yields the very same DBAPI connection
        second = p.connect()
        is_(raw, second.connection)
class CreatorCompatibilityTest(PoolTestBase):
    """Tests that pool creators work whether they take zero arguments
    or the connection-record argument, including when monkeypatched."""

    def test_creator_callable_outside_noarg(self):
        e = testing_engine()

        creator = e.pool._creator
        try:
            conn = creator()
        finally:
            # NOTE(review): if creator() raises, `conn` is unbound here
            # and the finally block itself would raise NameError.
            conn.close()

    def test_creator_callable_outside_witharg(self):
        e = testing_engine()

        creator = e.pool._creator
        try:
            conn = creator(Mock())
        finally:
            conn.close()

    def test_creator_patching_arg_to_noarg(self):
        e = testing_engine()
        creator = e.pool._creator

        try:
            # the creator is the two-arg form
            conn = creator(Mock())
        finally:
            conn.close()

        def mock_create():
            return creator()

        conn = e.connect()
        conn.invalidate()
        conn.close()

        # test that the 'should_wrap_creator' status
        # will dynamically switch if the _creator is monkeypatched.

        # patch it with a zero-arg form
        with patch.object(e.pool, "_creator", mock_create):
            conn = e.connect()
            conn.invalidate()
            conn.close()

        conn = e.connect()
        conn.close()
| true | true |
1c38810df10e266c92d99bb8419da6c328ffd0d1 | 948 | py | Python | examples/swat-s1/run.py | pgaulon/minicps | f3b873e00f477d8b5c36fcb7df3d1cfe27063822 | [
"MIT"
] | 119 | 2015-10-16T22:47:39.000Z | 2022-03-31T07:18:00.000Z | examples/swat-s1/run.py | DolphinWilly/minicps | 7400f85e47d09901bb702966abf557244cf11ce4 | [
"MIT"
] | 20 | 2016-09-30T06:18:39.000Z | 2022-03-11T10:51:33.000Z | examples/swat-s1/run.py | DolphinWilly/minicps | 7400f85e47d09901bb702966abf557244cf11ce4 | [
"MIT"
] | 65 | 2016-07-27T03:50:39.000Z | 2022-03-05T14:49:17.000Z | """
swat-s1 run.py
"""
from mininet.net import Mininet
from mininet.cli import CLI
from minicps.mcps import MiniCPS
from topo import SwatTopo
import sys
class SwatS1CPS(MiniCPS):
    """Main container used to run the simulation."""

    def __init__(self, name, net):
        # NOTE(review): MiniCPS.__init__ is not invoked here; this
        # container drives the whole lifecycle itself -- confirm that
        # is intentional.
        self.name = name
        self.net = net

        net.start()
        net.pingAll()

        # start devices
        plc1, plc2, plc3, s1 = self.net.get(
            'plc1', 'plc2', 'plc3', 's1')

        # SPHINX_SWAT_TUTORIAL RUN(
        # Launch each PLC controller and the physical-process model as
        # background processes on their respective Mininet hosts.
        plc2.cmd(sys.executable + ' plc2.py &')
        plc3.cmd(sys.executable + ' plc3.py &')
        plc1.cmd(sys.executable + ' plc1.py &')
        s1.cmd(sys.executable + ' physical_process.py &')
        # SPHINX_SWAT_TUTORIAL RUN)

        # drop into the interactive Mininet CLI; the network is torn
        # down once the CLI exits
        CLI(self.net)

        net.stop()
if __name__ == "__main__":
    # Build the SWaT subprocess-1 topology and hand it to the CPS
    # container, which starts the network and an interactive CLI.
    topo = SwatTopo()
    net = Mininet(topo=topo)
    swat_s1_cps = SwatS1CPS(
        name='swat_s1',
        net=net)
| 18.96 | 57 | 0.580169 |
from mininet.net import Mininet
from mininet.cli import CLI
from minicps.mcps import MiniCPS
from topo import SwatTopo
import sys
# (Comment-stripped duplicate of the SwatS1CPS class above; part of the
# dataset's "content_no_comment" column.)
class SwatS1CPS(MiniCPS):
    def __init__(self, name, net):
        self.name = name
        self.net = net

        net.start()
        net.pingAll()

        plc1, plc2, plc3, s1 = self.net.get(
            'plc1', 'plc2', 'plc3', 's1')

        plc2.cmd(sys.executable + ' plc2.py &')
        plc3.cmd(sys.executable + ' plc3.py &')
        plc1.cmd(sys.executable + ' plc1.py &')
        s1.cmd(sys.executable + ' physical_process.py &')

        CLI(self.net)

        net.stop()
# (Comment-stripped duplicate of the entry-point block above.)
if __name__ == "__main__":
    topo = SwatTopo()
    net = Mininet(topo=topo)
    swat_s1_cps = SwatS1CPS(
        name='swat_s1',
        net=net)
| true | true |
1c3881223c1ee0b542cb5cb12e20136c58a24464 | 6,582 | py | Python | pyOCD/target/target_MK22FN1M0Axxx12.py | orenc17/pyOCD | b5c9bc62b68323129aa258e128a8fc68aaa2527f | [
"Apache-2.0"
] | 1 | 2018-10-30T12:47:34.000Z | 2018-10-30T12:47:34.000Z | pyOCD/target/target_MK22FN1M0Axxx12.py | orenc17/pyOCD | b5c9bc62b68323129aa258e128a8fc68aaa2527f | [
"Apache-2.0"
] | null | null | null | pyOCD/target/target_MK22FN1M0Axxx12.py | orenc17/pyOCD | b5c9bc62b68323129aa258e128a8fc68aaa2527f | [
"Apache-2.0"
] | 1 | 2021-02-04T10:05:57.000Z | 2021-02-04T10:05:57.000Z | """
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .family.target_kinetis import Kinetis
from .family.flash_kinetis import Flash_Kinetis
from ..core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ..debug.svd import SVDFile
import logging
# SIM (System Integration Module) flash-configuration register addresses.
SIM_FCFG1 = 0x4004804C
SIM_FCFG2 = 0x40048050
# PFLSH bit: set when the part is program-flash only (no FlexNVM);
# clear means FlexNVM is present and program flash is half-sized
# (see K22FA12.init below).
SIM_FCFG2_PFLSH = (1 << 23)

# Flash programming algorithm blob, loaded into target RAM at
# load_address and executed by the debugger.  'instructions' is
# Thumb machine code; do not edit by hand.
flash_algo = {
    'load_address' : 0x20000000,
    'instructions' : [
        0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
        0xb510483e, 0x5120f24c, 0xf64d81c1, 0x81c11128, 0xf0218801, 0x80010101, 0x78414839, 0x0160f001,
        0xbf0c2940, 0x21002101, 0x444a4a36, 0xb1397011, 0xf0217841, 0x70410160, 0xf0117841, 0xd1fb0f60,
        0x44484831, 0xf864f000, 0xbf182800, 0xbd102001, 0x4448482c, 0xb1587800, 0x78414829, 0x0160f021,
        0x0140f041, 0x78417041, 0x0160f001, 0xd1fa2940, 0x47702000, 0xb5104824, 0x44484924, 0xf891f000,
        0xbf182800, 0x2100bd10, 0xe8bd481f, 0x44484010, 0xb958f000, 0x4c1cb570, 0x444c4605, 0x4b1b4601,
        0x68e24620, 0xf8b6f000, 0xbf182800, 0x2300bd70, 0x68e24629, 0x4070e8bd, 0x44484813, 0xb94cf000,
        0x460cb570, 0x4606460b, 0x480f4601, 0x4615b084, 0xf0004448, 0x2800f8eb, 0xb004bf1c, 0x2000bd70,
        0xe9cd2101, 0x90021000, 0x462b4807, 0x46314622, 0xf0004448, 0xb004f97f, 0x0000bd70, 0x40052000,
        0x4007e000, 0x00000004, 0x00000008, 0x6b65666b, 0xbf042800, 0x47702004, 0x6cc949ea, 0x6103f3c1,
        0xbf08290f, 0x1180f44f, 0x4ae7bf1f, 0xf832447a, 0x02891011, 0xe9c02200, 0x21022100, 0x61426081,
        0x618202c9, 0x1203e9c0, 0x52a0f04f, 0x2108e9c0, 0x47702000, 0xbf0e2800, 0x61012004, 0x47702000,
        0x48da4602, 0x49d96840, 0x0070f440, 0x47706048, 0x217048d7, 0x21807001, 0x78017001, 0x0f80f011,
        0x7800d0fb, 0x0f20f010, 0x2067bf1c, 0xf0104770, 0xbf1c0f10, 0x47702068, 0x0001f010, 0x2069bf18,
        0x28004770, 0x2004bf04, 0xb5104770, 0x4ac84604, 0x403bf06f, 0x48c76050, 0xbf144281, 0x2000206b,
        0xbf182800, 0x4620bd10, 0xffd2f7ff, 0x46204603, 0xffc6f7ff, 0xbd104618, 0xbf042800, 0x47702004,
        0x60532300, 0x60d36093, 0x61536113, 0x61d36193, 0x68c16011, 0xe9d06051, 0xfbb11001, 0x6090f0f0,
        0x21102008, 0x0103e9c2, 0x1005e9c2, 0x61d02004, 0x47702000, 0x4df0e92d, 0x4615b088, 0x460c4698,
        0x466a4682, 0xffd8f7ff, 0x4621462a, 0x9b044650, 0xf931f000, 0xbf1c0007, 0xe8bdb008, 0xe9dd8df0,
        0x19604600, 0xfbb51e45, 0xfb06f0f6, 0xb1205010, 0xf0f6fbb5, 0x43701c40, 0x42ac1e45, 0xf8dfbf98,
        0xd81cb270, 0x407ff024, 0x6010f040, 0x0004f8cb, 0x45804898, 0x206bbf14, 0x28002000, 0xb008bf1c,
        0x8df0e8bd, 0xf7ff4650, 0x4607ff73, 0x0010f8da, 0xbf182800, 0xb9174780, 0x42ac4434, 0x4650d9e2,
        0xff5ef7ff, 0x4638b008, 0x8df0e8bd, 0xbf042a00, 0x47702004, 0x45f0e92d, 0x4614b089, 0x460d461e,
        0x466a4680, 0xff88f7ff, 0x46294632, 0x9b034640, 0xf8e1f000, 0xbf1c0007, 0xe8bdb009, 0x9d0085f0,
        0xbf182e00, 0xa1e8f8df, 0xf854d025, 0xf8ca0b04, 0x98030008, 0xbf042804, 0x407ff025, 0x60c0f040,
        0x2808d009, 0xf854d109, 0xf8ca0b04, 0xf025000c, 0xf040407f, 0xf8ca60e0, 0x46400004, 0xff28f7ff,
        0x1010f8d8, 0x29004607, 0x4788bf18, 0x9803b91f, 0x1a364405, 0x4640d1d9, 0xff12f7ff, 0x4638b009,
        0x85f0e8bd, 0xbf042800, 0x47702004, 0xea424a62, 0x4a5f4101, 0xe70b6051, 0x4dffe92d, 0x4614b088,
        0x460d469a, 0x9808466a, 0xff36f7ff, 0x46294622, 0x98089b05, 0xf88ff000, 0xbf1c2800, 0xe8bdb00c,
        0x466a8df0, 0x98084629, 0xff26f7ff, 0xf8dd9e00, 0x42708008, 0x0100f1c8, 0x42474008, 0xbf0842b7,
        0x2c004447, 0xf8dfbf18, 0xd01fb128, 0x42a51bbd, 0x4625bf88, 0xf0269805, 0xfbb5417f, 0xf041f0f0,
        0xf8cb7180, 0x04001004, 0x200aea40, 0x00fff040, 0x0008f8cb, 0xf7ff9808, 0x2800fecb, 0xb00cbf1c,
        0x8df0e8bd, 0x442e1b64, 0xd1df4447, 0x2000b00c, 0x8df0e8bd, 0xbf042b00, 0x47702004, 0x4dffe92d,
        0x4616b088, 0x7a14e9dd, 0x460c461d, 0xf8dd466a, 0x98088058, 0xfee0f7ff, 0x3007e9dd, 0x46214632,
        0xf839f000, 0xbf1c2800, 0xe8bdb00c, 0x9c008df0, 0xbf042e00, 0xe8bdb00c, 0xf8df8df0, 0xf06fb094,
        0xea40407f, 0xf0246707, 0xf040407f, 0xf8cb7000, 0xf8cb0004, 0x68287008, 0x000cf8cb, 0xf7ff9808,
        0xb168fe87, 0x0f00f1ba, 0xf8cabf18, 0xf1b84000, 0xbf1c0f00, 0xf8c82100, 0xb00c1000, 0x8df0e8bd,
        0x1a769907, 0x0103f021, 0x9907440d, 0xd1da440c, 0xe8bdb00c, 0x28008df0, 0x2004bf04, 0x1e5b4770,
        0xbf0e4219, 0x2065421a, 0x68034770, 0xd806428b, 0x44116840, 0x42884418, 0x2000bf24, 0x20664770,
        0x00004770, 0x40048000, 0x000003b4, 0x4001f000, 0x40020000, 0x6b65666b, 0x4000ffff, 0x40020004,
        0x40020010, 0x00100008, 0x00200018, 0x00400030, 0x00800060, 0x010000c0, 0x02000180, 0x04000300,
        0x00000600, 0x00000000, 0x00000000,
    ],

    # Entry points into the blob (offsets from load_address).
    'pc_init' : 0x20000021,
    'pc_unInit': 0x20000071,
    'pc_program_page': 0x200000E1,
    'pc_erase_sector': 0x200000B5,
    'pc_eraseAll' : 0x20000095,

    'static_base' : 0x20000000 + 0x00000020 + 0x00000504,
    'begin_stack' : 0x20000000 + 0x00000800,
    'begin_data' : 0x20000000 + 0x00000A00,
    'page_size' : 0x00000200,
    'analyzer_supported' : True,
    'analyzer_address' : 0x1ffff000,  # Analyzer 0x1ffff000..0x1ffff600
    'page_buffers' : [0x20003000, 0x20004000],  # Enable double buffering
    'min_program_length' : 8,
};
class Flash_k22fa12(Flash_Kinetis):
    """Flash programming interface for the K22FA12, bound to the
    flash_algo blob defined above."""

    def __init__(self, target):
        super(Flash_k22fa12, self).__init__(target, flash_algo)
class K22FA12(Kinetis):
    """Target definition for the NXP/Freescale MK22FN1M0Axxx12."""

    # 1MB flash with 4kB sectors, 128kB RAM
    memoryMap = MemoryMap(
        FlashRegion( start=0, length=0x100000, blocksize=0x1000, isBootMemory=True),
        RamRegion( start=0x1fff0000, length=0x20000)
    )

    def __init__(self, link):
        super(K22FA12, self).__init__(link, self.memoryMap)
        # MDM-AP IDR value used to identify this family.
        self.mdm_idr = 0x001c0000
        self._svd_location = SVDFile(vendor="Freescale", filename="MK22FA12.svd", is_local=False)

    def init(self):
        super(K22FA12, self).init()
        # If the device has FlexNVM, then it has half-sized program flash.
        fcfg2 = self.read32(SIM_FCFG2)
        if (fcfg2 & SIM_FCFG2_PFLSH) == 0:
            # shrink the flash region to the lower 512kB
            rgn = self.memory_map.getRegionForAddress(0)
            rgn._end = 0x7ffff
| 56.25641 | 101 | 0.762078 |
from .family.target_kinetis import Kinetis
from .family.flash_kinetis import Flash_Kinetis
from ..core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ..debug.svd import SVDFile
import logging
# (Comment-stripped duplicate of the pyOCD target module above; part of
# the dataset's "content_no_comment" column.)
SIM_FCFG1 = 0x4004804C
SIM_FCFG2 = 0x40048050
SIM_FCFG2_PFLSH = (1 << 23)

flash_algo = {
    'load_address' : 0x20000000,
    'instructions' : [
        0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
        0xb510483e, 0x5120f24c, 0xf64d81c1, 0x81c11128, 0xf0218801, 0x80010101, 0x78414839, 0x0160f001,
        0xbf0c2940, 0x21002101, 0x444a4a36, 0xb1397011, 0xf0217841, 0x70410160, 0xf0117841, 0xd1fb0f60,
        0x44484831, 0xf864f000, 0xbf182800, 0xbd102001, 0x4448482c, 0xb1587800, 0x78414829, 0x0160f021,
        0x0140f041, 0x78417041, 0x0160f001, 0xd1fa2940, 0x47702000, 0xb5104824, 0x44484924, 0xf891f000,
        0xbf182800, 0x2100bd10, 0xe8bd481f, 0x44484010, 0xb958f000, 0x4c1cb570, 0x444c4605, 0x4b1b4601,
        0x68e24620, 0xf8b6f000, 0xbf182800, 0x2300bd70, 0x68e24629, 0x4070e8bd, 0x44484813, 0xb94cf000,
        0x460cb570, 0x4606460b, 0x480f4601, 0x4615b084, 0xf0004448, 0x2800f8eb, 0xb004bf1c, 0x2000bd70,
        0xe9cd2101, 0x90021000, 0x462b4807, 0x46314622, 0xf0004448, 0xb004f97f, 0x0000bd70, 0x40052000,
        0x4007e000, 0x00000004, 0x00000008, 0x6b65666b, 0xbf042800, 0x47702004, 0x6cc949ea, 0x6103f3c1,
        0xbf08290f, 0x1180f44f, 0x4ae7bf1f, 0xf832447a, 0x02891011, 0xe9c02200, 0x21022100, 0x61426081,
        0x618202c9, 0x1203e9c0, 0x52a0f04f, 0x2108e9c0, 0x47702000, 0xbf0e2800, 0x61012004, 0x47702000,
        0x48da4602, 0x49d96840, 0x0070f440, 0x47706048, 0x217048d7, 0x21807001, 0x78017001, 0x0f80f011,
        0x7800d0fb, 0x0f20f010, 0x2067bf1c, 0xf0104770, 0xbf1c0f10, 0x47702068, 0x0001f010, 0x2069bf18,
        0x28004770, 0x2004bf04, 0xb5104770, 0x4ac84604, 0x403bf06f, 0x48c76050, 0xbf144281, 0x2000206b,
        0xbf182800, 0x4620bd10, 0xffd2f7ff, 0x46204603, 0xffc6f7ff, 0xbd104618, 0xbf042800, 0x47702004,
        0x60532300, 0x60d36093, 0x61536113, 0x61d36193, 0x68c16011, 0xe9d06051, 0xfbb11001, 0x6090f0f0,
        0x21102008, 0x0103e9c2, 0x1005e9c2, 0x61d02004, 0x47702000, 0x4df0e92d, 0x4615b088, 0x460c4698,
        0x466a4682, 0xffd8f7ff, 0x4621462a, 0x9b044650, 0xf931f000, 0xbf1c0007, 0xe8bdb008, 0xe9dd8df0,
        0x19604600, 0xfbb51e45, 0xfb06f0f6, 0xb1205010, 0xf0f6fbb5, 0x43701c40, 0x42ac1e45, 0xf8dfbf98,
        0xd81cb270, 0x407ff024, 0x6010f040, 0x0004f8cb, 0x45804898, 0x206bbf14, 0x28002000, 0xb008bf1c,
        0x8df0e8bd, 0xf7ff4650, 0x4607ff73, 0x0010f8da, 0xbf182800, 0xb9174780, 0x42ac4434, 0x4650d9e2,
        0xff5ef7ff, 0x4638b008, 0x8df0e8bd, 0xbf042a00, 0x47702004, 0x45f0e92d, 0x4614b089, 0x460d461e,
        0x466a4680, 0xff88f7ff, 0x46294632, 0x9b034640, 0xf8e1f000, 0xbf1c0007, 0xe8bdb009, 0x9d0085f0,
        0xbf182e00, 0xa1e8f8df, 0xf854d025, 0xf8ca0b04, 0x98030008, 0xbf042804, 0x407ff025, 0x60c0f040,
        0x2808d009, 0xf854d109, 0xf8ca0b04, 0xf025000c, 0xf040407f, 0xf8ca60e0, 0x46400004, 0xff28f7ff,
        0x1010f8d8, 0x29004607, 0x4788bf18, 0x9803b91f, 0x1a364405, 0x4640d1d9, 0xff12f7ff, 0x4638b009,
        0x85f0e8bd, 0xbf042800, 0x47702004, 0xea424a62, 0x4a5f4101, 0xe70b6051, 0x4dffe92d, 0x4614b088,
        0x460d469a, 0x9808466a, 0xff36f7ff, 0x46294622, 0x98089b05, 0xf88ff000, 0xbf1c2800, 0xe8bdb00c,
        0x466a8df0, 0x98084629, 0xff26f7ff, 0xf8dd9e00, 0x42708008, 0x0100f1c8, 0x42474008, 0xbf0842b7,
        0x2c004447, 0xf8dfbf18, 0xd01fb128, 0x42a51bbd, 0x4625bf88, 0xf0269805, 0xfbb5417f, 0xf041f0f0,
        0xf8cb7180, 0x04001004, 0x200aea40, 0x00fff040, 0x0008f8cb, 0xf7ff9808, 0x2800fecb, 0xb00cbf1c,
        0x8df0e8bd, 0x442e1b64, 0xd1df4447, 0x2000b00c, 0x8df0e8bd, 0xbf042b00, 0x47702004, 0x4dffe92d,
        0x4616b088, 0x7a14e9dd, 0x460c461d, 0xf8dd466a, 0x98088058, 0xfee0f7ff, 0x3007e9dd, 0x46214632,
        0xf839f000, 0xbf1c2800, 0xe8bdb00c, 0x9c008df0, 0xbf042e00, 0xe8bdb00c, 0xf8df8df0, 0xf06fb094,
        0xea40407f, 0xf0246707, 0xf040407f, 0xf8cb7000, 0xf8cb0004, 0x68287008, 0x000cf8cb, 0xf7ff9808,
        0xb168fe87, 0x0f00f1ba, 0xf8cabf18, 0xf1b84000, 0xbf1c0f00, 0xf8c82100, 0xb00c1000, 0x8df0e8bd,
        0x1a769907, 0x0103f021, 0x9907440d, 0xd1da440c, 0xe8bdb00c, 0x28008df0, 0x2004bf04, 0x1e5b4770,
        0xbf0e4219, 0x2065421a, 0x68034770, 0xd806428b, 0x44116840, 0x42884418, 0x2000bf24, 0x20664770,
        0x00004770, 0x40048000, 0x000003b4, 0x4001f000, 0x40020000, 0x6b65666b, 0x4000ffff, 0x40020004,
        0x40020010, 0x00100008, 0x00200018, 0x00400030, 0x00800060, 0x010000c0, 0x02000180, 0x04000300,
        0x00000600, 0x00000000, 0x00000000,
    ],
    'pc_init' : 0x20000021,
    'pc_unInit': 0x20000071,
    'pc_program_page': 0x200000E1,
    'pc_erase_sector': 0x200000B5,
    'pc_eraseAll' : 0x20000095,
    'static_base' : 0x20000000 + 0x00000020 + 0x00000504,
    'begin_stack' : 0x20000000 + 0x00000800,
    'begin_data' : 0x20000000 + 0x00000A00,
    'page_size' : 0x00000200,
    'analyzer_supported' : True,
    'analyzer_address' : 0x1ffff000,
    'page_buffers' : [0x20003000, 0x20004000],
    'min_program_length' : 8,
};


class Flash_k22fa12(Flash_Kinetis):
    def __init__(self, target):
        super(Flash_k22fa12, self).__init__(target, flash_algo)


class K22FA12(Kinetis):
    memoryMap = MemoryMap(
        FlashRegion( start=0, length=0x100000, blocksize=0x1000, isBootMemory=True),
        RamRegion( start=0x1fff0000, length=0x20000)
    )

    def __init__(self, link):
        super(K22FA12, self).__init__(link, self.memoryMap)
        self.mdm_idr = 0x001c0000
        self._svd_location = SVDFile(vendor="Freescale", filename="MK22FA12.svd", is_local=False)

    def init(self):
        super(K22FA12, self).init()
        fcfg2 = self.read32(SIM_FCFG2)
        if (fcfg2 & SIM_FCFG2_PFLSH) == 0:
            rgn = self.memory_map.getRegionForAddress(0)
            rgn._end = 0x7ffff
| true | true |
1c38824e94a12bf48e5da200097fda8c47173733 | 4,662 | py | Python | pynmrstar/utils.py | bmrb-io/PyNMRSTAR | 55df5bf7de192e7a6c95f37e0756f09e3f504170 | [
"MIT"
] | 1 | 2022-03-07T16:10:56.000Z | 2022-03-07T16:10:56.000Z | pynmrstar/utils.py | bmrb-io/PyNMRSTAR | 55df5bf7de192e7a6c95f37e0756f09e3f504170 | [
"MIT"
] | 2 | 2021-10-29T19:25:56.000Z | 2022-02-28T11:05:44.000Z | pynmrstar/utils.py | bmrb-io/PyNMRSTAR | 55df5bf7de192e7a6c95f37e0756f09e3f504170 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
""" This file contains various helper functions."""
import functools
import json
import os
from typing import Iterable, Any, Dict
from urllib.error import HTTPError, URLError
from pynmrstar import definitions, cnmrstar, entry as entry_mod
from pynmrstar._internal import _interpret_file
from pynmrstar.schema import Schema
# Set this to allow import * from pynmrstar to work sensibly
# Public API of this helper module (format_tag_lc is intentionally
# not exported).
__all__ = ['diff', 'format_category', 'format_tag', 'get_schema', 'iter_entries', 'quote_value', 'validate']
def diff(entry1: 'entry_mod.Entry', entry2: 'entry_mod.Entry') -> None:
    """Print the differences between two entries.

    Non-equal entries are always detected, but which specific
    differences are reported depends on the order of the entries.
    """

    differences = entry1.compare(entry2)
    if not differences:
        print("Identical entries.")
    for item in differences:
        print(item)
@functools.lru_cache(maxsize=1024)
def format_category(tag: str) -> str:
    """Return the category portion of *tag*: a leading '_' is ensured
    and anything from the first '.' onward is discarded. Empty input is
    returned unchanged."""

    if not tag:
        return tag
    category = tag if tag.startswith("_") else "_" + tag
    dot = category.find(".")
    return category[:dot] if dot >= 0 else category
@functools.lru_cache(maxsize=1024)
def format_tag(tag: str) -> str:
    """Return only the part of *tag* after the first '.', if any."""

    _, dot, rest = tag.partition('.')
    return rest if dot else tag
@functools.lru_cache(maxsize=1024)
def format_tag_lc(tag: str) -> str:
    """Lower-case *tag* and strip everything before the '.'."""

    lowered = tag.lower()
    return format_tag(lowered)
# noinspection PyDefaultArgument
def get_schema(passed_schema: 'Schema' = None, _cached_schema: Dict[str, Schema] = {}) -> 'Schema':
    """If passed a schema (not None) it returns it. If passed none,
    it checks if the default schema has been initialized. If not
    initialized, it initializes it. Then it returns the default schema.

    The mutable default ``_cached_schema`` is a deliberate module-level
    cache, shared across calls; it is not a bug.
    """

    if passed_schema:
        return passed_schema

    if not _cached_schema:
        # Try to load the local file first
        try:
            schema_file = os.path.join(os.path.dirname(os.path.realpath(__file__)))
            schema_file = os.path.join(schema_file, "reference_files/schema.csv")
            _cached_schema['schema'] = Schema(schema_file=schema_file)
        except IOError:
            # Try to load from the internet
            try:
                _cached_schema['schema'] = Schema()
            except (HTTPError, URLError):
                raise ValueError("Could not load a BMRB schema from the internet or from the local repository.")

    return _cached_schema['schema']
def iter_entries(metabolomics: bool = False) -> Iterable['entry_mod.Entry']:
    """ Returns a generator that will yield an Entry object for every
    macromolecule entry in the current BMRB database. Perfect for performing
    an operation across the entire BMRB database. Set `metabolomics=True`
    in order to get all the entries in the metabolomics database.

    Performs network requests: one to list entry IDs, then one per entry.
    """

    # choose the listing endpoint for the requested database
    api_url = f"{definitions.API_URL}/list_entries?database=macromolecules"
    if metabolomics:
        api_url = f"{definitions.API_URL}/list_entries?database=metabolomics"

    for entry in json.loads(_interpret_file(api_url).read()):
        yield entry_mod.Entry.from_database(entry)
@functools.lru_cache(maxsize=65536, typed=True)
def quote_value(value: Any) -> str:
    """Automatically quotes the value in the appropriate way. Don't
    quote values you send to this method or they will show up in
    another set of quotes as part of the actual data. E.g.:

    quote_value('"e. coli"') returns '\'"e. coli"\''

    while

    quote_value("e. coli") returns "'e. coli'"

    This will automatically be called on all values when you use a str()
    method (so don't call it before inserting values into tags or loops).

    Be mindful of the value of STR_CONVERSION_DICT as it will effect the
    way the value is converted to a string.
    """

    # Allow manual specification of conversions for booleans, Nones, etc.
    # NOTE(review): the `in` check runs before the isinstance filter and
    # will raise TypeError for unhashable values -- presumably callers
    # only pass hashable scalars (lru_cache requires that anyway).
    if value in definitions.STR_CONVERSION_DICT:
        if any(isinstance(value, type(x)) for x in definitions.STR_CONVERSION_DICT):
            value = definitions.STR_CONVERSION_DICT[value]

    # Actual quoting is implemented in the C extension.
    return cnmrstar.quote_value(value)
def validate(entry_to_validate: 'entry_mod.Entry', schema: 'Schema' = None) -> None:
    """Print a validation report for *entry_to_validate*."""

    problems = entry_to_validate.validate(schema=schema)
    if not problems:
        print("No problems found during validation.")
    for index, problem in enumerate(problems, start=1):
        print(f"{index}: {problem}")
| 34.533333 | 112 | 0.682754 |
import functools
import json
import os
from typing import Iterable, Any, Dict
from urllib.error import HTTPError, URLError
from pynmrstar import definitions, cnmrstar, entry as entry_mod
from pynmrstar._internal import _interpret_file
from pynmrstar.schema import Schema
__all__ = ['diff', 'format_category', 'format_tag', 'get_schema', 'iter_entries', 'quote_value', 'validate']
def diff(entry1: 'entry_mod.Entry', entry2: 'entry_mod.Entry') -> None:
diffs = entry1.compare(entry2)
if len(diffs) == 0:
print("Identical entries.")
for difference in diffs:
print(difference)
@functools.lru_cache(maxsize=1024)
def format_category(tag: str) -> str:
if tag:
if not tag.startswith("_"):
tag = "_" + tag
if "." in tag:
tag = tag[:tag.index(".")]
return tag
@functools.lru_cache(maxsize=1024)
def format_tag(tag: str) -> str:
if '.' in tag:
return tag[tag.index('.') + 1:]
return tag
@functools.lru_cache(maxsize=1024)
def format_tag_lc(tag: str) -> str:
return format_tag(tag.lower())
def get_schema(passed_schema: 'Schema' = None, _cached_schema: Dict[str, Schema] = {}) -> 'Schema':
    """Return *passed_schema* if given, otherwise a lazily loaded default Schema.

    ``_cached_schema`` deliberately uses a mutable default argument as a
    process-wide cache, so the default schema is only loaded once.
    """
    if passed_schema:
        return passed_schema
    if not _cached_schema:
        try:
            # Prefer the schema CSV shipped alongside this module.
            schema_file = os.path.join(os.path.dirname(os.path.realpath(__file__)))
            schema_file = os.path.join(schema_file, "reference_files/schema.csv")
            _cached_schema['schema'] = Schema(schema_file=schema_file)
        except IOError:
            try:
                # Fall back to Schema()'s own default source
                # (presumably a remote fetch, given the errors caught below).
                _cached_schema['schema'] = Schema()
            except (HTTPError, URLError):
                raise ValueError("Could not load a BMRB schema from the internet or from the local repository.")
    return _cached_schema['schema']
def iter_entries(metabolomics: bool = False) -> Iterable['entry_mod.Entry']:
    """Yield every BMRB entry listed by the API, fetched one at a time.

    By default the macromolecules database is listed; pass
    ``metabolomics=True`` to list the metabolomics database instead.
    """
    api_url = f"{definitions.API_URL}/list_entries?database=macromolecules"
    if metabolomics:
        api_url = f"{definitions.API_URL}/list_entries?database=metabolomics"
    # Each listed ID triggers a separate database fetch when yielded.
    for entry in json.loads(_interpret_file(api_url).read()):
        yield entry_mod.Entry.from_database(entry)
@functools.lru_cache(maxsize=65536, typed=True)
def quote_value(value: Any) -> str:
    """Return *value* as a string quoted for NMR-STAR output.

    Values listed in ``definitions.STR_CONVERSION_DICT`` are first mapped
    to their configured string form; everything else is passed straight to
    the C helper.
    """
    # `in` compares by equality, so e.g. 0 could match a False key; the
    # isinstance check restricts substitution to the exact configured types.
    if value in definitions.STR_CONVERSION_DICT:
        if any(isinstance(value, type(x)) for x in definitions.STR_CONVERSION_DICT):
            value = definitions.STR_CONVERSION_DICT[value]
    return cnmrstar.quote_value(value)
def validate(entry_to_validate: 'entry_mod.Entry', schema: 'Schema' = None) -> None:
    """Print a validation report of an object.

    Prints one numbered line per problem found, or a short all-clear
    message when the entry validates cleanly.
    """
    validation = entry_to_validate.validate(schema=schema)
    if len(validation) == 0:
        print("No problems found during validation.")
    for pos, err in enumerate(validation):
        print(f"{pos + 1}: {err}")
| true | true |
1c38828042952d710c0ec220f1f9afaa869a649e | 867 | py | Python | src/east_coast_images/plot_ratios.py | slawler/SI_2019_Coastal | 4064d323bc62ce2f47a7af41b9a11ea5538ad181 | [
"MIT"
] | 1 | 2020-03-13T07:51:44.000Z | 2020-03-13T07:51:44.000Z | src/east_coast_images/plot_ratios.py | cheginit/SI_2019_Coastal | 4064d323bc62ce2f47a7af41b9a11ea5538ad181 | [
"MIT"
] | null | null | null | src/east_coast_images/plot_ratios.py | cheginit/SI_2019_Coastal | 4064d323bc62ce2f47a7af41b9a11ea5538ad181 | [
"MIT"
] | 1 | 2020-03-13T14:44:57.000Z | 2020-03-13T14:44:57.000Z | import pandas as pd
from pathlib import Path
from geopy import distance
import utils
# Bay-shape metrics, one row per bay (columns include Lat/Lon, Shape, Rlb...).
data = pd.read_csv('shape_data.csv')
# Westernmost point (minimum longitude) is the distance origin;
# per the axis label below this is presumably Corpus Christi -- TODO confirm.
left = data.loc[data.Lon.idxmin(), ['Lat', 'Lon']]
# Geodesic (great-circle) distance in km from the origin to each row.
data['Distance'] = data.apply(lambda x: distance.geodesic(left, (x['Lat'], x['Lon'])).km, axis=1)
# Split rows by shape class; each keeps only the columns it plots.
trap = data.loc[data.Shape == 'trapezoid', ['Lon', 'Lat', 'Rlb', 'Rbt', 'Distance']]
tri = data.loc[data.Shape == 'triangle', ['Lon', 'Lat', 'Rlb', 'Rbr', 'Distance']]
fig, gs, canvas = utils.make_canvas(5, 5)
ax = fig.add_subplot(gs[0])
# Scatter Rlb vs distance: green triangles vs orange squares.
tri.plot(kind='scatter', x='Distance', y='Rlb', marker='^', color='g', ax=ax)
trap.plot(kind='scatter', x='Distance', y='Rlb', marker='s', color='#FFA500', ax=ax)
ax.set_xlabel('Distance from Corpus Christi (km)')
ax.set_ylabel('$R_{lb} = \\dfrac{L_b}{W_b}$')
ax.legend(['Triangle', 'Trapezoid'])
canvas.print_figure("ratio.png", format="png", dpi=300);
| 39.409091 | 97 | 0.655133 | import pandas as pd
from pathlib import Path
from geopy import distance
import utils
# Bay-shape metrics, one row per bay (columns include Lat/Lon, Shape, Rlb...).
data = pd.read_csv('shape_data.csv')
# Westernmost point (minimum longitude) is the distance origin;
# per the axis label below this is presumably Corpus Christi -- TODO confirm.
left = data.loc[data.Lon.idxmin(), ['Lat', 'Lon']]
# Geodesic (great-circle) distance in km from the origin to each row.
data['Distance'] = data.apply(lambda x: distance.geodesic(left, (x['Lat'], x['Lon'])).km, axis=1)
# Split rows by shape class; each keeps only the columns it plots.
trap = data.loc[data.Shape == 'trapezoid', ['Lon', 'Lat', 'Rlb', 'Rbt', 'Distance']]
tri = data.loc[data.Shape == 'triangle', ['Lon', 'Lat', 'Rlb', 'Rbr', 'Distance']]
fig, gs, canvas = utils.make_canvas(5, 5)
ax = fig.add_subplot(gs[0])
# Scatter Rlb vs distance: green triangles vs orange squares.
tri.plot(kind='scatter', x='Distance', y='Rlb', marker='^', color='g', ax=ax)
trap.plot(kind='scatter', x='Distance', y='Rlb', marker='s', color='#FFA500', ax=ax)
ax.set_xlabel('Distance from Corpus Christi (km)')
ax.set_ylabel('$R_{lb} = \\dfrac{L_b}{W_b}$')
ax.legend(['Triangle', 'Trapezoid'])
canvas.print_figure("ratio.png", format="png", dpi=300);
| true | true |
1c388316514f753f1f2f8589d28db201efa18df7 | 5,330 | py | Python | test/IECoreImage/DisplayDriverServerTest.py | andrewkaufman/cortex | 23f893c3c0c92ec1842e3946f9654ff91f40aa67 | [
"BSD-3-Clause"
] | null | null | null | test/IECoreImage/DisplayDriverServerTest.py | andrewkaufman/cortex | 23f893c3c0c92ec1842e3946f9654ff91f40aa67 | [
"BSD-3-Clause"
] | null | null | null | test/IECoreImage/DisplayDriverServerTest.py | andrewkaufman/cortex | 23f893c3c0c92ec1842e3946f9654ff91f40aa67 | [
"BSD-3-Clause"
] | null | null | null | ##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import six
import unittest
import IECore
import IECoreImage
class DisplayDriverServerTest( unittest.TestCase ) :
	"""Tests for DisplayDriverServer port allocation and the port-range registry."""
	def testPortNumber( self ) :
		"""An explicit port is honoured; port 0 means auto-allocate a fresh port."""
		s1 = IECoreImage.DisplayDriverServer( 1559 )
		self.assertEqual( s1.portNumber(), 1559 )
		# A port already in use can not be claimed again.
		self.assertRaises( RuntimeError, IECoreImage.DisplayDriverServer, 1559 )
		# Port 0 (or no argument) auto-allocates a distinct non-zero port.
		s2 = IECoreImage.DisplayDriverServer( 0 )
		self.assertNotEqual( s2.portNumber(), 0 )
		self.assertNotEqual( s2.portNumber(), s1.portNumber() )
		s3 = IECoreImage.DisplayDriverServer( 0 )
		self.assertNotEqual( s3.portNumber(), 0 )
		self.assertNotEqual( s3.portNumber(), s2.portNumber() )
		s4 = IECoreImage.DisplayDriverServer()
		self.assertNotEqual( s4.portNumber(), 0 )
		self.assertNotEqual( s4.portNumber(), s3.portNumber() )
	def testPortRange( self ) :
		"""setPortRange() constrains both explicit and auto-allocated ports."""
		IECoreImage.DisplayDriverServer.setPortRange( ( 45000, 45010 ) )
		self.assertEqual( IECoreImage.DisplayDriverServer.getPortRange(), ( 45000, 45010 ) )
		# Auto-allocation hands out ports sequentially from the range start.
		s1 = IECoreImage.DisplayDriverServer()
		self.assertEqual( s1.portNumber(), 45000 )
		s2 = IECoreImage.DisplayDriverServer()
		self.assertEqual( s2.portNumber(), 45001 )
		# deleting servers should free the ports for reuse
		del s1, s2
		servers = []
		for p in range( 45000, 45011 ) :
			servers.append( IECoreImage.DisplayDriverServer( 0 ) )
			self.assertEqual( servers[-1].portNumber(), p )
		# make one more than the range allows
		six.assertRaisesRegex( self, RuntimeError, ".*Unable to find a free port in the range.*", IECoreImage.DisplayDriverServer, 0 )
		# can't resuse ports
		six.assertRaisesRegex( self, RuntimeError, ".*Unable to connect to port 45010.*Address already in use.*", IECoreImage.DisplayDriverServer, 45010 )
		# bad range
		six.assertRaisesRegex( self, RuntimeError, ".*portNumber must fall.*", IECoreImage.DisplayDriverServer, 44999 )
		six.assertRaisesRegex( self, RuntimeError, ".*portNumber must fall.*", IECoreImage.DisplayDriverServer, 45011 )
		six.assertRaisesRegex( self, RuntimeError, ".*min port must be <= max port.*", IECoreImage.DisplayDriverServer.setPortRange, ( 45010, 45000 ) )
	def testPortRangeRegistry( self ) :
		"""Named port ranges can be registered, looked up, and deregistered."""
		IECoreImage.DisplayDriverServer.registerPortRange( "a", ( 45000, 45010 ) )
		self.assertEqual( IECoreImage.DisplayDriverServer.registeredPortRange( "a" ), ( 45000, 45010 ) )
		IECoreImage.DisplayDriverServer.registerPortRange( "b", ( 45011, 45020 ) )
		self.assertEqual( IECoreImage.DisplayDriverServer.registeredPortRange( "b" ), ( 45011, 45020 ) )
		# Registering the same name twice is an error until it is deregistered.
		six.assertRaisesRegex( self, RuntimeError, ".*is already registered.*", IECoreImage.DisplayDriverServer.registerPortRange, "b", ( 45021, 45030 ) )
		IECoreImage.DisplayDriverServer.deregisterPortRange( "b" )
		six.assertRaisesRegex( self, RuntimeError, ".*is not registered.*", IECoreImage.DisplayDriverServer.deregisterPortRange, "b" )
		six.assertRaisesRegex( self, RuntimeError, ".*is not registered.*", IECoreImage.DisplayDriverServer.registeredPortRange, "b" )
		IECoreImage.DisplayDriverServer.registerPortRange( "b", ( 45021, 45030 ) )
		self.assertEqual( IECoreImage.DisplayDriverServer.registeredPortRange( "b" ), ( 45021, 45030 ) )
		# A registered range can be activated via setPortRange().
		IECoreImage.DisplayDriverServer.setPortRange( IECoreImage.DisplayDriverServer.registeredPortRange( "a" ) )
		s1 = IECoreImage.DisplayDriverServer()
		self.assertEqual( s1.portNumber(), 45000 )
		IECoreImage.DisplayDriverServer.setPortRange( IECoreImage.DisplayDriverServer.registeredPortRange( "b" ) )
		s2 = IECoreImage.DisplayDriverServer()
		self.assertEqual( s2.portNumber(), 45021 )
# Allow running this test module directly as a script.
if __name__ == "__main__":
	unittest.main()
| 44.789916 | 148 | 0.732645 | true | true | |
1c38834865037be0126926c49cc1add3c763f20d | 3,852 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/dhcp6iana6_7939c0233bac17c3e3764bc9bc1a9571.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 20 | 2019-05-07T01:59:14.000Z | 2022-02-11T05:24:47.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/dhcp6iana6_7939c0233bac17c3e3764bc9bc1a9571.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 60 | 2019-04-03T18:59:35.000Z | 2022-02-22T12:05:05.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/dhcp6iana6_7939c0233bac17c3e3764bc9bc1a9571.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 13 | 2019-05-20T10:48:31.000Z | 2021-10-06T07:45:44.000Z | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Dhcp6Iana6(Base):
    """DHCPv6 Iana/Iata endpoint used in traffic
    The Dhcp6Iana6 class encapsulates a required dhcp6Iana6 resource which will be retrieved from the server every time the property is accessed.
    """
    __slots__ = ()
    _SDM_NAME = 'dhcp6Iana6'
    # Maps Python attribute names to the server-side attribute names.
    _SDM_ATT_MAP = {
        'Count': 'count',
        'DescriptiveName': 'descriptiveName',
        'Name': 'name',
    }
    # No enumerated attributes for this resource.
    _SDM_ENUM_MAP = {
    }
    def __init__(self, parent, list_op=False):
        """Initialize the resource with its parent node in the resource tree."""
        super(Dhcp6Iana6, self).__init__(parent, list_op)
    @property
    def Tag(self):
        """
        Returns
        -------
        - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d.Tag): An instance of the Tag class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Local import keeps module load light; returns a cached child when present.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d import Tag
        if self._properties.get('Tag', None) is not None:
            return self._properties.get('Tag')
        else:
            return Tag(self)
    @property
    def Count(self):
        # type: () -> int
        """
        Returns
        -------
        - number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Count'])
    @property
    def DescriptiveName(self):
        # type: () -> str
        """
        Returns
        -------
        - str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
        """
        return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
    @property
    def Name(self):
        # type: () -> str
        """
        Returns
        -------
        - str: Name of NGPF element, guaranteed to be unique in Scenario
        """
        return self._get_attribute(self._SDM_ATT_MAP['Name'])
    @Name.setter
    def Name(self, value):
        # type: (str) -> None
        """Set the NGPF element name on the server."""
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)
    def update(self, Name=None):
        # type: (str) -> Dhcp6Iana6
        """Updates dhcp6Iana6 resource on the server.

        Args
        ----
        - Name (str): Name of NGPF element, guaranteed to be unique in Scenario

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
| 35.33945 | 145 | 0.660696 |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Dhcp6Iana6(Base):
    """DHCPv6 Iana/Iata endpoint resource, fetched from the server on each access."""
    __slots__ = ()
    _SDM_NAME = 'dhcp6Iana6'
    # Maps Python attribute names to the server-side attribute names.
    _SDM_ATT_MAP = {
        'Count': 'count',
        'DescriptiveName': 'descriptiveName',
        'Name': 'name',
    }
    # No enumerated attributes for this resource.
    _SDM_ENUM_MAP = {
    }
    def __init__(self, parent, list_op=False):
        """Initialize the resource with its parent node in the resource tree."""
        super(Dhcp6Iana6, self).__init__(parent, list_op)
    @property
    def Tag(self):
        """Return the child Tag resource (cached instance when available)."""
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d import Tag
        if self._properties.get('Tag', None) is not None:
            return self._properties.get('Tag')
        else:
            return Tag(self)
    @property
    def Count(self):
        """Number of elements in the associated multiplier-scaled container."""
        return self._get_attribute(self._SDM_ATT_MAP['Count'])
    @property
    def DescriptiveName(self):
        """Longer descriptive name for the element (not guaranteed unique)."""
        return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
    @property
    def Name(self):
        """Name of the NGPF element, unique within the Scenario."""
        return self._get_attribute(self._SDM_ATT_MAP['Name'])
    @Name.setter
    def Name(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)
    def update(self, Name=None):
        """Update the dhcp6Iana6 resource on the server with the given Name."""
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
| true | true |
1c3883ecedd095bf376a4526f985caa975a29924 | 38,335 | py | Python | tests/test_tools.py | raidun99/regolith | ad4af42023d6133d18b0ef8aac7f3ef7aa5c1693 | [
"CC0-1.0"
] | null | null | null | tests/test_tools.py | raidun99/regolith | ad4af42023d6133d18b0ef8aac7f3ef7aa5c1693 | [
"CC0-1.0"
] | null | null | null | tests/test_tools.py | raidun99/regolith | ad4af42023d6133d18b0ef8aac7f3ef7aa5c1693 | [
"CC0-1.0"
] | null | null | null | import pytest
import datetime as dt
from regolith.tools import (
filter_publications,
fuzzy_retrieval,
fragment_retrieval,
number_suffix,
latex_safe,
update_schemas,
merge_collections,
group,
is_fully_appointed,
group_member_ids,
group_member_employment_start_end,
month_and_year,
awards_grants_honors,
get_id_from_name,
date_to_rfc822,
key_value_pair_filter,
collection_str,
search_collection,
collect_appts,
grant_burn,
validate_meeting
)
def test_author_publications():
    """Smoke-test filter_publications with an author match and an editor match.

    NOTE(review): no assertion on the return value -- this only verifies
    that the call does not raise; consider asserting the filtered result.
    """
    citations = [{"author": ["CJ", "SJLB"]}, {"editor": "SJLB"}]
    filter_publications(citations, {"SJLB"})
def test_fuzzy_retrieval():
    """fuzzy_retrieval matches a document by _id/name/aka, honoring case sensitivity."""
    person = {
        "_id": "scopatz",
        "aka": [
            "Scopatz",
            "Scopatz, A",
            "Scopatz, A.",
            "Scopatz, A M",
            "Anthony Michael Scopatz",
        ],
        "name": "Anthony Scopatz",
    }
    # Exact _id match succeeds with default (case-sensitive) behavior.
    assert fuzzy_retrieval([person], ["aka", "name", "_id"],
                           "scopatz") == person
    # Case-sensitive lookup of a differently-cased alias finds nothing.
    assert fuzzy_retrieval([person], ["aka", "name", "_id"],
                           "scopatz, a") is None
    # The same alias is found once case sensitivity is turned off.
    assert (
        fuzzy_retrieval(
            [person], ["aka", "name", "_id"], "scopatz, a",
            case_sensitive=False,
        )
        == person
    )
@pytest.mark.parametrize(
    "input,expected",
    [
        (0, "th"),
        (1, "st"),
        (2, "nd"),
        (3, "rd"),
        (4, "th"),
        (10, "th"),
        (13, "th"),  # teens always take "th"
        (33, "rd"),
        (None, ""),  # non-integers yield an empty suffix
        ("0", ""),
    ],
)
def test_number_suffix(input, expected):
    """number_suffix returns the English ordinal suffix for an integer."""
    assert number_suffix(input) == expected
@pytest.mark.parametrize(
"input,expected",
[
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
}
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"}],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
}
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
},
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"}],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
}
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
"amount": "50 mph",
},
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"}],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
}
],
),
],
)
def test_merge_collections(input, expected):
a = input[0]
b = input[1]
target_id = "linked_to"
assert merge_collections(a, b, target_id) == expected
@pytest.mark.parametrize(
    "input,expected,kwargs",
    [
        # Special LaTeX characters get escaped.
        ("$hi", r"\$hi", {}),
        # URLs are wrapped in \url{} by default instead of being escaped.
        (
            r"Website: https://github.com/CJ-Wright/"
            r"Masters_Thesis/raw/master/thesis.pdf hi",
            r"Website: \url{https://github.com/CJ-Wright/"
            r"Masters_Thesis/raw/master/thesis.pdf} hi",
            {},
        ),
        # An alternative wrapper macro can be requested.
        (
            r"Website: https://github.com/CJ-Wright/"
            r"Masters_Thesis/raw/master/thesis.pdf hi",
            r"Website: \href{https://github.com/CJ-Wright/"
            r"Masters_Thesis/raw/master/thesis.pdf} hi",
            {"wrapper": "href"},
        ),
        # With URL detection off, the underscore is escaped like plain text.
        (
            r"Website: https://github.com/CJ-Wright/"
            r"Masters_Thesis/raw/master/thesis.pdf hi",
            r"Website: https://github.com/CJ-Wright/"
            r"Masters\_Thesis/raw/master/thesis.pdf hi",
            {"url_check": False},
        ),
    ],
)
def test_latex_safe(input, expected, kwargs):
    """latex_safe escapes LaTeX specials and wraps (or skips) URLs as requested."""
    output = latex_safe(input, **kwargs)
    assert output == expected
DEFAULT_SCHEMA = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "integer",
},
},
},
},
},
}
USER_SCHEMA0 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {"type": "dict",
"schema": {"day": {"required": False, }, }, },
},
},
}
EXPECTED_SCHEMA0 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": False,
"type": "integer",
},
},
},
},
},
}
USER_SCHEMA1 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {"type": "dict",
"schema": {"day": {"type": "string", }, }, },
},
},
}
EXPECTED_SCHEMA1 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "string",
},
},
},
},
},
}
USER_SCHEMA2 = {
"expenses": {
"begin_day": {
"description": "The first day of expense",
"required": True,
"type": "string",
}
},
}
EXPECTED_SCHEMA2 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "integer",
},
},
},
},
"begin_day": {
"description": "The first day of expense",
"required": True,
"type": "string",
},
},
}
USER_SCHEMA3 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {"day": {"description": "The date on the receipt"}, },
},
},
},
}
EXPECTED_SCHEMA3 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "The date on the receipt",
"required": True,
"type": "integer",
},
},
},
},
},
}
USER_SCHEMA4 = {
"expenses": {
"itemized_expenses": {
"schema": {
"schema": {
"prepaid_expense": {
"description": "Expense paid by the direct billing",
"required": True,
"type": "float",
},
},
},
},
},
}
EXPECTED_SCHEMA4 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "integer",
},
"prepaid_expense": {
"description": "Expense paid by the direct billing",
"required": True,
"type": "float",
},
},
},
},
},
}
USER_SCHEMA5 = {}
EXPECTED_SCHEMA5 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "integer",
},
},
},
},
},
}
USER_SCHEMA6 = {"expenses": {}}
EXPECTED_SCHEMA6 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "integer",
},
},
},
},
},
}
@pytest.mark.parametrize(
    "default_schema, user_schema, expected_schema",
    [
        (DEFAULT_SCHEMA, USER_SCHEMA0, EXPECTED_SCHEMA0),
        (DEFAULT_SCHEMA, USER_SCHEMA1, EXPECTED_SCHEMA1),
        (DEFAULT_SCHEMA, USER_SCHEMA2, EXPECTED_SCHEMA2),
        (DEFAULT_SCHEMA, USER_SCHEMA3, EXPECTED_SCHEMA3),
        (DEFAULT_SCHEMA, USER_SCHEMA4, EXPECTED_SCHEMA4),
        (DEFAULT_SCHEMA, USER_SCHEMA5, EXPECTED_SCHEMA5),
        (DEFAULT_SCHEMA, USER_SCHEMA6, EXPECTED_SCHEMA6),
    ],
)
def test_update_schemas(default_schema, user_schema, expected_schema):
    """update_schemas deep-merges a user schema over the default schema."""
    updated_schema = update_schemas(default_schema, user_schema)
    assert updated_schema == expected_schema
def test_group():
    """group buckets documents by a key; documents missing the key are dropped."""
    doc0 = {"k0": "v00", "k1": "v01"}
    doc1 = {"k0": "v10", "k1": "v11"}
    doc2 = {"k1": "v21"}  # no "k0", so it must not appear in the result
    doc3 = {"k0": "v00", "k1": "v31"}
    db = (doc for doc in (doc0, doc1, doc2, doc3))
    by = "k0"
    expect = {"v00": [doc0, doc3], "v10": [doc1]}
    assert group(db, by) == expect
ppl_coll = [
{
"_id": "m1",
"name": "member1",
"education": [{
"group": "bg",
"institution": "columbiau",
"degree": "PhD",
"department": "apam",
"begin_year": 2016
}],
"employment": [{
"begin_year": 2020,
"begin_month": 1,
"organization": "columbiau",
"position": "Undergraduate Researcher",
"advisor": "sbillinge",
"status": "undergrad"
}]
},
{
"_id": "nm1",
"name": "non-member1",
"education": [{
"institution": "columbiau",
"degree": "PhD",
"department": "apam",
"begin_year": 2016
}],
"employment": [{
"begin_year": 2020,
"begin_month": 1,
"organization": "columbiau",
"position": "Undergraduate Researcher",
"advisor": "sbillinge",
"status": "undergrad"
}]
},
{
"_id": "m2",
"name": "member2",
"education": [{
"institution": "columbiau",
"degree": "PhD",
"department": "apam",
"begin_year": 2016
}],
"employment": [{
"begin_year": 2020,
"begin_month": 1,
"group": "bg",
"organization": "columbiau",
"position": "Undergraduate Researcher",
"advisor": "sbillinge",
"status": "undergrad"
}]
},
]
@pytest.mark.parametrize(
    "input,expected",
    [
        (ppl_coll, set(["m1", "m2"])),
    ],
)
def test_group_member_ids(input, expected):
    """group_member_ids collects _ids of people tagged with the group in education or employment."""
    actual = group_member_ids(input, "bg")
    assert actual == expected
p1 = {
"_id": "scopatz",
"aka": [
"Scopatz",
"Scopatz, A",
"Scopatz, A.",
"Scopatz, A M",
"Anthony Michael Scopatz",
],
"name": "Anthony Scopatz",
}
p2 = {
"_id": "abc",
"aka": [
"A. BC",
"BC, A",
"Anthony BC",
],
"name": "Anthony Bill Chris",
}
@pytest.mark.parametrize(
    "input, expected",
    [
        # Case-insensitive fragment matching across _id/name/aka fields.
        (([p1, p2], ["aka", "name", "_id"],
          "Anth", False),[p1,p2]),
        # Case-sensitive: differently-cased fragment matches nothing.
        (([p1, p2], ["aka", "name", "_id"],
          "scopatz, a", True),[]),
        (([p1, p2], ["aka", "name", "_id"],
          "scopatz, a", False),[p1]),
        (([p1, p2], ["aka", "name", "_id"],
          "ill", False),[p2]),
    ],
)
def test_fragment_retrieval(input, expected):
    """fragment_retrieval returns all documents whose fields contain the fragment."""
    assert(fragment_retrieval(input[0],input[1],input[2],case_sensitive = input[3]) == expected)
@pytest.mark.parametrize(
    "input, expected",
    [
        ((None, None), "present"),  # no date at all reads as "present"
        ((None, 2002), "2002"),     # year only
        ((5,2002), "May 2002"),     # month number is spelled out
    ],
)
def test_month_and_year(input,expected):
    """month_and_year formats a (month, year) pair as a human-readable string."""
    assert(month_and_year(input[0],input[1]) == expected)
@pytest.mark.parametrize(
"appts,start,end,expected",
[
({"name": "Kurt Godel",
"_id": "kgodel",
"appointments": {
"A": {"begin_year": 2017, "begin_month": 6, "begin_day": 1, "end_year": 2017, "end_month": 6, "end_day": 30,
"grant": "grant1", "loading": 1.0, "type": "pd",}}},
"2017-06-01", "2017-07-01", False),
({"name": "MC Escher",
"_id": "mcescher",
"appointments": {
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-30', "grant": "grant1", "loading": 0.5, "type": "pd",},
"B": {"begin_date": '2017-06-01', "end_date": '2017-06-30', "grant": "grant2", "loading": 0.5, "type": "pd",
}}},"2017-06-01", "2017-06-30", True),
({"name": "Johann Sebastian Bach",
"_id": "jsbach",
"appointments":{
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-30', "grant": "grant1", "loading": 0.5, "type": "pd",},
"B": {"begin_date": '2017-06-02', "end_date": '2017-06-29', "grant": "grant2", "loading": 0.5, "type": "pd",
}}}, "2017-06-01", "2017-06-30", False),
({"name": "Evariste Galois",
"_id": "egalois",
"appointments": {
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-15', "grant": "grant1", "loading": 1.0, "type": "pd",},
"B": {"begin_date": '2017-06-16', "end_date": '2017-06-30', "grant": "grant2", "loading": 1.0, "type": "pd",
}}},"2017-06-01", "2017-06-30", True),
({"name": "Ludwig Wittgenstein",
"_id": "lwittgenstein",
"appointments": {
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-15', "grant": "grant1", "loading": 1.0, "type": "pd",},
"B": {"begin_date": '2017-06-17', "end_date": '2017-06-30', "grant": "grant2", "loading": 1.0, "type": "pd",},
"C": {"begin_date": '2017-07-01', "end_date": '2017-07-30', "grant": "grant3", "loading": 1.0, "type": "pd",
}}}, "2017-06-01", "2017-06-30", False),
({"name": "Buckminster Fuller",
"_id": "bfuller",
"appointments":{
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-30', "grant": "grant1", "loading": 1.0, "type": "pd",},
"B": {"begin_date": '2017-06-17', "end_date": '2017-06-30', "grant": "grant2", "loading": 1.0, "type": "pd",
}}}, "2017-06-01", "2017-06-30", False),
({"name": "Lorem Ipsum",
"_id": "lipsum",
"appointments":{
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-30', "grant": "grant1", "loading": 1.0,"type": "pd",},
"B": {"begin_date": '2017-06-17', "end_date": '2017-06-30', "grant": "grant2", "loading": 1.0, "type": "pd",}
}}, "2017-06-01", "2017-06-30", False),
],
)
def test_is_fully_appointed(appts, start, end, expected):
actual = is_fully_appointed(appts, start, end)
assert actual == expected
@pytest.mark.parametrize(
    "input, expected",
    [
        # Funding entries only: each becomes a description with the dollar
        # amount appended, keyed by year.
        ({'funding':[
            {"name": "Omega Laser User's Group Travel Award",
             "value": 1100,
             "year": 2013},
            {"name": "NIF User's Group Travel Award",
             "value": 1150,
             "year": 2013}]},
         [{'description': "Omega Laser User's Group Travel Award (\\$1,100)",
           'year': 2013,
           '_key': 2013.0},
          {'description':"NIF User's Group Travel Award (\\$1,150)",
           'year': 2013,
           '_key': 2013.0}]),
        # Funding plus professional service: the service entry is keyed by
        # year.month and listed before the older funding entry.
        ({'funding':[
            {"name": "Omega Laser User's Group Travel Award",
             "value": 1100,
             "year": 2013}],
          "service":[{"name": "International Steering Committee", "role": "chair",
                      "type": "profession", "year": 2020,
                      "month": 3, "notes": ["something"]}]},
         [{"description":"International Steering Committee",
           "year":2020,
           "_key":2020.03},
          {'description': "Omega Laser User's Group Travel Award (\\$1,100)",
           'year': 2013,
           '_key': 2013.0}]
         )
    ],
)
def test_awards_grants_honors(input, expected):
    """awards_grants_honors builds a sorted list of award/honor entries.

    Renamed from ``test_get_id_from_name``: that name collided with the
    test defined immediately below, so Python's module namespace kept only
    the later definition and this test was silently never collected or run
    by pytest.
    """
    assert(awards_grants_honors(input) == expected)
@pytest.mark.parametrize(
    "input, expected",
    [
        # Unknown name yields None.
        (([{'_id':'afriend','aka':['AB Friend','Tony Friend'], 'name': 'Anthony B Friend'}], 'Simon'), None),
        # Full-name match resolves to the document _id.
        (([{'_id':'afriend','aka':['AB Friend','Tony Friend'], 'name': 'Anthony B Friend'}], 'Anthony B Friend'),
         'afriend'),
        # The right person is picked out of a multi-person collection.
        (([{'_id':'afriend','aka':['AB Friend','Tony Friend'], 'name': 'Anthony B Friend'},
           {'_id':'aeinstein','aka':['Einstein'], 'name': 'Albert Einstein'}],
          'Albert Einstein'),
         'aeinstein')
    ],
)
def test_get_id_from_name(input,expected):
    """get_id_from_name maps a person's name to their _id within a collection."""
    assert(get_id_from_name(input[0],input[1]) == expected)
@pytest.mark.parametrize(
    "input, expected",
    [
        # Month may be given as an abbreviated name or as a number.
        ((2012, 'Jan', 18), 'Wed, 18 Jan 2012 00:00:00 -0000'),
        ((2020, 6, 22), 'Mon, 22 Jun 2020 00:00:00 -0000'),
    ],
)
def test_date_to_rfc822(input,expected):
    """date_to_rfc822 formats (year, month, day) as an RFC 822 timestamp."""
    assert(date_to_rfc822(input[0], input[1], input[2]) == expected)
person1 = {
"_id": "scopatz",
"aka": [
"Scopatz",
"Scopatz, A",
"Scopatz, A.",
"Scopatz, A M",
"Anthony Michael Scopatz",
],
"name": "Anthony Scopatz",
"position": "Professor"
}
person2 = {
"_id": "abc",
"aka": [
"A. BC",
"BC, A",
"Anthony BC",
],
"name": "Anthony Bill Chris",
"position": "Professor"
}
person3 = {
"_id": "jdoe",
"aka": [
"A. BC",
"BC, A",
"Anthony BC",
],
"name": "John Doe",
}
people = [person1, person2, person3]
@pytest.mark.parametrize(
    "input, expected",
    [
        ((people, ['name', 'Doe']), [person3]),
        ((people, ['name', 'Jerry']), []),
        # Partial value "Prof" matches "Professor".
        ((people, ['position', 'Prof']), [person1, person2]),
        # Multiple key/value pairs are combined conjunctively.
        ((people, ['position', 'Prof', 'name', 'Chris']), [person2]),
    ],
)
def test_key_value_pair_filter(input, expected):
    """key_value_pair_filter returns documents matching all key/fragment pairs."""
    assert(key_value_pair_filter(input[0], input[1]) == expected)
@pytest.mark.parametrize(
    "input, expected",
    [
        # With no extra keys, only the _id is rendered per line.
        (([person3], None), "jdoe \n"),
        # An empty collection renders as an empty string.
        (([], None), ''),
        # Requested keys are appended as "key: value" after the _id.
        (([person1, person2], ['position']), "scopatz position: Professor \nabc position: Professor \n"),
        (([person2], ['position']), "abc position: Professor \n"),
    ],
)
def test_collection_str(input, expected):
    """collection_str renders a collection as one line per document."""
    assert(collection_str(input[0], input[1]) == expected)
@pytest.mark.parametrize(
    "input, expected",
    [
        ((people, ['name', 'Doe'], None), "jdoe \n"),
        ((people, ['name', 'Jerry'], None), ""),
        # Multiple key/fragment pairs are combined conjunctively.
        ((people, ['position', 'Prof', 'name', 'Chris'], None), "abc \n"),
        # Extra keys in the third argument are included in the rendering.
        ((people, ['position', 'prof', 'name', 'Chris'], ['position']), "abc position: Professor \n"),
    ],
)
def test_search_collection(input, expected):
    """search_collection filters by key/fragment pairs and renders the matches."""
    assert(search_collection(input[0], input[1], input[2]) == expected)
appointed_people = [
{'name': 'Kurt Godel', '_id': 'kgodel',
'appointments': {
"A": {"begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.5, 'type': 'gra'},
"B": {'_id': 'B', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
"C": {'_id': 'C', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'}},
"employment": [
{'group': 'permutation', 'begin_date': '2014-06-01', 'end_date': '2015-06-01', 'status': 'phd'},
{'group': 'matrix', 'begin_year': '2020', 'end_day': '5', 'end_month': '12', 'end_year': '2020'},
{'group': 'permutation', 'begin_day': 4, 'begin_month': 9, 'begin_year': 2012, 'end_day': 5,
'end_month': 9, 'end_year': 2012, 'permanent': 'true'}
]},
{'name': 'MC Escher', '_id': 'mcescher',
'appointments':{
"A": {"begin_date": '2019-10-01', "end_date": '2019-10-31', 'grant': 'grant1', 'loading': 1.0, 'type': 'ss'},
"B": {"begin_date": '2019-11-01', "end_date": '2019-11-30', 'grant': 'grant2', 'loading': 0.5, 'type': 'ss'},
"C": {"begin_date": '2019-11-01', "end_date": '2019-11-30', 'grant': 'grant3', 'loading': 0.5, 'type': 'ss'},},
'employment': [
{'group': 'transformation', 'begin_date': '2018-07-24', 'end_date': dt.date(2020, 8, 1), 'status': 'postdoc'},
{'group': 'abstract', 'begin_year': 2010, 'end_day': 5, 'end_month': 12, 'end_year': 2020},
{'group': 'abstract', 'begin_date': '2012-06-30', 'end_date': '2012-09-05'}
]},
{'name': 'Johann Sebastian Bach', '_id': 'jsbach',
'appointments': {
"A": {"begin_date": '2019-12-01', "end_date": '2020-12-15', 'grant': 'grant1', 'loading': 0.9, 'type': 'pd'},
"B": {"begin_date": '2019-12-16', "end_date": '2020-12-31', 'grant': 'grant2', 'loading': 0.9, 'type': 'pd'},
"C": {"begin_date": '2019-12-01', "end_date": '2020-12-31', 'grant': 'grant3', 'loading': 0.1, 'type': 'pd'}},
'employment': [
{'group': 'bg', 'begin_date': '2019-02-03'}
]},
{'name': 'Ludwig Wittgenstein', '_id': 'lwittgenstein',
'appointments': {
"A": {'begin_date': '2019-12-10', 'end_date': '2019-12-20', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'}}},
{'name': 'Karl Popper', '_id': 'kpopper',
'appointments': {
"A": {'begin_date': '2019-12-25', 'end_date': '2019-12-31', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'}}},
{'name': 'GEM Anscombe', '_id': 'ganscombe', 'appointments': {}},
{'name': 'Sophie Germain', '_id': 'sgermain',
'appointments': {
"A": {'begin_date': '2019-09-02', 'end_date': '2019-09-06', 'grant': 'grant4', 'loading': 1.0, 'type': 'ss'}}},
]
@pytest.mark.parametrize(
"people,key,value,start,end,expected",
[(appointed_people, 'grant', 'grant1', None, None,
[{'person': 'kgodel', '_id': 'A', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.5, 'type': 'gra'},
{'person': 'kgodel', '_id': 'B', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'kgodel', '_id': 'C', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'mcescher', '_id': 'A', "begin_date": '2019-10-01', "end_date": '2019-10-31', 'grant': 'grant1', 'loading': 1.0, 'type': 'ss'},
{'person': 'jsbach', '_id': 'A', "begin_date": '2019-12-01', "end_date": '2020-12-15', 'grant': 'grant1', 'loading': 0.9, 'type': 'pd'},
]),
(appointed_people, None, None, '2019-09-01', '2019-09-30',
[{'person': 'kgodel', '_id': 'A', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.5, 'type': 'gra'},
{'person': 'kgodel', '_id': 'B', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'kgodel', '_id': 'C', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'sgermain', '_id': 'A', 'begin_date': '2019-09-02', 'end_date': '2019-09-06', 'grant': 'grant4', 'loading': 1.0, 'type': 'ss'} ,
]),
(appointed_people, ['loading', 'type'], [1.0, 'ss'], '2019-12-15', '2019-12-25',
[{'person': 'lwittgenstein', '_id': 'A', 'begin_date': '2019-12-10', 'end_date': '2019-12-20', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'},
{'person': 'kpopper', '_id': 'A', 'begin_date': '2019-12-25', 'end_date': '2019-12-31', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'}
]),
(appointed_people, ['loading', 'type', 'grant'], [0.9, 'pd', 'grant3'], None, None, []),
(appointed_people, None, None, None, None,
[{'person': 'kgodel', '_id': 'A', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.5, 'type': 'gra'},
{'person': 'kgodel', '_id': 'B', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'kgodel', '_id': 'C', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'mcescher', '_id': 'A', "begin_date": '2019-10-01', "end_date": '2019-10-31', 'grant': 'grant1', 'loading': 1.0, 'type': 'ss'},
{'person': 'mcescher', '_id' :'B', "begin_date": '2019-11-01', "end_date": '2019-11-30', 'grant': 'grant2', 'loading': 0.5, 'type': 'ss'},
{'person': 'mcescher', '_id': 'C', "begin_date": '2019-11-01', "end_date": '2019-11-30', 'grant': 'grant3', 'loading': 0.5, 'type': 'ss'},
{'person': 'jsbach', '_id': 'A', "begin_date": '2019-12-01', "end_date": '2020-12-15', 'grant': 'grant1', 'loading': 0.9, 'type': 'pd'},
{'person': 'jsbach', '_id': 'B', "begin_date": '2019-12-16', "end_date": '2020-12-31', 'grant': 'grant2', 'loading': 0.9, 'type': 'pd'},
{'person': 'jsbach', '_id': 'C', "begin_date": '2019-12-01', "end_date": '2020-12-31', 'grant': 'grant3', 'loading': 0.1, 'type': 'pd'},
{'person': 'lwittgenstein', '_id': 'A', 'begin_date': '2019-12-10', 'end_date': '2019-12-20', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'},
{'person': 'kpopper', '_id': 'A', 'begin_date': '2019-12-25', 'end_date': '2019-12-31', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'},
{'person': 'sgermain', '_id': 'A', 'begin_date': '2019-09-02', 'end_date': '2019-09-06', 'grant': 'grant4', 'loading': 1.0, 'type': 'ss'},
]),
(appointed_people, 'type', 'ss', '2019-10-21', '2019-09-01', 'begin date is after end date'),
(appointed_people, ['type', 'loading'], None, None, None, 'number of filter keys and filter values do not match'),
(appointed_people, 'type', 'pd', '2019-12-10', None, 'please enter both begin date and end date or neither'),
([{'name': 'Magical Person', '_id': 'mperson', 'appointments': {"A": {'begin_date': '2019-09-01', 'end_date': '2019-09-05',
'loading': 1.0, 'grant': 'grant1', 'type': 'imaginary'}}}], None, None,
None, None, 'invalid type imaginary for appointment A of mperson'
),
]
)
def test_collect_appts(people, key, value, start, end, expected):
    """Check collect_appts output, or its error message when it raises.

    ``expected`` is either the list of appointment dicts collect_appts
    must return, or (for the failure cases) the message of the
    ValueError/RuntimeError it must raise.
    """
    if isinstance(expected, str):
        # Error cases: run once under pytest.raises. The old try/except
        # pattern swallowed the first exception and invoked the function
        # under test a second time.
        with pytest.raises((ValueError, RuntimeError)) as excinfo:
            collect_appts(people, filter_key=key, filter_value=value,
                          begin_date=start, end_date=end)
        assert str(excinfo.value) == expected
    else:
        actual = collect_appts(people, filter_key=key, filter_value=value,
                               begin_date=start, end_date=end)
        assert actual == expected
# Shared fixture: every appointment across the test people, consumed by the
# grant_burn parametrizations below.
appts = collect_appts(appointed_people)
# Grant fixtures for the grant_burn tests. Each budget period allots
# student/postdoc/ss months; the expected values below express the
# corresponding remaining *_days per date.
grant1 = {'_id': 'grant1', 'alias': 'grant_one', 'budget': [
    {'begin_date': '2019-09-01', 'end_date': '2019-09-03', 'student_months': 1, 'postdoc_months': 0.5, 'ss_months': 0},
    {'begin_date': '2019-09-04', 'end_date': '2019-09-07', 'student_months': 1.5, 'postdoc_months': 0, 'ss_months': 0},
    {'begin_date': '2019-09-08', 'end_date': '2019-09-10', 'student_months': 2, 'postdoc_months': 1.5, 'ss_months': 0},
]}
grant2 = {'_id': 'grant2', 'alias': 'grant_two', 'budget': [
    {'begin_date': '2019-09-01', 'end_date': '2019-12-31', 'student_months': 4, 'postdoc_months': 2.5, 'ss_months': 1}
]}
# grant3 deliberately has no 'alias' key.
grant3 = {'_id': 'grant3', 'budget': [
    {'begin_date': '2019-09-01', 'end_date': '2019-10-31', 'student_months': 0, 'postdoc_months': 1, 'ss_months': 2},
    {'begin_date': '2019-11-01', 'end_date': '2019-12-31', 'student_months': 2, 'postdoc_months': 0.5, 'ss_months': 0}
]}
grant4 = {'_id': 'grant4', 'alias': 'grant_four', 'budget': [
    {'begin_date': '2019-09-01', 'end_date': '2019-09-07', 'student_months': 1, 'postdoc_months': 1, 'ss_months': 1}]}
@pytest.mark.parametrize(
"grant,appointments,start,end,expected",
[
(grant1, appts, None, None,
{dt.date(2019, 9, 1): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 29.5},
dt.date(2019, 9, 2): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 28.5},
dt.date(2019, 9, 3): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 27.5},
dt.date(2019, 9, 4): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 72.25},
dt.date(2019, 9, 5): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 71.25},
dt.date(2019, 9, 6): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 70.25},
dt.date(2019, 9, 7): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 69.25},
dt.date(2019, 9, 8): {'postdoc_days': 61.0, 'ss_days': 0.0, 'student_days': 129.25},
dt.date(2019, 9, 9): {'postdoc_days': 61.0, 'ss_days': 0.0, 'student_days': 128.25},
dt.date(2019, 9, 10): {'postdoc_days': 61.0, 'ss_days': 0.0, 'student_days': 127.25}}
),
(grant2, appts, '2019-12-15', '2019-12-31',
{dt.date(2019, 12, 15): {'postdoc_days': 76.25, 'ss_days': 9.5, 'student_days': 122.0},
dt.date(2019, 12, 16): {'postdoc_days': 75.35, 'ss_days': 8.5, 'student_days': 122.0},
dt.date(2019, 12, 17): {'postdoc_days': 74.45, 'ss_days': 7.5, 'student_days': 122.0},
dt.date(2019, 12, 18): {'postdoc_days': 73.55, 'ss_days': 6.5, 'student_days': 122.0},
dt.date(2019, 12, 19): {'postdoc_days': 72.65, 'ss_days': 5.5, 'student_days': 122.0},
dt.date(2019, 12, 20): {'postdoc_days': 71.75, 'ss_days': 4.5, 'student_days': 122.0},
dt.date(2019, 12, 21): {'postdoc_days': 70.85, 'ss_days': 4.5, 'student_days': 122.0},
dt.date(2019, 12, 22): {'postdoc_days': 69.95, 'ss_days': 4.5, 'student_days': 122.0},
dt.date(2019, 12, 23): {'postdoc_days': 69.05, 'ss_days': 4.5, 'student_days': 122.0},
dt.date(2019, 12, 24): {'postdoc_days': 68.15, 'ss_days': 4.5, 'student_days': 122.0},
dt.date(2019, 12, 25): {'postdoc_days': 67.25, 'ss_days': 3.5, 'student_days': 122.0},
dt.date(2019, 12, 26): {'postdoc_days': 66.35, 'ss_days': 2.5, 'student_days': 122.0},
dt.date(2019, 12, 27): {'postdoc_days': 65.45, 'ss_days': 1.5, 'student_days': 122.0},
dt.date(2019, 12, 28): {'postdoc_days': 64.55, 'ss_days': 0.5, 'student_days': 122.0},
dt.date(2019, 12, 29): {'postdoc_days': 63.65, 'ss_days': -0.5, 'student_days': 122.0},
dt.date(2019, 12, 30): {'postdoc_days': 62.75, 'ss_days': -1.5, 'student_days': 122.0},
dt.date(2019, 12, 31): {'postdoc_days': 61.85, 'ss_days': -2.5, 'student_days': 122.0}}
),
(grant3, appts, '2019-12-31', '2019-12-31',
{dt.date(2019, 12, 31): {'postdoc_days': 42.65, 'ss_days': 46.0, 'student_days': 61.0}}
),
(grant4, appts, None, None,
{dt.date(2019, 9, 1): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 2): {'postdoc_days': 30.5, 'ss_days': 29.5, 'student_days': 30.5},
dt.date(2019, 9, 3): {'postdoc_days': 30.5, 'ss_days': 28.5, 'student_days': 30.5},
dt.date(2019, 9, 4): {'postdoc_days': 30.5, 'ss_days': 27.5, 'student_days': 30.5},
dt.date(2019, 9, 5): {'postdoc_days': 30.5, 'ss_days': 26.5, 'student_days': 30.5},
dt.date(2019, 9, 6): {'postdoc_days': 30.5, 'ss_days': 25.5, 'student_days': 30.5},
dt.date(2019, 9, 7): {'postdoc_days': 30.5, 'ss_days': 25.5, 'student_days': 30.5}}
),
({'_id': 'magical_grant', 'alias': 'very_magical_grant'}, appts,
'2012-12-23', '2013-01-24', 'magical_grant has no specified budget'
),
(grant4, appointed_people[0].get('appointments'), None, None,
{dt.date(2019, 9, 1): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 2): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 3): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 4): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 5): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 6): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 7): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5}}
)
]
)
def test_grant_burn(grant, appointments, start, end, expected):
    """Check grant_burn's per-date burn map, or its error message.

    ``expected`` is either a ``{date: {..._days: float}}`` mapping or, for
    a grant with no budget, the ValueError message grant_burn must raise.
    """
    if isinstance(expected, str):
        # Single invocation under pytest.raises instead of the old
        # try/except pattern that called grant_burn twice on failure.
        with pytest.raises(ValueError) as excinfo:
            grant_burn(grant, appointments, begin_date=start, end_date=end)
        assert str(excinfo.value) == expected
    else:
        actual = grant_burn(grant, appointments, begin_date=start, end_date=end)
        assert actual == expected
# Meeting fixtures for validate_meeting: 'TBD'/'tbd' placeholders are the
# invalid values that should be flagged once the check date falls after the
# meeting (see the parametrization below).
meeting1 = {'_id': 'grp2020-06-15', 'journal_club': {'doi': 'TBD'}}
meeting2 = {'_id': 'grp2020-06-22', 'presentation': {'link': 'TBD'}}
meeting3 = {'_id': 'grp2020-06-29', 'presentation': {'link': '2002ak_grmtg_presnetation', 'title': 'tbd'}}
@pytest.mark.parametrize(
"meeting,date,expected",
[
(meeting1, dt.date(2020, 8, 15), 'grp2020-06-15 does not have a journal club doi'),
(meeting1, dt.date(2020, 5, 15), None),
(meeting2, dt.date(2020, 8, 15), 'grp2020-06-22 does not have a presentation link'),
(meeting2, dt.date(2020, 5, 15), None),
(meeting3, dt.date(2020, 8, 15), 'grp2020-06-29 does not have a presentation title'),
(meeting3, dt.date(2020, 5, 15), None),
]
)
def test_validate_meeting(meeting, date, expected):
    """Check validate_meeting for placeholder/missing meeting fields.

    ``expected`` is the ValueError message when the supplied date falls
    after the meeting and a field is invalid, or None for the passing
    (pre-meeting) cases.
    """
    if isinstance(expected, str):
        # Run once under pytest.raises rather than catching and re-invoking.
        with pytest.raises(ValueError) as excinfo:
            validate_meeting(meeting, date)
        assert str(excinfo.value) == expected
    else:
        assert validate_meeting(meeting, date) == expected
@pytest.mark.parametrize(
"person,grpname,expected",
[
(appointed_people[0], 'permutation',
[{'_id': 'kgodel', 'begin_date': dt.date(2014, 6, 1),
'end_date': dt.date(2015, 6, 1), 'status': 'phd', 'permanent': None},
{'_id': 'kgodel', 'begin_date': dt.date(2012, 9, 4),
'end_date': dt.date(2012, 9, 5), 'status': None, 'permanent': 'true'}]
),
(appointed_people[1], 'transformation',
[{'_id': 'mcescher', 'begin_date': dt.date(2018, 7, 24),
'end_date': dt.date(2020, 8, 1), 'status': 'postdoc', 'permanent': None}]
),
(appointed_people[2], 'bg', "WARNING: jsbach has no end date in employment for bg starting 2019-02-03"
),
(appointed_people[3], 'abstract', [])
]
)
def test_group_member_employment_start_end(person, grpname, expected):
    """Check the employment spans returned for a group member.

    ``expected`` is either the list of employment span dicts or, for a
    member with no end date, the RuntimeError message that must be raised.

    The original used a bare ``except:`` around the assert, which also
    caught AssertionError from a failed comparison and then misreported
    the failure as "DID NOT RAISE RuntimeError"; branch on the expected
    type instead.
    """
    if isinstance(expected, str):
        with pytest.raises(RuntimeError) as excinfo:
            group_member_employment_start_end(person, grpname)
        assert str(excinfo.value) == expected
    else:
        assert group_member_employment_start_end(person, grpname) == expected
| 39.197342 | 150 | 0.481753 | import pytest
import datetime as dt
from regolith.tools import (
filter_publications,
fuzzy_retrieval,
fragment_retrieval,
number_suffix,
latex_safe,
update_schemas,
merge_collections,
group,
is_fully_appointed,
group_member_ids,
group_member_employment_start_end,
month_and_year,
awards_grants_honors,
get_id_from_name,
date_to_rfc822,
key_value_pair_filter,
collection_str,
search_collection,
collect_appts,
grant_burn,
validate_meeting
)
def test_author_publications():
    # Smoke test: filter_publications must accept citations where the target
    # name appears as an author or as an editor without raising. No return
    # value is asserted here.
    citations = [{"author": ["CJ", "SJLB"]}, {"editor": "SJLB"}]
    filter_publications(citations, {"SJLB"})
def test_fuzzy_retrieval():
    """fuzzy_retrieval matches a document on any of the given keys: exact
    (case-sensitive) by default, case-insensitive with case_sensitive=False.
    """
    person = {
        "_id": "scopatz",
        "aka": [
            "Scopatz",
            "Scopatz, A",
            "Scopatz, A.",
            "Scopatz, A M",
            "Anthony Michael Scopatz",
        ],
        "name": "Anthony Scopatz",
    }
    # Exact match on _id.
    assert fuzzy_retrieval([person], ["aka", "name", "_id"],
                           "scopatz") == person
    # Wrong case: no match under the default (case-sensitive) behavior...
    assert fuzzy_retrieval([person], ["aka", "name", "_id"],
                           "scopatz, a") is None
    # ...but a match when case sensitivity is disabled.
    assert (
        fuzzy_retrieval(
            [person], ["aka", "name", "_id"], "scopatz, a",
            case_sensitive=False,
        )
        == person
    )
@pytest.mark.parametrize(
"input,expected",
[
(0, "th"),
(1, "st"),
(2, "nd"),
(3, "rd"),
(4, "th"),
(10, "th"),
(13, "th"),
(33, "rd"),
(None, ""),
("0", ""),
],
)
def test_number_suffix(input, expected):
    # Integers get their English ordinal suffix ("st"/"nd"/"rd"/"th");
    # per the parametrization, None and non-int values yield "".
    # NOTE: the parameter name `input` shadows the builtin but is fixed by
    # the parametrize argnames above.
    assert number_suffix(input) == expected
@pytest.mark.parametrize(
"input,expected",
[
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
}
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"}],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
}
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
},
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"}],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
}
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
"amount": "50 mph",
},
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"}],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
}
],
),
],
)
def test_merge_collections(input, expected):
    """merge_collections joins the second collection into the first via the
    'linked_to' key; see the parametrized cases for the expected merges."""
    proposals, grants = input
    merged = merge_collections(proposals, grants, "linked_to")
    assert merged == expected
@pytest.mark.parametrize(
"input,expected,kwargs",
[
("$hi", r"\$hi", {}),
(
r"Website: https://github.com/CJ-Wright/"
r"Masters_Thesis/raw/master/thesis.pdf hi",
r"Website: \url{https://github.com/CJ-Wright/"
r"Masters_Thesis/raw/master/thesis.pdf} hi",
{},
),
(
r"Website: https://github.com/CJ-Wright/"
r"Masters_Thesis/raw/master/thesis.pdf hi",
r"Website: \href{https://github.com/CJ-Wright/"
r"Masters_Thesis/raw/master/thesis.pdf} hi",
{"wrapper": "href"},
),
(
r"Website: https://github.com/CJ-Wright/"
r"Masters_Thesis/raw/master/thesis.pdf hi",
r"Website: https://github.com/CJ-Wright/"
r"Masters\_Thesis/raw/master/thesis.pdf hi",
{"url_check": False},
),
],
)
def test_latex_safe(input, expected, kwargs):
    # kwargs exercise latex_safe's options: wrapper= (\url vs \href) and
    # url_check= (escape instead of wrapping URLs).
    output = latex_safe(input, **kwargs)
    assert output == expected
DEFAULT_SCHEMA = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "integer",
},
},
},
},
},
}
USER_SCHEMA0 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {"type": "dict",
"schema": {"day": {"required": False, }, }, },
},
},
}
EXPECTED_SCHEMA0 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": False,
"type": "integer",
},
},
},
},
},
}
USER_SCHEMA1 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {"type": "dict",
"schema": {"day": {"type": "string", }, }, },
},
},
}
EXPECTED_SCHEMA1 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "string",
},
},
},
},
},
}
USER_SCHEMA2 = {
"expenses": {
"begin_day": {
"description": "The first day of expense",
"required": True,
"type": "string",
}
},
}
EXPECTED_SCHEMA2 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "integer",
},
},
},
},
"begin_day": {
"description": "The first day of expense",
"required": True,
"type": "string",
},
},
}
USER_SCHEMA3 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {"day": {"description": "The date on the receipt"}, },
},
},
},
}
EXPECTED_SCHEMA3 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "The date on the receipt",
"required": True,
"type": "integer",
},
},
},
},
},
}
USER_SCHEMA4 = {
"expenses": {
"itemized_expenses": {
"schema": {
"schema": {
"prepaid_expense": {
"description": "Expense paid by the direct billing",
"required": True,
"type": "float",
},
},
},
},
},
}
EXPECTED_SCHEMA4 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "integer",
},
"prepaid_expense": {
"description": "Expense paid by the direct billing",
"required": True,
"type": "float",
},
},
},
},
},
}
USER_SCHEMA5 = {}
EXPECTED_SCHEMA5 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "integer",
},
},
},
},
},
}
USER_SCHEMA6 = {"expenses": {}}
EXPECTED_SCHEMA6 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "integer",
},
},
},
},
},
}
@pytest.mark.parametrize(
"default_schema, user_schema, expected_schema",
[
(DEFAULT_SCHEMA, USER_SCHEMA0, EXPECTED_SCHEMA0),
(DEFAULT_SCHEMA, USER_SCHEMA1, EXPECTED_SCHEMA1),
(DEFAULT_SCHEMA, USER_SCHEMA2, EXPECTED_SCHEMA2),
(DEFAULT_SCHEMA, USER_SCHEMA3, EXPECTED_SCHEMA3),
(DEFAULT_SCHEMA, USER_SCHEMA4, EXPECTED_SCHEMA4),
(DEFAULT_SCHEMA, USER_SCHEMA5, EXPECTED_SCHEMA5),
(DEFAULT_SCHEMA, USER_SCHEMA6, EXPECTED_SCHEMA6),
],
)
def test_update_schemas(default_schema, user_schema, expected_schema):
    # The user schema overlays the default: nested keys are merged, user
    # values win on conflict (see USER_SCHEMA0..6 / EXPECTED_SCHEMA0..6).
    updated_schema = update_schemas(default_schema, user_schema)
    assert updated_schema == expected_schema
def test_group():
    """group() buckets documents by the value of a key; documents missing
    that key are omitted from the result."""
    first = {"k0": "v00", "k1": "v01"}
    second = {"k0": "v10", "k1": "v11"}
    keyless = {"k1": "v21"}  # no "k0": must not appear in any bucket
    third = {"k0": "v00", "k1": "v31"}
    documents = iter([first, second, keyless, third])
    expected = {"v00": [first, third], "v10": [second]}
    assert group(documents, "k0") == expected
ppl_coll = [
{
"_id": "m1",
"name": "member1",
"education": [{
"group": "bg",
"institution": "columbiau",
"degree": "PhD",
"department": "apam",
"begin_year": 2016
}],
"employment": [{
"begin_year": 2020,
"begin_month": 1,
"organization": "columbiau",
"position": "Undergraduate Researcher",
"advisor": "sbillinge",
"status": "undergrad"
}]
},
{
"_id": "nm1",
"name": "non-member1",
"education": [{
"institution": "columbiau",
"degree": "PhD",
"department": "apam",
"begin_year": 2016
}],
"employment": [{
"begin_year": 2020,
"begin_month": 1,
"organization": "columbiau",
"position": "Undergraduate Researcher",
"advisor": "sbillinge",
"status": "undergrad"
}]
},
{
"_id": "m2",
"name": "member2",
"education": [{
"institution": "columbiau",
"degree": "PhD",
"department": "apam",
"begin_year": 2016
}],
"employment": [{
"begin_year": 2020,
"begin_month": 1,
"group": "bg",
"organization": "columbiau",
"position": "Undergraduate Researcher",
"advisor": "sbillinge",
"status": "undergrad"
}]
},
]
@pytest.mark.parametrize(
"input,expected",
[
(ppl_coll, set(["m1", "m2"])),
],
)
def test_group_member_ids(input, expected):
    # Members of group "bg" are recognized whether the group tag sits in an
    # education entry (m1) or an employment entry (m2); nm1 has neither.
    actual = group_member_ids(input, "bg")
    assert actual == expected
p1 = {
"_id": "scopatz",
"aka": [
"Scopatz",
"Scopatz, A",
"Scopatz, A.",
"Scopatz, A M",
"Anthony Michael Scopatz",
],
"name": "Anthony Scopatz",
}
p2 = {
"_id": "abc",
"aka": [
"A. BC",
"BC, A",
"Anthony BC",
],
"name": "Anthony Bill Chris",
}
@pytest.mark.parametrize(
"input, expected",
[
(([p1, p2], ["aka", "name", "_id"],
"Anth", False),[p1,p2]),
(([p1, p2], ["aka", "name", "_id"],
"scopatz, a", True),[]),
(([p1, p2], ["aka", "name", "_id"],
"scopatz, a", False),[p1]),
(([p1, p2], ["aka", "name", "_id"],
"ill", False),[p2]),
],
)
def test_fragment_retrieval(input, expected):
    """fragment_retrieval returns every record whose listed fields contain
    the fragment, honoring the case_sensitive flag."""
    collection, fields, fragment, case = input
    result = fragment_retrieval(collection, fields, fragment,
                                case_sensitive=case)
    assert result == expected
@pytest.mark.parametrize(
"input, expected",
[
((None, None), "present"),
((None, 2002), "2002"),
((5,2002), "May 2002"),
],
)
def test_month_and_year(input, expected):
    """month_and_year renders "Month Year", falling back to the bare year
    when the month is None and to "present" when both are None."""
    month, year = input
    assert month_and_year(month, year) == expected
@pytest.mark.parametrize(
"appts,start,end,expected",
[
({"name": "Kurt Godel",
"_id": "kgodel",
"appointments": {
"A": {"begin_year": 2017, "begin_month": 6, "begin_day": 1, "end_year": 2017, "end_month": 6, "end_day": 30,
"grant": "grant1", "loading": 1.0, "type": "pd",}}},
"2017-06-01", "2017-07-01", False),
({"name": "MC Escher",
"_id": "mcescher",
"appointments": {
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-30', "grant": "grant1", "loading": 0.5, "type": "pd",},
"B": {"begin_date": '2017-06-01', "end_date": '2017-06-30', "grant": "grant2", "loading": 0.5, "type": "pd",
}}},"2017-06-01", "2017-06-30", True),
({"name": "Johann Sebastian Bach",
"_id": "jsbach",
"appointments":{
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-30', "grant": "grant1", "loading": 0.5, "type": "pd",},
"B": {"begin_date": '2017-06-02', "end_date": '2017-06-29', "grant": "grant2", "loading": 0.5, "type": "pd",
}}}, "2017-06-01", "2017-06-30", False),
({"name": "Evariste Galois",
"_id": "egalois",
"appointments": {
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-15', "grant": "grant1", "loading": 1.0, "type": "pd",},
"B": {"begin_date": '2017-06-16', "end_date": '2017-06-30', "grant": "grant2", "loading": 1.0, "type": "pd",
}}},"2017-06-01", "2017-06-30", True),
({"name": "Ludwig Wittgenstein",
"_id": "lwittgenstein",
"appointments": {
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-15', "grant": "grant1", "loading": 1.0, "type": "pd",},
"B": {"begin_date": '2017-06-17', "end_date": '2017-06-30', "grant": "grant2", "loading": 1.0, "type": "pd",},
"C": {"begin_date": '2017-07-01', "end_date": '2017-07-30', "grant": "grant3", "loading": 1.0, "type": "pd",
}}}, "2017-06-01", "2017-06-30", False),
({"name": "Buckminster Fuller",
"_id": "bfuller",
"appointments":{
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-30', "grant": "grant1", "loading": 1.0, "type": "pd",},
"B": {"begin_date": '2017-06-17', "end_date": '2017-06-30', "grant": "grant2", "loading": 1.0, "type": "pd",
}}}, "2017-06-01", "2017-06-30", False),
({"name": "Lorem Ipsum",
"_id": "lipsum",
"appointments":{
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-30', "grant": "grant1", "loading": 1.0,"type": "pd",},
"B": {"begin_date": '2017-06-17', "end_date": '2017-06-30', "grant": "grant2", "loading": 1.0, "type": "pd",}
}}, "2017-06-01", "2017-06-30", False),
],
)
def test_is_fully_appointed(appts, start, end, expected):
    # True only when the person's appointment loadings cover every day of
    # [start, end] at exactly full loading (gaps and over-loading both fail).
    actual = is_fully_appointed(appts, start, end)
    assert actual == expected
@pytest.mark.parametrize(
"input, expected",
[
({'funding':[
{"name": "Omega Laser User's Group Travel Award",
"value": 1100,
"year": 2013},
{"name": "NIF User's Group Travel Award",
"value": 1150,
"year": 2013}]},
[{'description': "Omega Laser User's Group Travel Award (\\$1,100)",
'year': 2013,
'_key': 2013.0},
{'description':"NIF User's Group Travel Award (\\$1,150)",
'year': 2013,
'_key': 2013.0}]),
({'funding':[
{"name": "Omega Laser User's Group Travel Award",
"value": 1100,
"year": 2013}],
"service":[{"name": "International Steering Committee", "role": "chair",
"type": "profession", "year": 2020,
"month": 3, "notes": ["something"]}]},
[{"description":"International Steering Committee",
"year":2020,
"_key":2020.03},
{'description': "Omega Laser User's Group Travel Award (\\$1,100)",
'year': 2013,
'_key': 2013.0}]
)
],
)
def test_awards_grants_honors(input, expected):
    """awards_grants_honors builds description/year/_key entries from a
    person's 'funding' and 'service' lists.

    Renamed from ``test_get_id_from_name``: that name was also used by a
    later test in this module, so this definition was shadowed and never
    collected by pytest.
    """
    assert awards_grants_honors(input) == expected
@pytest.mark.parametrize(
"input, expected",
[
(([{'_id':'afriend','aka':['AB Friend','Tony Friend'], 'name': 'Anthony B Friend'}], 'Simon'), None),
(([{'_id':'afriend','aka':['AB Friend','Tony Friend'], 'name': 'Anthony B Friend'}], 'Anthony B Friend'),
'afriend'),
(([{'_id':'afriend','aka':['AB Friend','Tony Friend'], 'name': 'Anthony B Friend'},
{'_id':'aeinstein','aka':['Einstein'], 'name': 'Albert Einstein'}],
'Albert Einstein'),
'aeinstein')
],
)
def test_get_id_from_name(input, expected):
    # get_id_from_name resolves a person's full name to their _id,
    # returning None when no document matches.
    assert(get_id_from_name(input[0],input[1]) == expected)
@pytest.mark.parametrize(
"input, expected",
[
((2012, 'Jan', 18), 'Wed, 18 Jan 2012 00:00:00 -0000'),
((2020, 6, 22), 'Mon, 22 Jun 2020 00:00:00 -0000'),
],
)
def test_date_to_rfc822(input, expected):
    # Accepts the month as either a name ('Jan') or a number (6); the
    # output is an RFC 822 date string with a -0000 offset.
    assert(date_to_rfc822(input[0], input[1], input[2]) == expected)
person1 = {
"_id": "scopatz",
"aka": [
"Scopatz",
"Scopatz, A",
"Scopatz, A.",
"Scopatz, A M",
"Anthony Michael Scopatz",
],
"name": "Anthony Scopatz",
"position": "Professor"
}
person2 = {
"_id": "abc",
"aka": [
"A. BC",
"BC, A",
"Anthony BC",
],
"name": "Anthony Bill Chris",
"position": "Professor"
}
person3 = {
"_id": "jdoe",
"aka": [
"A. BC",
"BC, A",
"Anthony BC",
],
"name": "John Doe",
}
people = [person1, person2, person3]
@pytest.mark.parametrize(
"input, expected",
[
((people, ['name', 'Doe']), [person3]),
((people, ['name', 'Jerry']), []),
((people, ['position', 'Prof']), [person1, person2]),
((people, ['position', 'Prof', 'name', 'Chris']), [person2]),
],
)
def test_key_value_pair_filter(input, expected):
    # The filter list alternates key, fragment, key, fragment...; a document
    # must match every pair (substring match, e.g. 'Prof' hits 'Professor').
    assert(key_value_pair_filter(input[0], input[1]) == expected)
@pytest.mark.parametrize(
"input, expected",
[
(([person3], None), "jdoe \n"),
(([], None), ''),
(([person1, person2], ['position']), "scopatz position: Professor \nabc position: Professor \n"),
(([person2], ['position']), "abc position: Professor \n"),
],
)
def test_collection_str(input, expected):
    # Renders one line per document: the _id, plus "key: value" for each
    # requested key (None means _id only); empty collection -> ''.
    assert(collection_str(input[0], input[1]) == expected)
@pytest.mark.parametrize(
"input, expected",
[
((people, ['name', 'Doe'], None), "jdoe \n"),
((people, ['name', 'Jerry'], None), ""),
((people, ['position', 'Prof', 'name', 'Chris'], None), "abc \n"),
((people, ['position', 'prof', 'name', 'Chris'], ['position']), "abc position: Professor \n"),
],
)
def test_search_collection(input, expected):
    # Combines key_value_pair_filter-style filtering (arg 2) with
    # collection_str-style rendering of the keys in arg 3.
    assert(search_collection(input[0], input[1], input[2]) == expected)
appointed_people = [
{'name': 'Kurt Godel', '_id': 'kgodel',
'appointments': {
"A": {"begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.5, 'type': 'gra'},
"B": {'_id': 'B', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
"C": {'_id': 'C', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'}},
"employment": [
{'group': 'permutation', 'begin_date': '2014-06-01', 'end_date': '2015-06-01', 'status': 'phd'},
{'group': 'matrix', 'begin_year': '2020', 'end_day': '5', 'end_month': '12', 'end_year': '2020'},
{'group': 'permutation', 'begin_day': 4, 'begin_month': 9, 'begin_year': 2012, 'end_day': 5,
'end_month': 9, 'end_year': 2012, 'permanent': 'true'}
]},
{'name': 'MC Escher', '_id': 'mcescher',
'appointments':{
"A": {"begin_date": '2019-10-01', "end_date": '2019-10-31', 'grant': 'grant1', 'loading': 1.0, 'type': 'ss'},
"B": {"begin_date": '2019-11-01', "end_date": '2019-11-30', 'grant': 'grant2', 'loading': 0.5, 'type': 'ss'},
"C": {"begin_date": '2019-11-01', "end_date": '2019-11-30', 'grant': 'grant3', 'loading': 0.5, 'type': 'ss'},},
'employment': [
{'group': 'transformation', 'begin_date': '2018-07-24', 'end_date': dt.date(2020, 8, 1), 'status': 'postdoc'},
{'group': 'abstract', 'begin_year': 2010, 'end_day': 5, 'end_month': 12, 'end_year': 2020},
{'group': 'abstract', 'begin_date': '2012-06-30', 'end_date': '2012-09-05'}
]},
{'name': 'Johann Sebastian Bach', '_id': 'jsbach',
'appointments': {
"A": {"begin_date": '2019-12-01', "end_date": '2020-12-15', 'grant': 'grant1', 'loading': 0.9, 'type': 'pd'},
"B": {"begin_date": '2019-12-16', "end_date": '2020-12-31', 'grant': 'grant2', 'loading': 0.9, 'type': 'pd'},
"C": {"begin_date": '2019-12-01', "end_date": '2020-12-31', 'grant': 'grant3', 'loading': 0.1, 'type': 'pd'}},
'employment': [
{'group': 'bg', 'begin_date': '2019-02-03'}
]},
{'name': 'Ludwig Wittgenstein', '_id': 'lwittgenstein',
'appointments': {
"A": {'begin_date': '2019-12-10', 'end_date': '2019-12-20', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'}}},
{'name': 'Karl Popper', '_id': 'kpopper',
'appointments': {
"A": {'begin_date': '2019-12-25', 'end_date': '2019-12-31', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'}}},
{'name': 'GEM Anscombe', '_id': 'ganscombe', 'appointments': {}},
{'name': 'Sophie Germain', '_id': 'sgermain',
'appointments': {
"A": {'begin_date': '2019-09-02', 'end_date': '2019-09-06', 'grant': 'grant4', 'loading': 1.0, 'type': 'ss'}}},
]
@pytest.mark.parametrize(
"people,key,value,start,end,expected",
[(appointed_people, 'grant', 'grant1', None, None,
[{'person': 'kgodel', '_id': 'A', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.5, 'type': 'gra'},
{'person': 'kgodel', '_id': 'B', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'kgodel', '_id': 'C', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'mcescher', '_id': 'A', "begin_date": '2019-10-01', "end_date": '2019-10-31', 'grant': 'grant1', 'loading': 1.0, 'type': 'ss'},
{'person': 'jsbach', '_id': 'A', "begin_date": '2019-12-01', "end_date": '2020-12-15', 'grant': 'grant1', 'loading': 0.9, 'type': 'pd'},
]),
(appointed_people, None, None, '2019-09-01', '2019-09-30',
[{'person': 'kgodel', '_id': 'A', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.5, 'type': 'gra'},
{'person': 'kgodel', '_id': 'B', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'kgodel', '_id': 'C', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'sgermain', '_id': 'A', 'begin_date': '2019-09-02', 'end_date': '2019-09-06', 'grant': 'grant4', 'loading': 1.0, 'type': 'ss'} ,
]),
(appointed_people, ['loading', 'type'], [1.0, 'ss'], '2019-12-15', '2019-12-25',
[{'person': 'lwittgenstein', '_id': 'A', 'begin_date': '2019-12-10', 'end_date': '2019-12-20', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'},
{'person': 'kpopper', '_id': 'A', 'begin_date': '2019-12-25', 'end_date': '2019-12-31', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'}
]),
(appointed_people, ['loading', 'type', 'grant'], [0.9, 'pd', 'grant3'], None, None, []),
(appointed_people, None, None, None, None,
[{'person': 'kgodel', '_id': 'A', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.5, 'type': 'gra'},
{'person': 'kgodel', '_id': 'B', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'kgodel', '_id': 'C', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'mcescher', '_id': 'A', "begin_date": '2019-10-01', "end_date": '2019-10-31', 'grant': 'grant1', 'loading': 1.0, 'type': 'ss'},
{'person': 'mcescher', '_id' :'B', "begin_date": '2019-11-01', "end_date": '2019-11-30', 'grant': 'grant2', 'loading': 0.5, 'type': 'ss'},
{'person': 'mcescher', '_id': 'C', "begin_date": '2019-11-01', "end_date": '2019-11-30', 'grant': 'grant3', 'loading': 0.5, 'type': 'ss'},
{'person': 'jsbach', '_id': 'A', "begin_date": '2019-12-01', "end_date": '2020-12-15', 'grant': 'grant1', 'loading': 0.9, 'type': 'pd'},
{'person': 'jsbach', '_id': 'B', "begin_date": '2019-12-16', "end_date": '2020-12-31', 'grant': 'grant2', 'loading': 0.9, 'type': 'pd'},
{'person': 'jsbach', '_id': 'C', "begin_date": '2019-12-01', "end_date": '2020-12-31', 'grant': 'grant3', 'loading': 0.1, 'type': 'pd'},
{'person': 'lwittgenstein', '_id': 'A', 'begin_date': '2019-12-10', 'end_date': '2019-12-20', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'},
{'person': 'kpopper', '_id': 'A', 'begin_date': '2019-12-25', 'end_date': '2019-12-31', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'},
{'person': 'sgermain', '_id': 'A', 'begin_date': '2019-09-02', 'end_date': '2019-09-06', 'grant': 'grant4', 'loading': 1.0, 'type': 'ss'},
]),
(appointed_people, 'type', 'ss', '2019-10-21', '2019-09-01', 'begin date is after end date'),
(appointed_people, ['type', 'loading'], None, None, None, 'number of filter keys and filter values do not match'),
(appointed_people, 'type', 'pd', '2019-12-10', None, 'please enter both begin date and end date or neither'),
([{'name': 'Magical Person', '_id': 'mperson', 'appointments': {"A": {'begin_date': '2019-09-01', 'end_date': '2019-09-05',
'loading': 1.0, 'grant': 'grant1', 'type': 'imaginary'}}}], None, None,
None, None, 'invalid type imaginary for appointment A of mperson'
),
]
)
def test_collect_appts(people, key, value, start, end, expected):
    """Exercise collect_appts.

    ``expected`` is either the list of appointments the call should return,
    or — when the inputs are invalid — the message of the ValueError /
    RuntimeError the call should raise.
    """
    def invoke():
        return collect_appts(people, filter_key=key, filter_value=value,
                             begin_date=start, end_date=end)

    try:
        assert invoke() == expected
    except ValueError:
        # The call raised: re-run under pytest.raises and compare the message.
        with pytest.raises(ValueError) as err:
            invoke()
        assert str(err.value) == expected
    except RuntimeError:
        with pytest.raises(RuntimeError) as err:
            invoke()
        assert str(err.value) == expected
# Shared fixtures for the grant_burn tests below.
# Full appointment list derived from the `appointed_people` records above.
appts = collect_appts(appointed_people)
# Grant fixtures: each 'budget' is a list of date-bounded allocation periods
# with per-period month budgets for student, postdoc, and 'ss' appointments.
grant1 = {'_id': 'grant1', 'alias': 'grant_one', 'budget': [
    {'begin_date': '2019-09-01', 'end_date': '2019-09-03', 'student_months': 1, 'postdoc_months': 0.5, 'ss_months': 0},
    {'begin_date': '2019-09-04', 'end_date': '2019-09-07', 'student_months': 1.5, 'postdoc_months': 0, 'ss_months': 0},
    {'begin_date': '2019-09-08', 'end_date': '2019-09-10', 'student_months': 2, 'postdoc_months': 1.5, 'ss_months': 0},
]}
grant2 = {'_id': 'grant2', 'alias': 'grant_two', 'budget': [
    {'begin_date': '2019-09-01', 'end_date': '2019-12-31', 'student_months': 4, 'postdoc_months': 2.5, 'ss_months': 1}
]}
# NOTE(review): grant3 deliberately has no 'alias' key — presumably exercising
# the alias-less code path in grant_burn; confirm against its implementation.
grant3 = {'_id': 'grant3', 'budget': [
    {'begin_date': '2019-09-01', 'end_date': '2019-10-31', 'student_months': 0, 'postdoc_months': 1, 'ss_months': 2},
    {'begin_date': '2019-11-01', 'end_date': '2019-12-31', 'student_months': 2, 'postdoc_months': 0.5, 'ss_months': 0}
]}
grant4 = {'_id': 'grant4', 'alias': 'grant_four', 'budget': [
    {'begin_date': '2019-09-01', 'end_date': '2019-09-07', 'student_months': 1, 'postdoc_months': 1, 'ss_months': 1}]}
@pytest.mark.parametrize(
"grant,appointments,start,end,expected",
[
(grant1, appts, None, None,
{dt.date(2019, 9, 1): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 29.5},
dt.date(2019, 9, 2): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 28.5},
dt.date(2019, 9, 3): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 27.5},
dt.date(2019, 9, 4): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 72.25},
dt.date(2019, 9, 5): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 71.25},
dt.date(2019, 9, 6): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 70.25},
dt.date(2019, 9, 7): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 69.25},
dt.date(2019, 9, 8): {'postdoc_days': 61.0, 'ss_days': 0.0, 'student_days': 129.25},
dt.date(2019, 9, 9): {'postdoc_days': 61.0, 'ss_days': 0.0, 'student_days': 128.25},
dt.date(2019, 9, 10): {'postdoc_days': 61.0, 'ss_days': 0.0, 'student_days': 127.25}}
),
(grant2, appts, '2019-12-15', '2019-12-31',
{dt.date(2019, 12, 15): {'postdoc_days': 76.25, 'ss_days': 9.5, 'student_days': 122.0},
dt.date(2019, 12, 16): {'postdoc_days': 75.35, 'ss_days': 8.5, 'student_days': 122.0},
dt.date(2019, 12, 17): {'postdoc_days': 74.45, 'ss_days': 7.5, 'student_days': 122.0},
dt.date(2019, 12, 18): {'postdoc_days': 73.55, 'ss_days': 6.5, 'student_days': 122.0},
dt.date(2019, 12, 19): {'postdoc_days': 72.65, 'ss_days': 5.5, 'student_days': 122.0},
dt.date(2019, 12, 20): {'postdoc_days': 71.75, 'ss_days': 4.5, 'student_days': 122.0},
dt.date(2019, 12, 21): {'postdoc_days': 70.85, 'ss_days': 4.5, 'student_days': 122.0},
dt.date(2019, 12, 22): {'postdoc_days': 69.95, 'ss_days': 4.5, 'student_days': 122.0},
dt.date(2019, 12, 23): {'postdoc_days': 69.05, 'ss_days': 4.5, 'student_days': 122.0},
dt.date(2019, 12, 24): {'postdoc_days': 68.15, 'ss_days': 4.5, 'student_days': 122.0},
dt.date(2019, 12, 25): {'postdoc_days': 67.25, 'ss_days': 3.5, 'student_days': 122.0},
dt.date(2019, 12, 26): {'postdoc_days': 66.35, 'ss_days': 2.5, 'student_days': 122.0},
dt.date(2019, 12, 27): {'postdoc_days': 65.45, 'ss_days': 1.5, 'student_days': 122.0},
dt.date(2019, 12, 28): {'postdoc_days': 64.55, 'ss_days': 0.5, 'student_days': 122.0},
dt.date(2019, 12, 29): {'postdoc_days': 63.65, 'ss_days': -0.5, 'student_days': 122.0},
dt.date(2019, 12, 30): {'postdoc_days': 62.75, 'ss_days': -1.5, 'student_days': 122.0},
dt.date(2019, 12, 31): {'postdoc_days': 61.85, 'ss_days': -2.5, 'student_days': 122.0}}
),
(grant3, appts, '2019-12-31', '2019-12-31',
{dt.date(2019, 12, 31): {'postdoc_days': 42.65, 'ss_days': 46.0, 'student_days': 61.0}}
),
(grant4, appts, None, None,
{dt.date(2019, 9, 1): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 2): {'postdoc_days': 30.5, 'ss_days': 29.5, 'student_days': 30.5},
dt.date(2019, 9, 3): {'postdoc_days': 30.5, 'ss_days': 28.5, 'student_days': 30.5},
dt.date(2019, 9, 4): {'postdoc_days': 30.5, 'ss_days': 27.5, 'student_days': 30.5},
dt.date(2019, 9, 5): {'postdoc_days': 30.5, 'ss_days': 26.5, 'student_days': 30.5},
dt.date(2019, 9, 6): {'postdoc_days': 30.5, 'ss_days': 25.5, 'student_days': 30.5},
dt.date(2019, 9, 7): {'postdoc_days': 30.5, 'ss_days': 25.5, 'student_days': 30.5}}
),
({'_id': 'magical_grant', 'alias': 'very_magical_grant'}, appts,
'2012-12-23', '2013-01-24', 'magical_grant has no specified budget'
),
(grant4, appointed_people[0].get('appointments'), None, None,
{dt.date(2019, 9, 1): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 2): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 3): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 4): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 5): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 6): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 7): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5}}
)
]
)
def test_grant_burn(grant, appointments, start, end, expected):
    """Exercise grant_burn.

    ``expected`` is either the per-day burn table (date -> remaining days by
    appointment type) or, for a malformed grant, the ValueError message.
    """
    def invoke():
        return grant_burn(grant, appointments, begin_date=start, end_date=end)

    try:
        assert invoke() == expected
    except ValueError:
        # The call raised: re-run under pytest.raises and compare the message.
        with pytest.raises(ValueError) as err:
            invoke()
        assert str(err.value) == expected
# Meeting fixtures for validate_meeting: each carries a placeholder value
# ('TBD'/'tbd') in one required field, which — per the parametrize table
# below — is flagged only once the meeting date has passed.
meeting1 = {'_id': 'grp2020-06-15', 'journal_club': {'doi': 'TBD'}}
meeting2 = {'_id': 'grp2020-06-22', 'presentation': {'link': 'TBD'}}
meeting3 = {'_id': 'grp2020-06-29', 'presentation': {'link': '2002ak_grmtg_presnetation', 'title': 'tbd'}}
@pytest.mark.parametrize(
"meeting,date,expected",
[
(meeting1, dt.date(2020, 8, 15), 'grp2020-06-15 does not have a journal club doi'),
(meeting1, dt.date(2020, 5, 15), None),
(meeting2, dt.date(2020, 8, 15), 'grp2020-06-22 does not have a presentation link'),
(meeting2, dt.date(2020, 5, 15), None),
(meeting3, dt.date(2020, 8, 15), 'grp2020-06-29 does not have a presentation title'),
(meeting3, dt.date(2020, 5, 15), None),
]
)
def test_validate_meeting(meeting, date, expected):
    """validate_meeting either returns ``expected`` (None for a future or
    complete meeting) or raises a ValueError whose message is ``expected``."""
    try:
        result = validate_meeting(meeting, date)
    except ValueError:
        # The call raised: re-run under pytest.raises and compare the message.
        with pytest.raises(ValueError) as err:
            validate_meeting(meeting, date)
        assert str(err.value) == expected
    else:
        assert result == expected
@pytest.mark.parametrize(
"person,grpname,expected",
[
(appointed_people[0], 'permutation',
[{'_id': 'kgodel', 'begin_date': dt.date(2014, 6, 1),
'end_date': dt.date(2015, 6, 1), 'status': 'phd', 'permanent': None},
{'_id': 'kgodel', 'begin_date': dt.date(2012, 9, 4),
'end_date': dt.date(2012, 9, 5), 'status': None, 'permanent': 'true'}]
),
(appointed_people[1], 'transformation',
[{'_id': 'mcescher', 'begin_date': dt.date(2018, 7, 24),
'end_date': dt.date(2020, 8, 1), 'status': 'postdoc', 'permanent': None}]
),
(appointed_people[2], 'bg', "WARNING: jsbach has no end date in employment for bg starting 2019-02-03"
),
(appointed_people[3], 'abstract', [])
]
)
def test_group_member_employment_start_end(person, grpname, expected):
    """Check employment-period extraction for one group member.

    ``expected`` is either the list of employment records for ``grpname`` or,
    when the person's record is malformed (e.g. a missing end date), the
    message of the RuntimeError the call should raise.
    """
    try:
        actual = group_member_employment_start_end(person, grpname)
        assert actual == expected
    # Catch only RuntimeError, matching the sibling tests above. The original
    # bare `except:` also swallowed the AssertionError from a value mismatch
    # (misreporting it as a missing-RuntimeError failure) as well as
    # KeyboardInterrupt/SystemExit.
    except RuntimeError:
        with pytest.raises(RuntimeError) as excinfo:
            group_member_employment_start_end(person, grpname)
        assert str(excinfo.value) == expected
| true | true |
1c38840516f5d7445dc6d7a99403e818a10956dd | 2,932 | py | Python | osim-rl/examples/arm.py | lancerane/NIPS-2018-AI-for-Prosthetics | 7689646e2d079ffcbcde898ece25d2cf78c132c7 | [
"MIT"
] | 3 | 2019-04-01T10:14:04.000Z | 2022-02-16T07:15:49.000Z | osim-rl/examples/arm.py | lancerane/NIPS-2018-AI-for-Prosthetics | 7689646e2d079ffcbcde898ece25d2cf78c132c7 | [
"MIT"
] | 1 | 2019-07-10T07:57:48.000Z | 2019-07-11T07:41:43.000Z | osim-rl/examples/arm.py | lancerane/NIPS-2018-AI-for-Prosthetics | 7689646e2d079ffcbcde898ece25d2cf78c132c7 | [
"MIT"
] | 3 | 2019-05-12T09:41:00.000Z | 2021-11-03T20:54:36.000Z | import os
from osim.env import OsimEnv
import pprint
import numpy as np
class Arm3dEnv(OsimEnv):
    """OpenSim 3D arm environment.

    The observation concatenates body/joint kinematics with a random 3D
    target point (``current_objective``); the reward is the negative
    Euclidean distance from the model's 'Handle' marker to that target.
    """
    model_path = os.path.join(os.path.dirname(__file__), '../osim/models/MoBL_ARMS_J_Simple_032118.osim')
    time_limit = 200
    # Current 3D target; redrawn uniformly in [-0.5, 0.5]^3 on every reset().
    current_objective = np.array([0,0,0])

    def is_done(self):
        # Never terminates early; episode length is bounded by time_limit.
        # NOTE(review): the original "pelvis is too low" comment was stale
        # (this is an arm model), and state_desc is fetched but unused.
        state_desc = self.get_state_desc()
        return False

    def get_observation(self):
        """Build the flat observation vector (length 168; NaNs zeroed).

        Layout depends on the iteration order of the state dicts, so the
        statement order here must not be changed.
        """
        state_desc = self.get_state_desc()
        # Augmented environment from the L2R challenge
        res = []
        # Map some of the state variables to the observation vector
        for body_part in state_desc["body_pos_rot"].keys():
            res = res + state_desc["body_pos_rot"][body_part][2:]
            res = res + state_desc["body_pos"][body_part][0:2]
            res = res + state_desc["body_vel_rot"][body_part][2:]
            res = res + state_desc["body_vel"][body_part][0:2]
            res = res + state_desc["body_acc_rot"][body_part][2:]
            res = res + state_desc["body_acc"][body_part][0:2]
        for joint in state_desc["joint_pos"].keys():
            res = res + state_desc["joint_pos"][joint]
            res = res + state_desc["joint_vel"][joint]
            res = res + state_desc["joint_acc"][joint]
        res = res + state_desc["misc"]["mass_center_pos"] + state_desc["misc"]["mass_center_vel"] + state_desc["misc"]["mass_center_acc"]
        # Append the target so the policy can condition on it.
        res += self.current_objective.tolist()
        res = np.array(res)
        # The simulator can emit NaNs; zero them so the policy input is finite.
        res[np.isnan(res)] = 0
        return res

    def get_observation_space_size(self):
        # Must match the length of the vector built in get_observation().
        return 168

    def reset_objective(self):
        """Draw a new random target, uniform in [-0.5, 0.5] per axis."""
        self.current_objective = np.random.uniform(-0.5,0.5,3)

    def reset(self):
        # Print the last reward of the finished episode for quick monitoring.
        print(self.reward())
        self.reset_objective()
        return super(Arm3dEnv, self).reset()

    def reward(self):
        """Return -||Handle marker position - current_objective||, or 0 on
        the first step (no previous state yet)."""
        # Get the current state and the last state
        prev_state_desc = self.get_prev_state_desc()
        if not prev_state_desc:
            return 0
        state_desc = self.get_state_desc()
        res = 0  # NOTE(review): dead assignment; overridden by the return below.
        # # Penalize movement of the pelvis
        # res = -(prev_state_desc["misc"]["mass_center_pos"][0] - state_desc["misc"]["mass_center_pos"][0])**2\
        #     -(prev_state_desc["misc"]["mass_center_pos"][1] - state_desc["misc"]["mass_center_pos"][1])**2
        # # Penalize very low position of the pelvis
        # res += -(state_desc["joint_pos"]["ground_pelvis"][2] < 0.8)
        return -np.linalg.norm(np.array(state_desc["markers"]["Handle"]["pos"]) - self.current_objective)
# NOTE(review): the environment (with visualization) is created at import
# time, not under the __main__ guard — presumably intentional so other
# modules can import `env` directly; confirm before moving it.
env = Arm3dEnv(visualize=True)

if __name__ == '__main__':
    # Smoke test: drive the arm with random actions for 200 steps,
    # resetting whenever an episode ends.
    observation = env.reset()
    for i in range(200):
        action = env.action_space.sample()
        observation, reward, done, info = env.step(action)
        if done:
            env.reset()
| 35.756098 | 138 | 0.596862 | import os
from osim.env import OsimEnv
import pprint
import numpy as np
class Arm3dEnv(OsimEnv):
model_path = os.path.join(os.path.dirname(__file__), '../osim/models/MoBL_ARMS_J_Simple_032118.osim')
time_limit = 200
current_objective = np.array([0,0,0])
def is_done(self):
state_desc = self.get_state_desc()
return False
def get_observation(self):
state_desc = self.get_state_desc()
res = []
for body_part in state_desc["body_pos_rot"].keys():
res = res + state_desc["body_pos_rot"][body_part][2:]
res = res + state_desc["body_pos"][body_part][0:2]
res = res + state_desc["body_vel_rot"][body_part][2:]
res = res + state_desc["body_vel"][body_part][0:2]
res = res + state_desc["body_acc_rot"][body_part][2:]
res = res + state_desc["body_acc"][body_part][0:2]
for joint in state_desc["joint_pos"].keys():
res = res + state_desc["joint_pos"][joint]
res = res + state_desc["joint_vel"][joint]
res = res + state_desc["joint_acc"][joint]
res = res + state_desc["misc"]["mass_center_pos"] + state_desc["misc"]["mass_center_vel"] + state_desc["misc"]["mass_center_acc"]
res += self.current_objective.tolist()
res = np.array(res)
res[np.isnan(res)] = 0
return res
def get_observation_space_size(self):
return 168
def reset_objective(self):
self.current_objective = np.random.uniform(-0.5,0.5,3)
def reset(self):
print(self.reward())
self.reset_objective()
return super(Arm3dEnv, self).reset()
def reward(self):
prev_state_desc = self.get_prev_state_desc()
if not prev_state_desc:
return 0
state_desc = self.get_state_desc()
res = 0
rm(np.array(state_desc["markers"]["Handle"]["pos"]) - self.current_objective)
env = Arm3dEnv(visualize=True)
if __name__ == '__main__':
observation = env.reset()
for i in range(200):
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
env.reset()
| true | true |
1c38844a25ec5660e773058dc22758b3ba1914bf | 446 | py | Python | src/lolite/lib/hooks/Python3Script.py | NathanKewley/lolite | f3d6cd07e9893bbb6b923d49ecb8681cd218eace | [
"Apache-2.0"
] | 5 | 2021-05-04T06:07:43.000Z | 2021-12-11T15:08:01.000Z | src/lolite/lib/hooks/Python3Script.py | NathanKewley/lolite | f3d6cd07e9893bbb6b923d49ecb8681cd218eace | [
"Apache-2.0"
] | 4 | 2021-05-01T07:20:21.000Z | 2021-05-29T04:51:56.000Z | src/lolite/lib/hooks/Python3Script.py | NathanKewley/lolite | f3d6cd07e9893bbb6b923d49ecb8681cd218eace | [
"Apache-2.0"
] | null | null | null | from lolite.lib.hooks.hook_base import HookBase
class Hook(HookBase):
    """Hook that runs a Python 3 script.

    ``arguments`` is appended verbatim to the ``python3`` command line, so it
    should contain the script path (and any script arguments).
    """

    def __init__(self, logger, arguments):
        super().__init__(logger, arguments)

    def execute_hook(self):
        """Run ``python3 <arguments>`` and raise on a non-zero exit code.

        Raises:
            Exception: if the subprocess exits non-zero.
        """
        self._logger.debug(f"Running Python3 Hook: {self._arguments}")
        # Direct `!= 0` comparison (was the awkward `not ... == 0`); also
        # fixes the "Retuned" -> "Returned" typo in the error message.
        if self._subproc.run_command_exit_code(f"python3 {self._arguments}") != 0:
            raise Exception(f"Python Hook Returned non 0 result ({self._arguments})")
| 31.857143 | 87 | 0.674888 | from lolite.lib.hooks.hook_base import HookBase
class Hook(HookBase):
def __init__(self, logger, arguments):
super().__init__(logger, arguments)
def execute_hook(self):
self._logger.debug(f"Running Python3 Hook: {self._arguments}")
if not self._subproc.run_command_exit_code(f"python3 {self._arguments}") == 0:
raise Exception(f"Python Hook Retuned non 0 result ({self._arguments})")
| true | true |
1c38844fce8820095626484dad3f69c9e1319288 | 24,888 | py | Python | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_mpls_vpn_oper.py | bopopescu/ACI | dd717bc74739eeed4747b3ea9e36b239580df5e1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_mpls_vpn_oper.py | bopopescu/ACI | dd717bc74739eeed4747b3ea9e36b239580df5e1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_mpls_vpn_oper.py | bopopescu/ACI | dd717bc74739eeed4747b3ea9e36b239580df5e1 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-07-22T04:04:44.000Z | 2020-07-22T04:04:44.000Z | """ Cisco_IOS_XR_mpls_vpn_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR mpls\-vpn package operational data.
This module contains definitions
for the following management objects\:
l3vpn\: L3VPN operational data
Copyright (c) 2013\-2017 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class MplsVpnAfi(Enum):
    """
    MplsVpnAfi (Enum Class)

    Layer 3 VPN Address Family Type

    .. data:: ipv4 = 1

        VRF IPv4 address family

    .. data:: ipv6 = 2

        VRF IPv6 address family

    """
    # NOTE(review): presumably generated by ydk-gen from the
    # Cisco-IOS-XR-mpls-vpn-oper YANG model — regenerate, don't hand-edit.

    ipv4 = Enum.YLeaf(1, "ipv4")

    ipv6 = Enum.YLeaf(2, "ipv6")
class MplsVpnRt(Enum):
    """
    MplsVpnRt (Enum Class)

    Layer 3 VPN Route Target Type

    .. data:: import_ = 1

        VRF Route Target Type Import

    .. data:: export = 2

        VRF Route Target Type Export

    .. data:: both = 3

        VRF Route Target Type Import and Export

    """
    # NOTE(review): presumably generated by ydk-gen from the
    # Cisco-IOS-XR-mpls-vpn-oper YANG model — regenerate, don't hand-edit.
    # (`import_` carries a trailing underscore because `import` is a keyword;
    # the YANG-facing name stays "import".)

    import_ = Enum.YLeaf(1, "import")

    export = Enum.YLeaf(2, "export")

    both = Enum.YLeaf(3, "both")
class MplsVpnSafi(Enum):
    """
    MplsVpnSafi (Enum Class)

    Layer 3 VPN Sub\-Address Family Type

    .. data:: unicast = 1

        VRF Unicast sub-address family

    .. data:: multicast = 2

        VRF Multicast sub-address family

    .. data:: flowspec = 133

        VRF Flowspec sub-address family

    """
    # NOTE(review): presumably generated by ydk-gen from the
    # Cisco-IOS-XR-mpls-vpn-oper YANG model — regenerate, don't hand-edit.

    unicast = Enum.YLeaf(1, "unicast")

    multicast = Enum.YLeaf(2, "multicast")

    flowspec = Enum.YLeaf(133, "flowspec")
class L3Vpn(Entity):
"""
L3VPN operational data
.. attribute:: invalid_vrfs
Invalid VRF Table (VRFs that are forward referenced)
**type**\: :py:class:`InvalidVrfs <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.L3Vpn.InvalidVrfs>`
.. attribute:: vrfs
VRF Table
**type**\: :py:class:`Vrfs <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.L3Vpn.Vrfs>`
"""
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn, self).__init__()
self._top_entity = None
self.yang_name = "l3vpn"
self.yang_parent_name = "Cisco-IOS-XR-mpls-vpn-oper"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("invalid-vrfs", ("invalid_vrfs", L3Vpn.InvalidVrfs)), ("vrfs", ("vrfs", L3Vpn.Vrfs))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict()
self.invalid_vrfs = L3Vpn.InvalidVrfs()
self.invalid_vrfs.parent = self
self._children_name_map["invalid_vrfs"] = "invalid-vrfs"
self._children_yang_names.add("invalid-vrfs")
self.vrfs = L3Vpn.Vrfs()
self.vrfs.parent = self
self._children_name_map["vrfs"] = "vrfs"
self._children_yang_names.add("vrfs")
self._segment_path = lambda: "Cisco-IOS-XR-mpls-vpn-oper:l3vpn"
class InvalidVrfs(Entity):
"""
Invalid VRF Table (VRFs that are forward
referenced)
.. attribute:: invalid_vrf
Invalid VRF (VRF that is forward referenced)
**type**\: list of :py:class:`InvalidVrf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.L3Vpn.InvalidVrfs.InvalidVrf>`
"""
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.InvalidVrfs, self).__init__()
self.yang_name = "invalid-vrfs"
self.yang_parent_name = "l3vpn"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("invalid-vrf", ("invalid_vrf", L3Vpn.InvalidVrfs.InvalidVrf))])
self._leafs = OrderedDict()
self.invalid_vrf = YList(self)
self._segment_path = lambda: "invalid-vrfs"
self._absolute_path = lambda: "Cisco-IOS-XR-mpls-vpn-oper:l3vpn/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.InvalidVrfs, [], name, value)
class InvalidVrf(Entity):
"""
Invalid VRF (VRF that is forward referenced)
.. attribute:: vrf_name (key)
The Name for an invalid VRF
**type**\: str
.. attribute:: vrf_name_xr
VRF Name
**type**\: str
.. attribute:: vrf_description
VRF Description
**type**\: str
.. attribute:: route_distinguisher
Route Distinguisher
**type**\: str
.. attribute:: is_big_vrf
VRF mode information
**type**\: bool
.. attribute:: interface
Interfaces in VRF
**type**\: list of :py:class:`Interface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.L3Vpn.InvalidVrfs.InvalidVrf.Interface>`
.. attribute:: af
AF/SAF information
**type**\: list of :py:class:`Af <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.L3Vpn.InvalidVrfs.InvalidVrf.Af>`
"""
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.InvalidVrfs.InvalidVrf, self).__init__()
self.yang_name = "invalid-vrf"
self.yang_parent_name = "invalid-vrfs"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['vrf_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("interface", ("interface", L3Vpn.InvalidVrfs.InvalidVrf.Interface)), ("af", ("af", L3Vpn.InvalidVrfs.InvalidVrf.Af))])
self._leafs = OrderedDict([
('vrf_name', YLeaf(YType.str, 'vrf-name')),
('vrf_name_xr', YLeaf(YType.str, 'vrf-name-xr')),
('vrf_description', YLeaf(YType.str, 'vrf-description')),
('route_distinguisher', YLeaf(YType.str, 'route-distinguisher')),
('is_big_vrf', YLeaf(YType.boolean, 'is-big-vrf')),
])
self.vrf_name = None
self.vrf_name_xr = None
self.vrf_description = None
self.route_distinguisher = None
self.is_big_vrf = None
self.interface = YList(self)
self.af = YList(self)
self._segment_path = lambda: "invalid-vrf" + "[vrf-name='" + str(self.vrf_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-mpls-vpn-oper:l3vpn/invalid-vrfs/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.InvalidVrfs.InvalidVrf, ['vrf_name', 'vrf_name_xr', 'vrf_description', 'route_distinguisher', 'is_big_vrf'], name, value)
class Interface(Entity):
"""
Interfaces in VRF
.. attribute:: interface_name
Interface Name
**type**\: str
"""
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.InvalidVrfs.InvalidVrf.Interface, self).__init__()
self.yang_name = "interface"
self.yang_parent_name = "invalid-vrf"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', YLeaf(YType.str, 'interface-name')),
])
self.interface_name = None
self._segment_path = lambda: "interface"
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.InvalidVrfs.InvalidVrf.Interface, ['interface_name'], name, value)
class Af(Entity):
"""
AF/SAF information
.. attribute:: af_name
AF name
**type**\: :py:class:`MplsVpnAfi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.MplsVpnAfi>`
.. attribute:: saf_name
SAF name
**type**\: :py:class:`MplsVpnSafi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.MplsVpnSafi>`
.. attribute:: import_route_policy
Import Route Policy
**type**\: str
.. attribute:: export_route_policy
Export Route Policy
**type**\: str
.. attribute:: route_target
Route Targets
**type**\: list of :py:class:`RouteTarget <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.L3Vpn.InvalidVrfs.InvalidVrf.Af.RouteTarget>`
"""
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.InvalidVrfs.InvalidVrf.Af, self).__init__()
self.yang_name = "af"
self.yang_parent_name = "invalid-vrf"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("route-target", ("route_target", L3Vpn.InvalidVrfs.InvalidVrf.Af.RouteTarget))])
self._leafs = OrderedDict([
('af_name', YLeaf(YType.enumeration, 'af-name')),
('saf_name', YLeaf(YType.enumeration, 'saf-name')),
('import_route_policy', YLeaf(YType.str, 'import-route-policy')),
('export_route_policy', YLeaf(YType.str, 'export-route-policy')),
])
self.af_name = None
self.saf_name = None
self.import_route_policy = None
self.export_route_policy = None
self.route_target = YList(self)
self._segment_path = lambda: "af"
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.InvalidVrfs.InvalidVrf.Af, ['af_name', 'saf_name', 'import_route_policy', 'export_route_policy'], name, value)
class RouteTarget(Entity):
"""
Route Targets
.. attribute:: route_target_type
Route Target Type
**type**\: :py:class:`MplsVpnRt <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.MplsVpnRt>`
.. attribute:: route_target_value
Route Target Value
**type**\: str
.. attribute:: af_name
AF name
**type**\: :py:class:`MplsVpnAfi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.MplsVpnAfi>`
.. attribute:: saf_name
SAF name
**type**\: :py:class:`MplsVpnSafi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.MplsVpnSafi>`
"""
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.InvalidVrfs.InvalidVrf.Af.RouteTarget, self).__init__()
self.yang_name = "route-target"
self.yang_parent_name = "af"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('route_target_type', YLeaf(YType.enumeration, 'route-target-type')),
('route_target_value', YLeaf(YType.str, 'route-target-value')),
('af_name', YLeaf(YType.enumeration, 'af-name')),
('saf_name', YLeaf(YType.enumeration, 'saf-name')),
])
self.route_target_type = None
self.route_target_value = None
self.af_name = None
self.saf_name = None
self._segment_path = lambda: "route-target"
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.InvalidVrfs.InvalidVrf.Af.RouteTarget, ['route_target_type', 'route_target_value', 'af_name', 'saf_name'], name, value)
class Vrfs(Entity):
"""
VRF Table
.. attribute:: vrf
VRF
**type**\: list of :py:class:`Vrf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.L3Vpn.Vrfs.Vrf>`
"""
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.Vrfs, self).__init__()
self.yang_name = "vrfs"
self.yang_parent_name = "l3vpn"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("vrf", ("vrf", L3Vpn.Vrfs.Vrf))])
self._leafs = OrderedDict()
self.vrf = YList(self)
self._segment_path = lambda: "vrfs"
self._absolute_path = lambda: "Cisco-IOS-XR-mpls-vpn-oper:l3vpn/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.Vrfs, [], name, value)
class Vrf(Entity):
"""
VRF
.. attribute:: vrf_name (key)
The Name for a VRF
**type**\: str
.. attribute:: vrf_name_xr
VRF Name
**type**\: str
.. attribute:: vrf_description
VRF Description
**type**\: str
.. attribute:: route_distinguisher
Route Distinguisher
**type**\: str
.. attribute:: is_big_vrf
VRF mode information
**type**\: bool
.. attribute:: interface
Interfaces in VRF
**type**\: list of :py:class:`Interface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.L3Vpn.Vrfs.Vrf.Interface>`
.. attribute:: af
AF/SAF information
**type**\: list of :py:class:`Af <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.L3Vpn.Vrfs.Vrf.Af>`
"""
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.Vrfs.Vrf, self).__init__()
self.yang_name = "vrf"
self.yang_parent_name = "vrfs"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['vrf_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("interface", ("interface", L3Vpn.Vrfs.Vrf.Interface)), ("af", ("af", L3Vpn.Vrfs.Vrf.Af))])
self._leafs = OrderedDict([
('vrf_name', YLeaf(YType.str, 'vrf-name')),
('vrf_name_xr', YLeaf(YType.str, 'vrf-name-xr')),
('vrf_description', YLeaf(YType.str, 'vrf-description')),
('route_distinguisher', YLeaf(YType.str, 'route-distinguisher')),
('is_big_vrf', YLeaf(YType.boolean, 'is-big-vrf')),
])
self.vrf_name = None
self.vrf_name_xr = None
self.vrf_description = None
self.route_distinguisher = None
self.is_big_vrf = None
self.interface = YList(self)
self.af = YList(self)
self._segment_path = lambda: "vrf" + "[vrf-name='" + str(self.vrf_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-mpls-vpn-oper:l3vpn/vrfs/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.Vrfs.Vrf, ['vrf_name', 'vrf_name_xr', 'vrf_description', 'route_distinguisher', 'is_big_vrf'], name, value)
class Interface(Entity):
"""
Interfaces in VRF
.. attribute:: interface_name
Interface Name
**type**\: str
"""
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.Vrfs.Vrf.Interface, self).__init__()
self.yang_name = "interface"
self.yang_parent_name = "vrf"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', YLeaf(YType.str, 'interface-name')),
])
self.interface_name = None
self._segment_path = lambda: "interface"
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.Vrfs.Vrf.Interface, ['interface_name'], name, value)
class Af(Entity):
"""
AF/SAF information
.. attribute:: af_name
AF name
**type**\: :py:class:`MplsVpnAfi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.MplsVpnAfi>`
.. attribute:: saf_name
SAF name
**type**\: :py:class:`MplsVpnSafi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.MplsVpnSafi>`
.. attribute:: import_route_policy
Import Route Policy
**type**\: str
.. attribute:: export_route_policy
Export Route Policy
**type**\: str
.. attribute:: route_target
Route Targets
**type**\: list of :py:class:`RouteTarget <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.L3Vpn.Vrfs.Vrf.Af.RouteTarget>`
"""
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.Vrfs.Vrf.Af, self).__init__()
self.yang_name = "af"
self.yang_parent_name = "vrf"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("route-target", ("route_target", L3Vpn.Vrfs.Vrf.Af.RouteTarget))])
self._leafs = OrderedDict([
('af_name', YLeaf(YType.enumeration, 'af-name')),
('saf_name', YLeaf(YType.enumeration, 'saf-name')),
('import_route_policy', YLeaf(YType.str, 'import-route-policy')),
('export_route_policy', YLeaf(YType.str, 'export-route-policy')),
])
self.af_name = None
self.saf_name = None
self.import_route_policy = None
self.export_route_policy = None
self.route_target = YList(self)
self._segment_path = lambda: "af"
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.Vrfs.Vrf.Af, ['af_name', 'saf_name', 'import_route_policy', 'export_route_policy'], name, value)
class RouteTarget(Entity):
"""
Route Targets
.. attribute:: route_target_type
Route Target Type
**type**\: :py:class:`MplsVpnRt <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.MplsVpnRt>`
.. attribute:: route_target_value
Route Target Value
**type**\: str
.. attribute:: af_name
AF name
**type**\: :py:class:`MplsVpnAfi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.MplsVpnAfi>`
.. attribute:: saf_name
SAF name
**type**\: :py:class:`MplsVpnSafi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mpls_vpn_oper.MplsVpnSafi>`
"""
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.Vrfs.Vrf.Af.RouteTarget, self).__init__()
self.yang_name = "route-target"
self.yang_parent_name = "af"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('route_target_type', YLeaf(YType.enumeration, 'route-target-type')),
('route_target_value', YLeaf(YType.str, 'route-target-value')),
('af_name', YLeaf(YType.enumeration, 'af-name')),
('saf_name', YLeaf(YType.enumeration, 'saf-name')),
])
self.route_target_type = None
self.route_target_value = None
self.af_name = None
self.saf_name = None
self._segment_path = lambda: "route-target"
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.Vrfs.Vrf.Af.RouteTarget, ['route_target_type', 'route_target_value', 'af_name', 'saf_name'], name, value)
def clone_ptr(self):
self._top_entity = L3Vpn()
return self._top_entity
| 36.069565 | 175 | 0.499879 | from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class MplsVpnAfi(Enum):
ipv4 = Enum.YLeaf(1, "ipv4")
ipv6 = Enum.YLeaf(2, "ipv6")
class MplsVpnRt(Enum):
import_ = Enum.YLeaf(1, "import")
export = Enum.YLeaf(2, "export")
both = Enum.YLeaf(3, "both")
class MplsVpnSafi(Enum):
unicast = Enum.YLeaf(1, "unicast")
multicast = Enum.YLeaf(2, "multicast")
flowspec = Enum.YLeaf(133, "flowspec")
class L3Vpn(Entity):
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn, self).__init__()
self._top_entity = None
self.yang_name = "l3vpn"
self.yang_parent_name = "Cisco-IOS-XR-mpls-vpn-oper"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("invalid-vrfs", ("invalid_vrfs", L3Vpn.InvalidVrfs)), ("vrfs", ("vrfs", L3Vpn.Vrfs))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict()
self.invalid_vrfs = L3Vpn.InvalidVrfs()
self.invalid_vrfs.parent = self
self._children_name_map["invalid_vrfs"] = "invalid-vrfs"
self._children_yang_names.add("invalid-vrfs")
self.vrfs = L3Vpn.Vrfs()
self.vrfs.parent = self
self._children_name_map["vrfs"] = "vrfs"
self._children_yang_names.add("vrfs")
self._segment_path = lambda: "Cisco-IOS-XR-mpls-vpn-oper:l3vpn"
class InvalidVrfs(Entity):
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.InvalidVrfs, self).__init__()
self.yang_name = "invalid-vrfs"
self.yang_parent_name = "l3vpn"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("invalid-vrf", ("invalid_vrf", L3Vpn.InvalidVrfs.InvalidVrf))])
self._leafs = OrderedDict()
self.invalid_vrf = YList(self)
self._segment_path = lambda: "invalid-vrfs"
self._absolute_path = lambda: "Cisco-IOS-XR-mpls-vpn-oper:l3vpn/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.InvalidVrfs, [], name, value)
class InvalidVrf(Entity):
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.InvalidVrfs.InvalidVrf, self).__init__()
self.yang_name = "invalid-vrf"
self.yang_parent_name = "invalid-vrfs"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['vrf_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("interface", ("interface", L3Vpn.InvalidVrfs.InvalidVrf.Interface)), ("af", ("af", L3Vpn.InvalidVrfs.InvalidVrf.Af))])
self._leafs = OrderedDict([
('vrf_name', YLeaf(YType.str, 'vrf-name')),
('vrf_name_xr', YLeaf(YType.str, 'vrf-name-xr')),
('vrf_description', YLeaf(YType.str, 'vrf-description')),
('route_distinguisher', YLeaf(YType.str, 'route-distinguisher')),
('is_big_vrf', YLeaf(YType.boolean, 'is-big-vrf')),
])
self.vrf_name = None
self.vrf_name_xr = None
self.vrf_description = None
self.route_distinguisher = None
self.is_big_vrf = None
self.interface = YList(self)
self.af = YList(self)
self._segment_path = lambda: "invalid-vrf" + "[vrf-name='" + str(self.vrf_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-mpls-vpn-oper:l3vpn/invalid-vrfs/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.InvalidVrfs.InvalidVrf, ['vrf_name', 'vrf_name_xr', 'vrf_description', 'route_distinguisher', 'is_big_vrf'], name, value)
class Interface(Entity):
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.InvalidVrfs.InvalidVrf.Interface, self).__init__()
self.yang_name = "interface"
self.yang_parent_name = "invalid-vrf"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', YLeaf(YType.str, 'interface-name')),
])
self.interface_name = None
self._segment_path = lambda: "interface"
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.InvalidVrfs.InvalidVrf.Interface, ['interface_name'], name, value)
class Af(Entity):
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.InvalidVrfs.InvalidVrf.Af, self).__init__()
self.yang_name = "af"
self.yang_parent_name = "invalid-vrf"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("route-target", ("route_target", L3Vpn.InvalidVrfs.InvalidVrf.Af.RouteTarget))])
self._leafs = OrderedDict([
('af_name', YLeaf(YType.enumeration, 'af-name')),
('saf_name', YLeaf(YType.enumeration, 'saf-name')),
('import_route_policy', YLeaf(YType.str, 'import-route-policy')),
('export_route_policy', YLeaf(YType.str, 'export-route-policy')),
])
self.af_name = None
self.saf_name = None
self.import_route_policy = None
self.export_route_policy = None
self.route_target = YList(self)
self._segment_path = lambda: "af"
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.InvalidVrfs.InvalidVrf.Af, ['af_name', 'saf_name', 'import_route_policy', 'export_route_policy'], name, value)
class RouteTarget(Entity):
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.InvalidVrfs.InvalidVrf.Af.RouteTarget, self).__init__()
self.yang_name = "route-target"
self.yang_parent_name = "af"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('route_target_type', YLeaf(YType.enumeration, 'route-target-type')),
('route_target_value', YLeaf(YType.str, 'route-target-value')),
('af_name', YLeaf(YType.enumeration, 'af-name')),
('saf_name', YLeaf(YType.enumeration, 'saf-name')),
])
self.route_target_type = None
self.route_target_value = None
self.af_name = None
self.saf_name = None
self._segment_path = lambda: "route-target"
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.InvalidVrfs.InvalidVrf.Af.RouteTarget, ['route_target_type', 'route_target_value', 'af_name', 'saf_name'], name, value)
class Vrfs(Entity):
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.Vrfs, self).__init__()
self.yang_name = "vrfs"
self.yang_parent_name = "l3vpn"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("vrf", ("vrf", L3Vpn.Vrfs.Vrf))])
self._leafs = OrderedDict()
self.vrf = YList(self)
self._segment_path = lambda: "vrfs"
self._absolute_path = lambda: "Cisco-IOS-XR-mpls-vpn-oper:l3vpn/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.Vrfs, [], name, value)
class Vrf(Entity):
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.Vrfs.Vrf, self).__init__()
self.yang_name = "vrf"
self.yang_parent_name = "vrfs"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['vrf_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("interface", ("interface", L3Vpn.Vrfs.Vrf.Interface)), ("af", ("af", L3Vpn.Vrfs.Vrf.Af))])
self._leafs = OrderedDict([
('vrf_name', YLeaf(YType.str, 'vrf-name')),
('vrf_name_xr', YLeaf(YType.str, 'vrf-name-xr')),
('vrf_description', YLeaf(YType.str, 'vrf-description')),
('route_distinguisher', YLeaf(YType.str, 'route-distinguisher')),
('is_big_vrf', YLeaf(YType.boolean, 'is-big-vrf')),
])
self.vrf_name = None
self.vrf_name_xr = None
self.vrf_description = None
self.route_distinguisher = None
self.is_big_vrf = None
self.interface = YList(self)
self.af = YList(self)
self._segment_path = lambda: "vrf" + "[vrf-name='" + str(self.vrf_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-mpls-vpn-oper:l3vpn/vrfs/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.Vrfs.Vrf, ['vrf_name', 'vrf_name_xr', 'vrf_description', 'route_distinguisher', 'is_big_vrf'], name, value)
class Interface(Entity):
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.Vrfs.Vrf.Interface, self).__init__()
self.yang_name = "interface"
self.yang_parent_name = "vrf"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface_name', YLeaf(YType.str, 'interface-name')),
])
self.interface_name = None
self._segment_path = lambda: "interface"
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.Vrfs.Vrf.Interface, ['interface_name'], name, value)
class Af(Entity):
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.Vrfs.Vrf.Af, self).__init__()
self.yang_name = "af"
self.yang_parent_name = "vrf"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("route-target", ("route_target", L3Vpn.Vrfs.Vrf.Af.RouteTarget))])
self._leafs = OrderedDict([
('af_name', YLeaf(YType.enumeration, 'af-name')),
('saf_name', YLeaf(YType.enumeration, 'saf-name')),
('import_route_policy', YLeaf(YType.str, 'import-route-policy')),
('export_route_policy', YLeaf(YType.str, 'export-route-policy')),
])
self.af_name = None
self.saf_name = None
self.import_route_policy = None
self.export_route_policy = None
self.route_target = YList(self)
self._segment_path = lambda: "af"
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.Vrfs.Vrf.Af, ['af_name', 'saf_name', 'import_route_policy', 'export_route_policy'], name, value)
class RouteTarget(Entity):
_prefix = 'mpls-vpn-oper'
_revision = '2015-11-09'
def __init__(self):
super(L3Vpn.Vrfs.Vrf.Af.RouteTarget, self).__init__()
self.yang_name = "route-target"
self.yang_parent_name = "af"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('route_target_type', YLeaf(YType.enumeration, 'route-target-type')),
('route_target_value', YLeaf(YType.str, 'route-target-value')),
('af_name', YLeaf(YType.enumeration, 'af-name')),
('saf_name', YLeaf(YType.enumeration, 'saf-name')),
])
self.route_target_type = None
self.route_target_value = None
self.af_name = None
self.saf_name = None
self._segment_path = lambda: "route-target"
def __setattr__(self, name, value):
self._perform_setattr(L3Vpn.Vrfs.Vrf.Af.RouteTarget, ['route_target_type', 'route_target_value', 'af_name', 'saf_name'], name, value)
def clone_ptr(self):
self._top_entity = L3Vpn()
return self._top_entity
| true | true |
1c3884b2e21c9416d76a3ae7f7dfa9fe00146e26 | 4,127 | py | Python | application/aci/models.py | HyechurnJang/archon | 2cda56436ed6dea65d38774f7c9ed6c3315dbc03 | [
"Apache-2.0"
] | 1 | 2018-03-07T08:33:23.000Z | 2018-03-07T08:33:23.000Z | application/aci/models.py | HyechurnJang/archon | 2cda56436ed6dea65d38774f7c9ed6c3315dbc03 | [
"Apache-2.0"
] | 2 | 2017-03-14T01:02:55.000Z | 2017-03-14T01:07:29.000Z | application/aci/models.py | HyechurnJang/archon | 2cda56436ed6dea65d38774f7c9ed6c3315dbc03 | [
"Apache-2.0"
] | 4 | 2017-02-03T04:53:07.000Z | 2020-04-20T07:52:47.000Z | # -*- coding: utf-8 -*-
################################################################################
# _____ _ _____ _ #
# / ____(_) / ____| | | #
# | | _ ___ ___ ___ | (___ _ _ ___| |_ ___ _ __ ___ ___ #
# | | | / __|/ __/ _ \ \___ \| | | / __| __/ _ \ '_ ` _ \/ __| #
# | |____| \__ \ (_| (_) | ____) | |_| \__ \ || __/ | | | | \__ \ #
# \_____|_|___/\___\___/ |_____/ \__, |___/\__\___|_| |_| |_|___/ #
# __/ | #
# |___/ #
# _ __ _____ _ _____ ______ #
# | |/ / / ____| | |/ ____| ____| #
# | ' / ___ _ __ ___ __ _ | (___ ___ | | (___ | |__ #
# | < / _ \| '__/ _ \/ _` | \___ \ / _ \| |\___ \| __| #
# | . \ (_) | | | __/ (_| | ____) | (_) | |____) | |____ #
# |_|\_\___/|_| \___|\__,_| |_____/ \___/|_|_____/|______| #
# #
################################################################################
# #
# Copyright (c) 2016 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from archon import modelview
#===============================================================================
# Create your models here.
#===============================================================================
class Domain(models.Model):
name = models.CharField(max_length=64)
controllers = models.CharField(max_length=64)
user = models.CharField(max_length=32)
password = models.CharField(max_length=32)
created_date = models.DateTimeField(default=timezone.now)
modelview(Domain)
class EPTracker(models.Model):
mac = models.CharField(max_length=18)
ip = models.CharField(max_length=16)
domain = models.CharField(max_length=64)
tenant = models.CharField(max_length=100)
app = models.CharField(max_length=100)
epg = models.CharField(max_length=100)
dn = models.CharField(max_length=2048)
intf = models.CharField(max_length=2048)
start = models.CharField(max_length=24)
stop = models.CharField(max_length=24)
modelview(EPTracker)
class FaultMessage(models.Model):
code = models.CharField(max_length=8)
title = models.CharField(max_length=512)
syslog = models.TextField()
explan = models.TextField()
actions = models.TextField()
modelview(FaultMessage)
| 53.597403 | 80 | 0.388902 |
d(max_length=100)
dn = models.CharField(max_length=2048)
intf = models.CharField(max_length=2048)
start = models.CharField(max_length=24)
stop = models.CharField(max_length=24)
modelview(EPTracker)
class FaultMessage(models.Model):
code = models.CharField(max_length=8)
title = models.CharField(max_length=512)
syslog = models.TextField()
explan = models.TextField()
actions = models.TextField()
modelview(FaultMessage)
| true | true |
1c3885a735d8c4e6f62f1bac47fb74426f809a4d | 1,141 | py | Python | lib/systems/d-arabinose.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | lib/systems/d-arabinose.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | lib/systems/d-arabinose.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | import pulsar as psr
def load_ref_system():
""" Returns d-arabinose as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
C 2.3481 -0.9668 0.1807
C 1.2791 0.0840 -0.2072
C -0.1614 -0.4732 0.0020
C -1.2259 0.6349 -0.2676
C -2.5511 -0.0222 -0.6508
O 3.6320 -0.4210 0.2861
O 1.4697 0.5270 -1.5308
O -0.3352 -1.0573 1.2671
O -1.3074 1.5523 0.7917
O -3.5641 0.0995 -0.0056
H 2.3453 -1.8155 -0.5330
H 2.1708 -1.3751 1.1909
H 1.4187 1.0267 0.3728
H -0.3388 -1.3388 -0.6793
H -0.8941 1.2815 -1.1147
H -2.5391 -0.6295 -1.5702
H 3.8240 0.0269 -0.5291
H 1.3798 -0.2273 -2.1017
H -0.0891 -0.4109 1.9191
H -1.8997 1.1985 1.4455
""")
| 40.75 | 67 | 0.405784 | import pulsar as psr
def load_ref_system():
return psr.make_system("""
C 2.3481 -0.9668 0.1807
C 1.2791 0.0840 -0.2072
C -0.1614 -0.4732 0.0020
C -1.2259 0.6349 -0.2676
C -2.5511 -0.0222 -0.6508
O 3.6320 -0.4210 0.2861
O 1.4697 0.5270 -1.5308
O -0.3352 -1.0573 1.2671
O -1.3074 1.5523 0.7917
O -3.5641 0.0995 -0.0056
H 2.3453 -1.8155 -0.5330
H 2.1708 -1.3751 1.1909
H 1.4187 1.0267 0.3728
H -0.3388 -1.3388 -0.6793
H -0.8941 1.2815 -1.1147
H -2.5391 -0.6295 -1.5702
H 3.8240 0.0269 -0.5291
H 1.3798 -0.2273 -2.1017
H -0.0891 -0.4109 1.9191
H -1.8997 1.1985 1.4455
""")
| true | true |
1c38875a7a77a091f468124bf60e54b52c488fe7 | 19,102 | py | Python | train_180215_1_Dense_6th_training.py | OsciiArt/Cookpad | b2245f84db0650d6282c97c98600de825c6ed6e0 | [
"MIT"
] | null | null | null | train_180215_1_Dense_6th_training.py | OsciiArt/Cookpad | b2245f84db0650d6282c97c98600de825c6ed6e0 | [
"MIT"
] | null | null | null | train_180215_1_Dense_6th_training.py | OsciiArt/Cookpad | b2245f84db0650d6282c97c98600de825c6ed6e0 | [
"MIT"
] | null | null | null | import numpy as np # linear algebra
np.random.seed(42)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from matplotlib import pyplot
import time
import os, glob
import cv2
# parameters (module-level configuration for this training run)
format = "%H%M"  # timestamp format (hour+minute); NOTE(review): shadows the builtin `format`
ts = time.strftime(format)  # run timestamp, used to make output file names unique per run
base_name = os.path.splitext(__file__)[0] + "_ts" + ts  # prefix for log/weight/submission files
input_size = 221  # square edge length (pixels) every image is resized to before the network
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Input, Flatten, GaussianNoise
from keras.layers import GlobalMaxPooling2D, Reshape, UpSampling3D, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import Concatenate
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping, CSVLogger, ReduceLROnPlateau, LearningRateScheduler
from keras import backend as K
def get_callbacks(save_path, lr=0.001, patience=64):
    """Build the list of Keras callbacks used during training.

    Parameters
    ----------
    save_path : str
        Checkpoint file path; the CSV log is written next to it as
        ``save_path + '_log.csv'``.
    lr : float
        Unused; kept for backward compatibility with existing callers.
    patience : int
        Unused; kept for backward compatibility (early stopping is
        intentionally disabled for this fine-tuning run).

    Returns
    -------
    list
        ``[CSVLogger, ModelCheckpoint, LearningRateScheduler]``.
    """
    csv_logger = CSVLogger(save_path + '_log.csv', append=True)
    # Keep only the best model (lowest validation loss) at `save_path`.
    save_checkpoint = ModelCheckpoint(filepath=save_path, monitor='val_loss',
                                      save_best_only=True)

    def lrs(epoch):
        # Step-wise decay for fine-tuning: 1e-6 for the first 100 epochs,
        # 1e-7 until epoch 200, then 1e-8.
        if epoch < 100:
            return 1e-6
        elif epoch < 200:
            return 1e-7
        else:
            return 1e-8

    learning_rate_schedular = LearningRateScheduler(lrs)
    # NOTE(review): a ReduceLROnPlateau and an EarlyStopping callback were
    # previously constructed here but never registered in the returned
    # list; the dead objects have been removed (no behaviour change).
    return [csv_logger, save_checkpoint, learning_rate_schedular]
def swish(x):
    """Swish activation: ``x * sigmoid(x)`` (Ramachandran et al., 2017)."""
    return K.sigmoid(x) * x
from keras.applications.vgg16 import VGG16
from keras.applications.densenet import DenseNet121
from keras.optimizers import SGD, Adam
from keras.layers import GlobalAveragePooling2D
def get_model(num_class):
    """Build a DenseNet121 classifier trained from scratch (no ImageNet weights).

    Parameters
    ----------
    num_class : int
        Number of output classes for the softmax head.

    Returns
    -------
    keras.models.Model
        Model compiled with Adam (lr=1e-4) and categorical cross-entropy.
    """
    base_model = DenseNet121(weights=None, include_top=False,
                             input_shape=[input_size, input_size, 3], classes=1)
    # "bn" is the final batch-norm layer of DenseNet121 before the top.
    x = base_model.get_layer("bn").output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(num_class, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    # NOTE(review): an SGD optimizer used to be constructed here but was
    # never used; Adam is the optimizer that is actually applied.
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=0.0001),
                  metrics=['accuracy'])
    return model
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    """Randomly shift hue/saturation/value of a BGR image with probability ``u``.

    ``image`` is presumably a uint8 BGR array (as produced by cv2.imread in
    ``load_img``).  Each channel shift is drawn uniformly from the
    corresponding ``*_limit`` pair; the image is returned unchanged with
    probability ``1 - u``.
    """
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)  # hue, saturation, value channels
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        # NOTE(review): cv2.add saturates at the dtype bounds instead of
        # wrapping the hue circle (OpenCV uint8 hue range is 0-179), so
        # large shifts clamp rather than rotate hue -- confirm intended.
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image
def randomShiftScaleRotate(image,
                           shift_limit=(-0.0625, 0.0625),
                           scale_limit=(-0.1, 0.1),
                           rotate_limit=(-45, 45), aspect_limit=(0, 0),
                           borderMode=cv2.BORDER_CONSTANT, u=0.5):
    """Apply a random shift + scale + rotate + aspect warp with probability ``u``.

    Limits are relative: ``shift_limit`` is a fraction of width/height,
    ``scale_limit`` and ``aspect_limit`` are deviations from 1, and
    ``rotate_limit`` is in degrees.  Revealed border pixels are filled
    with black (borderValue 0) under the given ``borderMode``.
    """
    if np.random.random() < u:
        height, width, channel = image.shape
        angle = np.random.uniform(rotate_limit[0], rotate_limit[1])  # degrees
        scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
        aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
        # Split the overall scale into x/y factors so the aspect ratio varies too.
        sx = scale * aspect / (aspect ** 0.5)
        sy = scale / (aspect ** 0.5)
        dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
        dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)
        cc = np.math.cos(angle / 180 * np.math.pi) * sx
        ss = np.math.sin(angle / 180 * np.math.pi) * sy
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])
        # Map the four image corners through the rotation/scale about the
        # centre plus the shift, then derive the perspective matrix from
        # the corner correspondence.
        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])
        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                    borderValue=(
                                        0, 0,
                                        0,))
    return image
def randomHorizontalFlip(image, u=0.5):
    """Mirror the image left-right with probability ``u``."""
    if np.random.random() >= u:
        return image
    return cv2.flip(image, 1)
def randomVerticalFlip(image, u=0.5):
    """Mirror the image top-bottom with probability ``u``."""
    if np.random.random() >= u:
        return image
    return cv2.flip(image, 0)
def get_mixer(p=0.5, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3):
    """Return a CutMix-style mixer function.

    With probability ``p`` the returned ``mixer`` pastes a random
    rectangular patch of ``img2`` into ``img1`` (patch area fraction drawn
    from [s_l, s_h], aspect ratio from [r_1, r_2]); otherwise it returns
    ``img1`` unchanged.  ``img1`` is modified in place.

    Bug fix: the original returned the undefined name ``mask1`` (a
    NameError whenever mixing actually triggered); it now returns the
    label ``y1``.  NOTE: this variant does not mix the labels -- the later
    ``get_mixer`` definition in this file (which shadows this one) does.
    """
    def mixer(img1, img2, y1, y2):
        img_h, img_w, img_c = img1.shape
        p_1 = np.random.rand()
        if p_1 > p:
            return img1, y1
        # Re-draw until the patch lies fully inside the image.
        while True:
            s = np.random.uniform(s_l, s_h) * img_h * img_w
            r = np.random.uniform(r_1, r_2)
            w = int(np.sqrt(s / r))
            h = int(np.sqrt(s * r))
            left = np.random.randint(0, img_w)
            top = np.random.randint(0, img_h)
            if left + w <= img_w and top + h <= img_h:
                break
        img1[top:top + h, left:left + w, :] = img2[top:top + h, left:left + w, :]
        return img1, y1
    return mixer
def get_random_eraser(p=0.5, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3, v_l=0, v_h=255, pixel_level=False):
    """Return a Random-Erasing augmenter (Zhong et al., 2017).

    With probability ``p`` the returned function overwrites a random
    rectangle of the input image (area fraction in [s_l, s_h], aspect
    ratio in [r_1, r_2]) with either one uniform random value in
    [v_l, v_h] or, when ``pixel_level`` is True, independent per-pixel
    random values.  The image is modified in place and returned.
    """
    def eraser(input_img):
        img_h, img_w, img_c = input_img.shape

        if np.random.rand() > p:
            return input_img

        # Keep sampling until the rectangle lies fully inside the image.
        while True:
            s = np.random.uniform(s_l, s_h) * img_h * img_w
            r = np.random.uniform(r_1, r_2)
            w = int(np.sqrt(s / r))
            h = int(np.sqrt(s * r))
            left = np.random.randint(0, img_w)
            top = np.random.randint(0, img_h)
            if left + w <= img_w and top + h <= img_h:
                break

        fill = (np.random.uniform(v_l, v_h, (h, w, img_c)) if pixel_level
                else np.random.uniform(v_l, v_h))
        input_img[top:top + h, left:left + w, :] = fill
        return input_img

    return eraser
from multiprocessing import Pool
def load_img(args):
    """Load, resize and augment one training image.

    ``args`` is the image file path (a single argument so the function can
    be used with ``multiprocessing.Pool.map``).  Returns the image resized
    to (input_size, input_size) after random HSV jitter, a random
    shift/scale/rotate warp and a random horizontal flip.
    """
    img_path = args
    img = cv2.imread(img_path)  # NOTE(review): returns None for a bad path; no check here
    img = cv2.resize(img, (input_size, input_size))
    # Mild colour jitter, applied to 25% of images.
    img = randomHueSaturationValue(img,
                                   hue_shift_limit=(-50, 50),
                                   sat_shift_limit=(-5, 5),
                                   val_shift_limit=(-15, 15),
                                   u=0.25)
    # Geometric augmentation, applied to 25% of images.
    img = randomShiftScaleRotate(img,
                                 shift_limit=(-0.2, 0.2),
                                 scale_limit=(-0.2, 0.5),
                                 rotate_limit=(-30, 30),
                                 aspect_limit=(-0.2, 0.2),
                                 u=0.25)
    img = randomHorizontalFlip(img)
    # img = randomVerticalFlip(img)  # vertical flip deliberately disabled
    return img
def load_img_valid(args):
    """Load one image (path in ``args``) resized to the network input size.

    No augmentation is applied -- used for validation/test batches.
    """
    return cv2.resize(cv2.imread(args), (input_size, input_size))
def train_generator(x_train, y_train, img_dir, batch_size, shuffle=True, num_class=55):
    """Yield ``(images, one-hot labels)`` batches forever, with augmentation.

    Parameters
    ----------
    x_train : np.ndarray of str
        Image file paths (already including their directory).
    y_train : np.ndarray of int
        Integer class ids; one-hot encoded to ``num_class`` columns.
    img_dir : str
        Unused; kept for backward compatibility with existing callers.
    batch_size : int
        Maximum batch size (the final batch of an epoch may be smaller).
    shuffle : bool
        Reshuffle the index order at the start of every epoch.
    num_class : int
        Number of classes for the one-hot encoding (default 55, the
        competition's class count).

    Images are loaded and augmented in parallel by a worker pool via
    ``load_img`` and scaled to float32 in [0, 1].
    NOTE(review): the Pool is created per generator and never closed;
    acceptable for a generator that lives for the whole training run.
    """
    y_train = np.eye(num_class)[y_train]
    batch_index = 0
    n = x_train.shape[0]
    pool = Pool(8)
    while 1:
        if batch_index == 0:
            index_array = np.arange(n)
            if shuffle:
                index_array = np.random.permutation(n)

        current_index = (batch_index * batch_size) % n
        if n >= current_index + batch_size:
            current_batch_size = batch_size
            batch_index += 1
        else:
            # Last (short) batch of the epoch; restart next iteration.
            current_batch_size = n - current_index
            batch_index = 0

        batch_ids = index_array[current_index: current_index + current_batch_size]
        batch_x = pool.map(load_img,
                           ['{}'.format(x_train[i]) for i in batch_ids])
        # NOTE(review): a no-op per-image loop and an unused random-eraser
        # instance were removed here (no behaviour change).
        batch_x = np.array(batch_x, np.float32) / 255
        batch_y = y_train[batch_ids]
        yield (batch_x, batch_y)
def valid_generator(x_train, y_train, img_dir, batch_size, shuffle=True, num_class=55):
    """Yield ``(images, one-hot labels)`` batches forever, WITHOUT augmentation.

    Mirrors ``train_generator`` but loads images through ``load_img_valid``
    (resize only) using a 4-worker pool, for validation scoring.

    Parameters
    ----------
    x_train : np.ndarray of str
        Image file paths (already including their directory).
    y_train : np.ndarray of int
        Integer class ids; one-hot encoded to ``num_class`` columns.
    img_dir : str
        Unused; kept for backward compatibility with existing callers.
    batch_size : int
        Maximum batch size (the final batch of an epoch may be smaller).
    shuffle : bool
        Reshuffle the index order at the start of every epoch.
    num_class : int
        Number of classes for the one-hot encoding (default 55).
    """
    y_train = np.eye(num_class)[y_train]
    batch_index = 0
    n = x_train.shape[0]
    pool = Pool(4)
    while 1:
        if batch_index == 0:
            index_array = np.arange(n)
            if shuffle:
                index_array = np.random.permutation(n)

        current_index = (batch_index * batch_size) % n
        if n >= current_index + batch_size:
            current_batch_size = batch_size
            batch_index += 1
        else:
            # Last (short) batch of the epoch; restart next iteration.
            current_batch_size = n - current_index
            batch_index = 0

        batch_ids = index_array[current_index: current_index + current_batch_size]
        batch_x = pool.map(load_img_valid,
                           ['{}'.format(x_train[i]) for i in batch_ids])
        # NOTE(review): a no-op per-image loop and an unused random-eraser
        # instance were removed here (no behaviour change).
        batch_x = np.array(batch_x, np.float32) / 255
        batch_y = y_train[batch_ids]
        yield (batch_x, batch_y)
def get_mixer(p=1, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3):
    """Return a CutMix-style mixer that also interpolates the labels.

    With probability ``p`` the returned function pastes a random rectangle
    of ``img2`` into ``img1`` (area fraction in [s_l, s_h], aspect ratio
    in [r_1, r_2]) and returns the patched image together with the labels
    blended by the patch's area fraction.  ``img1`` is modified in place.
    This definition shadows the earlier ``get_mixer`` in this file.
    """
    def mixer(img1, img2, y1, y2):
        img_h, img_w, img_c = img1.shape

        if np.random.rand() > p:
            return img1, y1

        # Re-draw until the patch lies fully inside the image.
        while True:
            s = np.random.uniform(s_l, s_h) * img_h * img_w
            r = np.random.uniform(r_1, r_2)
            w = int(np.sqrt(s / r))
            h = int(np.sqrt(s * r))
            left = np.random.randint(0, img_w)
            top = np.random.randint(0, img_h)
            if left + w <= img_w and top + h <= img_h:
                break

        img1[top:top + h, left:left + w, :] = img2[top:top + h, left:left + w, :]
        frac = h / img_h * w / img_w
        return img1, (1 - frac) * y1 + frac * y2

    return mixer
def mix_generator(X_train, Y_train, img_dir, batch_size, shuffle=True):
    """Mixup generator (Zhang et al., 2017): convex-combine pairs of batches.

    Two independent augmented streams from ``train_generator`` are blended
    per-sample with weights drawn from Beta(alpha, alpha); images and
    one-hot labels are interpolated with the same weight.
    """
    alpha = 1
    gen_a = train_generator(X_train, Y_train, img_dir, batch_size, shuffle)
    gen_b = train_generator(X_train, Y_train, img_dir, batch_size, shuffle)
    while True:
        x_a, y_a = next(gen_a)
        x_b, y_b = next(gen_b)
        lam = np.random.beta(alpha, alpha, x_a.shape[0])
        lam_x = lam.reshape(-1, 1, 1, 1)  # broadcast over H, W, C
        lam_y = lam.reshape(-1, 1)        # broadcast over classes
        yield (x_a * lam_x + x_b * (1 - lam_x),
               y_a * lam_y + y_b * (1 - lam_y))
def mix_generator2(X_train, Y_train, img_dir, batch_size, shuffle=True):
    """Stack CutMix-style patching on top of two independent mixup streams.

    Each sample of one mixup batch receives a rectangular patch (and the
    matching label fraction) from the corresponding sample of a second
    mixup batch.  ``alpha`` is assigned but unused here -- the Beta
    parameter lives inside ``mix_generator``.
    """
    alpha = 0.2
    gen_a = mix_generator(X_train, Y_train, img_dir, batch_size, shuffle)
    gen_b = mix_generator(X_train, Y_train, img_dir, batch_size, shuffle)
    mixer = get_mixer()
    while True:
        x_a, y_a = next(gen_a)
        x_b, y_b = next(gen_b)
        mixed = [mixer(x_a[i], x_b[i], y_a[i], y_b[i])
                 for i in range(x_a.shape[0])]
        batch_x = np.array([pair[0] for pair in mixed])
        batch_y = np.array([pair[1] for pair in mixed])
        yield (batch_x, batch_y)
def test_generator(x_train, img_dir, batch_size, shuffle=True):
    """Yield image-only batches forever, for prediction.

    ``x_train`` holds image file paths (already including their
    directory; ``img_dir`` is unused).  Images are read sequentially,
    resized to (input_size, input_size), and scaled to float32 in [0, 1].
    No labels are yielded -- use with ``predict_generator``.
    """
    batch_index = 0
    n = x_train.shape[0]
    # NOTE(review): this eraser is constructed but never applied below.
    eraser = get_random_eraser(v_h=0.)
    while 1:
        if batch_index == 0:
            index_array = np.arange(n)
            if shuffle:
                index_array = np.random.permutation(n)
        current_index = (batch_index * batch_size) % n
        if n >= current_index + batch_size:
            current_batch_size = batch_size
            batch_index += 1
        else:
            # Last (short) batch; restart from the top next iteration.
            current_batch_size = n - current_index
            batch_index = 0
        batch_x = []
        batch_id = index_array[current_index: current_index + current_batch_size]
        for id in batch_id:
            img = cv2.imread('{}'.format(x_train[id]))
            img = cv2.resize(img, (input_size, input_size))
            batch_x.append(img)
        batch_x = np.array(batch_x, np.float32) / 255
        yield batch_x
def load_data(train_path="input/train_master.tsv", test_path="input/sample_submit.tsv"):
    """Load the training labels and the submission skeleton.

    Parameters
    ----------
    train_path : str
        TSV with header columns ``file_name`` and ``category_id``.
    test_path : str
        Header-less TSV whose first column is the test file name.

    Returns
    -------
    tuple
        ``(X_train, y_train, X_test)`` where ``X_train`` is an ndarray of
        file names, ``y_train`` an ndarray of integer class ids, and
        ``X_test`` a pandas Series of test file names.

    Fix: ``DataFrame.as_matrix()`` was deprecated and removed in pandas
    1.0; the ``.values`` accessor returns the same ndarray and works on
    both old and current pandas.
    """
    train = pd.read_csv(train_path, delimiter="\t", index_col=False)
    test = pd.read_csv(test_path, delimiter="\t", index_col=False, header=None)
    print("train shape", train.shape)
    print(train.head())
    X_train = train['file_name'].values
    y_train = train['category_id'].values
    X_test = test.iloc[:, 0]
    return X_train, y_train, X_test
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
from sklearn.metrics import log_loss
def train(epochs, seed):
    """Run the full fine-tuning pipeline and write predictions to disk.

    Loads labels, builds a pseudo-labelled training set (predictions of a
    previous run on the test set are concatenated into the training data),
    fine-tunes DenseNet121 from a prior checkpoint, then saves the
    submission TSV and raw probability arrays.

    Parameters
    ----------
    epochs : int
        Number of training epochs.
    seed : int
        Unused; kept by the caller's signature.
    """
    # parameters
    batch_size = 64
    num_class = 55
    save_path = base_name
    model_path = "_"  # NOTE(review): unused
    # Load data
    X_train, y_train, X_test = load_data()
    # Pseudo-labels: reuse the previous run's test-set predictions as labels.
    y_test = pd.read_csv("output/submissionevaluate_180210_1_Dense_5th_training_ts1213_seed0testaugx8rot90_val_loss0.23128337343533834_val_acc0.9183333333333333.tsv",
                         delimiter="\t", index_col=False, header=None)
    y_test = y_test.as_matrix()[:,1].astype(np.uint8)
    # Prepend the image directories to the bare file names.
    X_train = "input/train/" + X_train
    X_test = "input/test/" + X_test
    print(X_train[:10])
    print(X_train.shape, y_train.shape)
    print(X_test.shape, y_test.shape)
    # Stratified 95/5 train/holdout split of the labelled data.
    ids_train_split, ids_valid_split = train_test_split(np.arange(X_train.shape[0]),
                                                        random_state=42, test_size=0.05,
                                                        stratify=y_train)
    # Build the actual training set: labelled split + pseudo-labelled test set.
    X_train_cv = X_train[ids_train_split]
    y_train_cv = y_train[ids_train_split]
    X_train_cv = np.concatenate([X_train_cv, X_test])
    y_train_cv = np.concatenate([y_train_cv, y_test])
    X_holdout = X_train[ids_valid_split]
    Y_holdout = y_train[ids_valid_split]
    # Checkpoint path and callbacks (best-val-loss checkpointing + LR schedule).
    weight_path = "model/" + save_path + '.hdf5'
    callbacks = get_callbacks(weight_path, patience=16)
    # Generators: augmented training, plain validation, prediction-only streams.
    gen = train_generator(X_train_cv, y_train_cv, "input/train", batch_size)
    gen_val = valid_generator(X_holdout, Y_holdout, "input/train", batch_size, shuffle=False)
    gen_val_pred = test_generator(X_holdout, "input/train", batch_size, shuffle=False)
    gen_tst_pred = test_generator(X_test, "input/test", batch_size, shuffle=False)
    model = get_model(num_class)
    # Warm-start from the 4th-round checkpoint (fine-tuning continuation).
    model.load_weights(filepath="model/train_180201_2_Dense_4th_training_ts2017.hdf5")
    model.fit_generator(generator=gen,
                        steps_per_epoch=np.ceil(X_train_cv.shape[0] / batch_size),
                        epochs=epochs,
                        verbose=1,
                        callbacks=callbacks,
                        validation_data=gen_val,
                        validation_steps=np.ceil(X_holdout.shape[0] / batch_size),
                        )
    # Keep the last-epoch weights, then restore the best checkpoint.
    model.save_weights(filepath=weight_path[:-4] + "_nostop.hdf5")
    model.load_weights(filepath=weight_path)
    # Score the holdout set with the best weights.
    score = model.evaluate_generator(generator=gen_val,
                                     steps=np.ceil(X_holdout.shape[0]/batch_size))
    print('Valid loss:', score[0])
    print('Valid accuracy:', score[1])
    # Holdout probabilities (saved for later ensembling/stacking).
    pred_valid = model.predict_generator(generator=gen_val_pred,
                                         steps=np.ceil(X_holdout.shape[0]/batch_size))
    # Test probabilities -> submission file named after the holdout score.
    pred_test = model.predict_generator(generator=gen_tst_pred,
                                        steps=np.ceil(X_test.shape[0]/batch_size))
    submission = pd.DataFrame({'id': X_test, 'predict': np.argmax(pred_test, axis=1)})
    submit_path = "output/submission" + save_path + "_val_loss" + str(score[0]) + "_val_acc" + str(score[1]) + ".tsv"
    submission.to_csv(submit_path, index=False, header=False, sep='\t')
    np.save("input/" + base_name + "_valid.npy", pred_valid)
    np.save("input/" + base_name + "_test.npy", pred_test)
def main():
    """Entry point: run the full training/prediction pipeline."""
    train(epochs=250, seed=0)


if __name__ == "__main__":
    main()
| 34.418018 | 166 | 0.58753 | import numpy as np
np.random.seed(42)
import pandas as pd
from sklearn.model_selection import train_test_split
from matplotlib import pyplot
import time
import os, glob
import cv2
format = "%H%M"
ts = time.strftime(format)
base_name = os.path.splitext(__file__)[0] + "_ts" + ts
input_size = 221
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Input, Flatten, GaussianNoise
from keras.layers import GlobalMaxPooling2D, Reshape, UpSampling3D, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import Concatenate
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping, CSVLogger, ReduceLROnPlateau, LearningRateScheduler
from keras import backend as K
def get_callbacks(save_path, lr=0.001, patience=64):
    """Assemble the Keras callback list for one training run.

    :param save_path: checkpoint path; also prefixes the CSV training log
    :param lr: base learning rate used for the ReduceLROnPlateau floor
    :param patience: NOTE(review): not used by the returned callbacks
    :return: [CSVLogger, ModelCheckpoint, LearningRateScheduler]
    """
    csv_logger = CSVLogger(save_path + '_log.csv', append=True)
    check_path = save_path
    # keep only the best model (by validation loss) at check_path
    save_checkpoint = ModelCheckpoint(filepath=check_path, monitor='val_loss', save_best_only=True)
    # NOTE(review): created but never added to the returned list
    lerning_rate_schedular = ReduceLROnPlateau(patience=8, min_lr=lr * 0.00001)
    def lrs(epoch):
        # fixed step schedule: 1e-6, then 1e-7 from epoch 100, 1e-8 from 200
        if epoch<100:
            return 1e-6
        elif epoch<200:
            return 1e-7
        else:
            return 1e-8
    learning_rate_Schedular = LearningRateScheduler(lambda epoch: lrs(epoch))
    # NOTE(review): early_stopping is instantiated but not returned either
    early_stopping = EarlyStopping(monitor='val_loss',
                                   patience=16,
                                   verbose=1,
                                   min_delta=1e-4,
                                   mode='min')
    Callbacks = [csv_logger,
                 save_checkpoint,
                 learning_rate_Schedular,
                 ]
    return Callbacks
def swish(x):
    """Swish activation: x * sigmoid(x), computed with the Keras backend."""
    return x * K.sigmoid(x)
from keras.applications.vgg16 import VGG16
from keras.applications.densenet import DenseNet121
from keras.optimizers import SGD, Adam
from keras.layers import GlobalAveragePooling2D
def get_model(num_class):
    """Build a DenseNet121 classifier for ``num_class`` classes.

    The backbone is created without pretrained weights and without its top;
    a GlobalAveragePooling + softmax Dense head is attached and the model is
    compiled with Adam(lr=1e-4) and categorical cross-entropy.
    """
    base_model = DenseNet121(weights=None, include_top=False,
                             input_shape=[input_size,input_size,3], classes=1)
    # tap the final batch-norm layer of the DenseNet backbone
    x = base_model.get_layer("bn").output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(num_class, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    # NOTE(review): sgd is built but unused; Adam below is the active optimizer
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    optimizer = Adam(lr=0.0001)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    """With probability ``u``, jitter an image's hue/saturation/value.

    The image is converted BGR->HSV, each channel is shifted by a value
    drawn uniformly from its (low, high) limit, then converted back.
    Returned unchanged with probability 1 - u.
    """
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        # cv2.add saturates to the dtype range; hue is clipped, not wrapped
        # NOTE(review): presumably a uint8 image — confirm float shifts are OK
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
    return image
def randomShiftScaleRotate(image,
                           shift_limit=(-0.0625, 0.0625),
                           scale_limit=(-0.1, 0.1),
                           rotate_limit=(-45, 45), aspect_limit=(0, 0),
                           borderMode=cv2.BORDER_CONSTANT, u=0.5):
    """With probability ``u``, apply a random shift/scale/rotate/aspect warp.

    Limits are (low, high) ranges: shifts are fractions of width/height,
    scale and aspect are multiplicative offsets around 1, rotation is in
    degrees.  Uncovered border pixels are filled with black.
    """
    if np.random.random() < u:
        height, width, channel = image.shape
        angle = np.random.uniform(rotate_limit[0], rotate_limit[1])
        scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
        aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
        # split the overall scale into per-axis factors
        sx = scale * aspect / (aspect ** 0.5)
        sy = scale / (aspect ** 0.5)
        dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
        dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)
        cc = np.math.cos(angle / 180 * np.math.pi) * sx
        ss = np.math.sin(angle / 180 * np.math.pi) * sy
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])
        # map the corners through rotate+scale about the centre, then shift,
        # and derive the perspective transform between the two quads
        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])
        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                    borderValue=(
                                        0, 0,
                                        0,))
    return image
def randomHorizontalFlip(image, u=0.5):
    """Mirror ``image`` left-right with probability ``u``."""
    flip_now = np.random.random() < u
    return cv2.flip(image, 1) if flip_now else image
def randomVerticalFlip(image, u=0.5):
    """Mirror ``image`` top-bottom with probability ``u``."""
    flip_now = np.random.random() < u
    return cv2.flip(image, 0) if flip_now else image
def get_mixer(p=0.5, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3):
    """Build a cut-and-paste (CutMix-style) augmenter.

    The returned callable pastes a random rectangle of ``img2`` onto ``img1``
    (in place) with probability ``p`` and returns the image together with the
    area-weighted blend of the two labels.

    Bug fix: the original returned an undefined name ``mask1`` (NameError
    whenever the mix fired); the label is now mixed in proportion to the
    pasted area, matching the second ``get_mixer`` definition in this file.
    """
    def mixer(img1, img2, y1, y2):
        img_h, img_w, img_c = img1.shape
        p_1 = np.random.rand()
        if p_1 > p:  # skip augmentation
            return img1, y1
        # rejection-sample a rectangle with area fraction in [s_l, s_h] and
        # aspect ratio in [r_1, r_2] that fits inside the image
        while True:
            s = np.random.uniform(s_l, s_h) * img_h * img_w
            r = np.random.uniform(r_1, r_2)
            w = int(np.sqrt(s / r))
            h = int(np.sqrt(s * r))
            left = np.random.randint(0, img_w)
            top = np.random.randint(0, img_h)
            if left + w <= img_w and top + h <= img_h:
                break
        img1[top:top + h, left:left + w, :] = img2[top:top + h, left:left + w, :]
        # label mixed in proportion to the pasted area (was: undefined mask1)
        frac = h / img_h * w / img_w
        y = (1 - frac) * y1 + frac * y2
        return img1, y
    return mixer
def get_random_eraser(p=0.5, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3, v_l=0, v_h=255, pixel_level=False):
    """Build a Random Erasing augmenter.

    The returned callable blanks one random rectangle of the input image
    (in place) with probability ``p``.  The rectangle covers an area
    fraction in [s_l, s_h] with aspect ratio in [r_1, r_2]; fill values
    come from [v_l, v_h], drawn per pixel when ``pixel_level`` is true.
    """
    def eraser(input_img):
        img_h, img_w, img_c = input_img.shape
        if np.random.rand() > p:
            return input_img
        # keep sampling until the candidate rectangle fits in the image
        while True:
            area = np.random.uniform(s_l, s_h) * img_h * img_w
            ratio = np.random.uniform(r_1, r_2)
            rect_w = int(np.sqrt(area / ratio))
            rect_h = int(np.sqrt(area * ratio))
            x0 = np.random.randint(0, img_w)
            y0 = np.random.randint(0, img_h)
            if x0 + rect_w <= img_w and y0 + rect_h <= img_h:
                break
        if pixel_level:
            fill = np.random.uniform(v_l, v_h, (rect_h, rect_w, img_c))
        else:
            fill = np.random.uniform(v_l, v_h)
        input_img[y0:y0 + rect_h, x0:x0 + rect_w, :] = fill
        return input_img
    return eraser
from multiprocessing import Pool
def load_img(args):
    """Multiprocessing worker: load one image and apply training-time
    augmentation (HSV jitter, shift/scale/rotate, horizontal flip).

    :param args: path to the image file
    :return: augmented BGR image resized to (input_size, input_size)
    """
    img_path = args
    img = cv2.imread(img_path)
    img = cv2.resize(img, (input_size, input_size))
    img = randomHueSaturationValue(img,
                                   hue_shift_limit=(-50, 50),
                                   sat_shift_limit=(-5, 5),
                                   val_shift_limit=(-15, 15),
                                   u=0.25)
    img = randomShiftScaleRotate(img,
                                 shift_limit=(-0.2, 0.2),
                                 scale_limit=(-0.2, 0.5),
                                 rotate_limit=(-30, 30),
                                 aspect_limit=(-0.2, 0.2),
                                 u=0.25)
    img = randomHorizontalFlip(img)
    return img
def load_img_valid(args):
    """Load one image without augmentation, resized to the model input size."""
    return cv2.resize(cv2.imread(args), (input_size, input_size))
def train_generator(x_train, y_train, img_dir, batch_size, shuffle=True):
    """Infinite batch generator for training.

    Yields (batch_images, batch_onehot); images are loaded and augmented in
    parallel (8 workers) via ``load_img`` and scaled to [0, 1].

    :param x_train: array of image file paths
    :param y_train: integer class ids (one-hot encoded to 55 classes)
    :param img_dir: unused; paths in ``x_train`` are already complete
    :param batch_size: nominal batch size (last batch of an epoch is smaller)
    :param shuffle: reshuffle the order at the start of every epoch
    """
    y_train = np.eye(55)[y_train]  # one-hot; 55 = number of classes
    batch_index = 0
    n = x_train.shape[0]
    # NOTE(review): eraser is created but never applied in this generator
    eraser = get_random_eraser(v_h=0.)
    pool = Pool(8)
    while 1:
        if batch_index == 0:
            index_array = np.arange(n)
            if shuffle:
                index_array = np.random.permutation(n)
        current_index = (batch_index * batch_size) % n
        if n >= current_index + batch_size:
            current_batch_size = batch_size
            batch_index += 1
        else:  # final (short) batch of this epoch
            current_batch_size = n - current_index
            batch_index = 0
        batch_id = index_array[current_index: current_index + current_batch_size]
        batch_x = pool.map(load_img,
                           ['{}'.format(x_train[id])
                            for id in batch_id])
        # NOTE(review): this loop is a no-op (reads and writes back unchanged)
        for id in range(len(batch_x)):
            img = batch_x[id]
            batch_x[id] = img
        batch_x = np.array(batch_x, np.float32) / 255
        batch_y = y_train[index_array[current_index: current_index + current_batch_size]]
        yield (batch_x, batch_y)
def valid_generator(x_train, y_train, img_dir, batch_size, shuffle=True):
    """Infinite batch generator for validation (no augmentation).

    Same batching scheme as ``train_generator`` but loads images with
    ``load_img_valid`` using a 4-worker pool.  Use shuffle=False so batches
    stay aligned with the label order.
    """
    y_train = np.eye(55)[y_train]  # one-hot; 55 = number of classes
    batch_index = 0
    n = x_train.shape[0]
    # NOTE(review): eraser is created but never used
    eraser = get_random_eraser(v_h=0.)
    pool = Pool(4)
    while 1:
        if batch_index == 0:
            index_array = np.arange(n)
            if shuffle:
                index_array = np.random.permutation(n)
        current_index = (batch_index * batch_size) % n
        if n >= current_index + batch_size:
            current_batch_size = batch_size
            batch_index += 1
        else:  # final (short) batch of this pass
            current_batch_size = n - current_index
            batch_index = 0
        batch_id = index_array[current_index: current_index + current_batch_size]
        batch_x = pool.map(load_img_valid,
                           ['{}'.format(x_train[id])
                            for id in batch_id])
        # NOTE(review): this loop is a no-op (reads and writes back unchanged)
        for id in range(len(batch_x)):
            img = batch_x[id]
            batch_x[id] = img
        batch_x = np.array(batch_x, np.float32) / 255
        batch_y = y_train[index_array[current_index: current_index + current_batch_size]]
        yield (batch_x, batch_y)
def get_mixer(p=1, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3):
    """Build a cut-and-paste mixer (CutMix-style).

    The returned callable pastes a random rectangle of ``img2`` onto
    ``img1`` (in place) with probability ``p`` and returns the image plus
    the area-weighted blend of the two labels.
    """
    def mixer(img1, img2, y1, y2):
        img_h, img_w, img_c = img1.shape
        if np.random.rand() > p:
            return img1, y1
        # rejection-sample a rectangle that fits inside the image
        while True:
            area = np.random.uniform(s_l, s_h) * img_h * img_w
            ratio = np.random.uniform(r_1, r_2)
            w = int(np.sqrt(area / ratio))
            h = int(np.sqrt(area * ratio))
            left = np.random.randint(0, img_w)
            top = np.random.randint(0, img_h)
            if left + w <= img_w and top + h <= img_h:
                break
        img1[top:top + h, left:left + w, :] = img2[top:top + h, left:left + w, :]
        frac = h / img_h * w / img_w
        return img1, (1 - frac) * y1 + frac * y2
    return mixer
def mix_generator(X_train, Y_train, img_dir, batch_size, shuffle=True):
    """Mixup generator: blends pairs of training batches.

    Draws per-sample mixing weights from Beta(1, 1) and yields convex
    combinations of two independently shuffled ``train_generator`` streams.
    """
    alpha = 1
    gen1 = train_generator(X_train, Y_train, img_dir, batch_size, shuffle)
    gen2 = train_generator(X_train, Y_train, img_dir, batch_size, shuffle)
    while True:
        batch1 = next(gen1)
        batch2 = next(gen2)
        current_batch_size = batch1[0].shape[0]
        l = np.random.beta(alpha, alpha, current_batch_size)
        X_l = l.reshape(current_batch_size, 1, 1, 1)  # broadcast over H,W,C
        Y_l = l.reshape(current_batch_size, 1)  # broadcast over classes
        batch_x = batch1[0] * X_l + batch2[0] * (1 - X_l)
        batch_y = batch1[1] * Y_l + batch2[1] * (1 - Y_l)
        yield (batch_x, batch_y)
def mix_generator2(X_train, Y_train, img_dir, batch_size, shuffle=True):
    """Mixup + CutMix generator: applies the rectangle mixer per sample on
    top of two mixup streams.

    NOTE(review): ``alpha`` below is unused; the Beta parameter is fixed
    inside ``mix_generator``.
    """
    alpha = 0.2
    gen1 = mix_generator(X_train, Y_train, img_dir, batch_size, shuffle)
    gen2 = mix_generator(X_train, Y_train, img_dir, batch_size, shuffle)
    mixer =get_mixer()
    while True:
        batch1 = next(gen1)
        batch2 = next(gen2)
        batch_x = []
        batch_y = []
        for i in range(batch1[0].shape[0]):
            x1, y1 = batch1[0][i], batch1[1][i]
            x2, y2 = batch2[0][i], batch2[1][i]
            new_x, new_y = mixer(x1, x2, y1, y2)
            batch_x.append(new_x)
            batch_y.append(new_y)
        batch_x = np.array(batch_x)
        batch_y = np.array(batch_y)
        batch = (batch_x, batch_y)
        yield batch
def test_generator(x_train, img_dir, batch_size, shuffle=True):
    """Infinite batch generator for inference: yields image batches only.

    Images are loaded serially (no augmentation, no worker pool) and scaled
    to [0, 1].  Use shuffle=False so predictions stay aligned with paths.
    """
    batch_index = 0
    n = x_train.shape[0]
    # NOTE(review): eraser is created but never used
    eraser = get_random_eraser(v_h=0.)
    while 1:
        if batch_index == 0:
            index_array = np.arange(n)
            if shuffle:
                index_array = np.random.permutation(n)
        current_index = (batch_index * batch_size) % n
        if n >= current_index + batch_size:
            current_batch_size = batch_size
            batch_index += 1
        else:  # final (short) batch of this pass
            current_batch_size = n - current_index
            batch_index = 0
        batch_x = []
        batch_id = index_array[current_index: current_index + current_batch_size]
        for id in batch_id:
            img = cv2.imread('{}'.format(x_train[id]))
            img = cv2.resize(img, (input_size, input_size))
            batch_x.append(img)
        batch_x = np.array(batch_x, np.float32) / 255
        yield batch_x
def load_data(train_path="input/train_master.tsv", test_path="input/sample_submit.tsv"):
    """Load the labelled training table and the test file listing.

    :param train_path: TSV with 'file_name' and 'category_id' columns
    :param test_path: headerless TSV whose first column is the test file name
    :return: (X_train file names, y_train category ids, X_test file names)
    """
    train = pd.read_csv(train_path, delimiter="\t", index_col=False)
    test = pd.read_csv(test_path, delimiter="\t", index_col=False, header=None)
    print("train shape", train.shape)
    print(train.head())
    # .values instead of DataFrame.as_matrix(), which was removed in pandas 0.25
    X_train = train['file_name'].values
    y_train = train['category_id'].values
    X_test = test.iloc[:, 0]
    return X_train, y_train, X_test
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
from sklearn.metrics import log_loss
def train(epochs, seed):
    """Fine-tune the DenseNet classifier with pseudo-labelled test data.

    Splits 5% of the labelled data off as a stratified holdout, appends the
    test images with labels taken from a previous submission file
    (pseudo-labelling), trains from a warm-start checkpoint, then writes a
    submission TSV and saves validation/test probabilities for ensembling.

    :param epochs: number of training epochs
    :param seed: unused (the split uses random_state=42)
    """
    batch_size = 64
    num_class = 55
    save_path = base_name
    model_path = "_"
    X_train, y_train, X_test = load_data()
    # pseudo-labels for the test set, read from an earlier submission file
    y_test = pd.read_csv("output/submissionevaluate_180210_1_Dense_5th_training_ts1213_seed0testaugx8rot90_val_loss0.23128337343533834_val_acc0.9183333333333333.tsv",
                         delimiter="\t", index_col=False, header=None)
    # NOTE(review): DataFrame.as_matrix() was removed in pandas 0.25;
    # switch to .values/.to_numpy() when upgrading pandas
    y_test = y_test.as_matrix()[:,1].astype(np.uint8)
    X_train = "input/train/" + X_train
    X_test = "input/test/" + X_test
    print(X_train[:10])
    print(X_train.shape, y_train.shape)
    print(X_test.shape, y_test.shape)
    # stratified 95/5 split of the labelled data
    ids_train_split, ids_valid_split = train_test_split(np.arange(X_train.shape[0]),
                                                        random_state=42, test_size=0.05,
                                                        stratify=y_train)
    X_train_cv = X_train[ids_train_split]
    y_train_cv = y_train[ids_train_split]
    # append the pseudo-labelled test images to the training pool
    X_train_cv = np.concatenate([X_train_cv, X_test])
    y_train_cv = np.concatenate([y_train_cv, y_test])
    X_holdout = X_train[ids_valid_split]
    Y_holdout = y_train[ids_valid_split]
    weight_path = "model/" + save_path + '.hdf5'
    callbacks = get_callbacks(weight_path, patience=16)
    gen = train_generator(X_train_cv, y_train_cv, "input/train", batch_size)
    gen_val = valid_generator(X_holdout, Y_holdout, "input/train", batch_size, shuffle=False)
    gen_val_pred = test_generator(X_holdout, "input/train", batch_size, shuffle=False)
    gen_tst_pred = test_generator(X_test, "input/test", batch_size, shuffle=False)
    model = get_model(num_class)
    # warm-start from a previous training run's weights
    model.load_weights(filepath="model/train_180201_2_Dense_4th_training_ts2017.hdf5")
    model.fit_generator(generator=gen,
                        steps_per_epoch=np.ceil(X_train_cv.shape[0] / batch_size),
                        epochs=epochs,
                        verbose=1,
                        callbacks=callbacks,
                        validation_data=gen_val,
                        validation_steps=np.ceil(X_holdout.shape[0] / batch_size),
                        )
    # keep the final weights, then restore the checkpointed best model
    model.save_weights(filepath=weight_path[:-4] + "_nostop.hdf5")
    model.load_weights(filepath=weight_path)
    score = model.evaluate_generator(generator=gen_val,
                                     steps=np.ceil(X_holdout.shape[0]/batch_size))
    print('Valid loss:', score[0])
    print('Valid accuracy:', score[1])
    pred_valid = model.predict_generator(generator=gen_val_pred,
                                         steps=np.ceil(X_holdout.shape[0]/batch_size))
    pred_test = model.predict_generator(generator=gen_tst_pred,
                                        steps=np.ceil(X_test.shape[0]/batch_size))
    # submission: image path + argmax class; probabilities saved for ensembling
    submission = pd.DataFrame({'id': X_test, 'predict': np.argmax(pred_test, axis=1)})
    submit_path = "output/submission" + save_path + "_val_loss" + str(score[0]) + "_val_acc" + str(score[1]) + ".tsv"
    submission.to_csv(submit_path, index=False, header=False, sep='\t')
    np.save("input/" + base_name + "_valid.npy", pred_valid)
    np.save("input/" + base_name + "_test.npy", pred_test)
def main():
    """Entry point: run training for 250 epochs with RNG seed 0."""
    train(epochs=250, seed=0)
# run only when executed as a script, not on import
if __name__ == "__main__": main()
| true | true |
1c388762cb270cccc694c3cc35eec77a5c2a4523 | 8,129 | py | Python | opencood/loss/fpvrcnn_loss.py | YuanYunshuang/OpenCOOD | 98e07eb45f7fdcd32518b2cf8f9052f73ca80bec | [
"Apache-2.0"
] | null | null | null | opencood/loss/fpvrcnn_loss.py | YuanYunshuang/OpenCOOD | 98e07eb45f7fdcd32518b2cf8f9052f73ca80bec | [
"Apache-2.0"
] | null | null | null | opencood/loss/fpvrcnn_loss.py | YuanYunshuang/OpenCOOD | 98e07eb45f7fdcd32518b2cf8f9052f73ca80bec | [
"Apache-2.0"
] | null | null | null | import torch
from torch import nn
import numpy as np
from opencood.loss.ciassd_loss import CiassdLoss, weighted_smooth_l1_loss
class FpvrcnnLoss(nn.Module):
    """Two-stage loss for FPV-RCNN: stage-1 CIA-SSD loss plus a second-stage
    RCNN loss (classification + IoU estimation + box regression).

    ``args`` must contain 'stage1' (CiassdLoss config) and 'stage2' with
    'cls', 'reg' and 'iou' sub-dicts, each providing a 'weight' entry.
    """
    def __init__(self, args):
        super(FpvrcnnLoss, self).__init__()
        self.ciassd_loss = CiassdLoss(args['stage1'])
        self.cls = args['stage2']['cls']
        self.reg = args['stage2']['reg']
        self.iou = args['stage2']['iou']
        # populated by forward(); read by logging()
        self.loss_dict = {}

    def forward(self, output_dict, label_dict):
        """
        Compute the combined two-stage loss.

        Parameters
        ----------
        output_dict : dict
            Network outputs; second-stage terms are computed only when
            'fpvrcnn_out' is present (stage-2 targets are read from
            output_dict['rcnn_label_dict']).
        label_dict : dict
            Stage-1 (CIA-SSD) targets.

        Returns
        -------
        torch.Tensor
            Scalar total loss.
        """
        ciassd_loss = self.ciassd_loss(output_dict, label_dict)
        # only update ciassd if no bbox is detected in the first stage
        if 'fpvrcnn_out' not in output_dict:
            self.loss_dict.update({
                'loss': ciassd_loss,
                'rcnn_loss': torch.Tensor([0]),
                'cls_loss': torch.Tensor([0]),
                'iou_loss': torch.Tensor([0]),
                'reg_loss': torch.Tensor([0]),
            })
            return ciassd_loss
        # rcnn out
        rcnn_cls = output_dict['fpvrcnn_out']['rcnn_cls'].view(1, -1, 1)
        rcnn_iou = output_dict['fpvrcnn_out']['rcnn_iou'].view(1, -1, 1)
        rcnn_reg = output_dict['fpvrcnn_out']['rcnn_reg'].view(1, -1, 7)
        tgt_cls = output_dict['rcnn_label_dict']['cls_tgt'].view(1, -1, 1)
        tgt_iou = output_dict['rcnn_label_dict']['iou_tgt'].view(1, -1, 1)
        tgt_reg = output_dict['rcnn_label_dict']['reg_tgt'].view(1, -1, 7)
        pos_norm = tgt_cls.sum()  # NOTE(review): computed but unused
        # cls loss
        loss_cls = weighted_sigmoid_binary_cross_entropy(rcnn_cls, tgt_cls)
        # iou loss
        # TODO: also count the negative samples
        loss_iou = weighted_smooth_l1_loss(rcnn_iou, tgt_iou,
                                           weights=tgt_cls).mean()
        # regression loss
        # Target resampling : Generate a weights mask to force the regressor concentrate on low iou predictions
        # sample 50% with iou>0.7 and 50% < 0.7
        weights = torch.ones(tgt_iou.shape, device=tgt_iou.device)
        weights[tgt_cls == 0] = 0
        neg = torch.logical_and(tgt_iou < 0.7, tgt_cls != 0)
        pos = torch.logical_and(tgt_iou >= 0.7, tgt_cls != 0)
        num_neg = int(neg.sum(dim=1))
        num_pos = int(pos.sum(dim=1))
        num_pos_smps = max(num_neg, 2)  # keep at least 2 high-iou samples
        pos_indices = torch.where(pos)[1]
        # randomly zero the weight of high-iou positives beyond num_pos_smps
        not_selsected = torch.randperm(num_pos)[:num_pos - num_pos_smps]
        # not_selsected_indices = pos_indices[not_selsected]
        weights[:, pos_indices[not_selsected]] = 0
        loss_reg = weighted_smooth_l1_loss(rcnn_reg, tgt_reg,
                                           weights=weights / max(weights.sum(),
                                                                 1)).sum()
        loss_cls_reduced = loss_cls * self.cls['weight']
        loss_iou_reduced = loss_iou * self.iou['weight']
        loss_reg_reduced = loss_reg * self.reg['weight']
        # if torch.isnan(loss_reg_reduced):
        #     print('debug')
        rcnn_loss = loss_cls_reduced + loss_iou_reduced + loss_reg_reduced
        loss = rcnn_loss + ciassd_loss
        self.loss_dict.update({
            'loss': loss,
            'rcnn_loss': rcnn_loss,
            'cls_loss': loss_cls_reduced,
            'iou_loss': loss_iou_reduced,
            'reg_loss': loss_reg_reduced,
        })
        return loss

    def logging(self, epoch, batch_id, batch_len, writer):
        """
        Print out the loss function for current iteration.

        Parameters
        ----------
        epoch : int
            Current epoch for training.
        batch_id : int
            The current batch.
        batch_len : int
            Total batch length in one iteration of training,
        writer : SummaryWriter
            Used to visualize on tensorboard
        """
        ciassd_loss_dict = self.ciassd_loss.loss_dict
        ciassd_total_loss = ciassd_loss_dict['total_loss']
        reg_loss = ciassd_loss_dict['reg_loss']
        cls_loss = ciassd_loss_dict['cls_loss']
        dir_loss = ciassd_loss_dict['dir_loss']
        iou_loss = ciassd_loss_dict['iou_loss']
        # console summary every 10 batches
        if (batch_id + 1) % 10 == 0:
            print(
                "[epoch %d][%d/%d], || Loss: %.4f || Ciassd: %.4f || Rcnn: %.4f "
                "|| Cls1: %.4f || Loc1: %.4f || Dir1: %.4f || Iou1: %.4f "
                "|| Cls2: %.4f || Loc2: %.4f || Iou2: %.4f" % (
                    epoch, batch_id + 1, batch_len, self.loss_dict['loss'],
                    ciassd_total_loss.item(), self.loss_dict['rcnn_loss'],
                    cls_loss.item(), reg_loss.item(), dir_loss.item(),
                    iou_loss.item(),
                    self.loss_dict['cls_loss'].item(),
                    self.loss_dict['reg_loss'].item(),
                    self.loss_dict['iou_loss'].item(),
                ))
        # stage-1 (CIA-SSD) scalars
        writer.add_scalar('Ciassd_regression_loss', reg_loss.item(),
                          epoch * batch_len + batch_id)
        writer.add_scalar('Ciassd_Confidence_loss', cls_loss.item(),
                          epoch * batch_len + batch_id)
        writer.add_scalar('Ciassd_Direction_loss', dir_loss.item(),
                          epoch * batch_len + batch_id)
        writer.add_scalar('Ciassd_Iou_loss', iou_loss.item(),
                          epoch * batch_len + batch_id)
        writer.add_scalar('Ciassd_loss', ciassd_total_loss.item(),
                          epoch * batch_len + batch_id)
        # stage-2 (RCNN) scalars, skipped when the total loss is exactly 0
        if not self.loss_dict['loss'].item() == 0:
            writer.add_scalar('Rcnn_regression_loss',
                              self.loss_dict['reg_loss'].item(),
                              epoch * batch_len + batch_id)
            writer.add_scalar('Rcnn_Confidence_loss',
                              self.loss_dict['cls_loss'].item(),
                              epoch * batch_len + batch_id)
            writer.add_scalar('Rcnn_Iou_loss',
                              self.loss_dict['iou_loss'].item(),
                              epoch * batch_len + batch_id)
            writer.add_scalar('Rcnn_loss', self.loss_dict['rcnn_loss'].item(),
                              epoch * batch_len + batch_id)
        writer.add_scalar('Total_loss', self.loss_dict['loss'].item(),
                          epoch * batch_len + batch_id)
def weighted_sigmoid_binary_cross_entropy(preds, tgts, weights=None,
                                          class_indices=None):
    """Sigmoid BCE-with-logits, optionally weighted per anchor and masked to
    a subset of classes.

    ``weights`` (if given) gains a trailing singleton dim so it broadcasts
    over the class axis; ``class_indices`` (honoured only together with
    ``weights``) zeroes out every class not listed.
    """
    if weights is not None:
        weights = weights.unsqueeze(-1)
        if class_indices is not None:
            # mask out classes not listed (in place on the unsqueezed view)
            class_mask = indices_to_dense_vector(class_indices, preds.shape[2])
            weights *= class_mask.view(1, 1, -1).type_as(preds)
    return nn.functional.binary_cross_entropy_with_logits(preds, tgts, weights)
def indices_to_dense_vector(
    indices, size, indices_value=1.0, default_value=0, dtype=np.float32
):
    """Creates dense vector with indices set to specific value and rest to zeros.

    Bug fix: the ``dtype`` argument was previously accepted but ignored (the
    tensor was always created as torch.float32); it is now honoured by
    building the vector in NumPy with the requested dtype and converting.

    Args:
      indices: 1d tensor/array/list with integer indices which are to be set
        to indices_values.
      size: scalar with size (integer) of output vector.
      indices_value: values of elements specified by indices in the output vector
      default_value: values of other elements in the output vector.
      dtype: NumPy data type of the result (default np.float32, matching the
        previous torch.float32 behaviour).

    Returns:
      dense 1-D torch tensor of shape [size] with indices set to
      indices_values and the rest set to default_value.
    """
    dense = np.full(size, default_value, dtype=dtype)
    if torch.is_tensor(indices):
        indices = indices.cpu().numpy()
    dense[indices] = indices_value
    return torch.from_numpy(dense)
| 41.47449 | 111 | 0.559847 | import torch
from torch import nn
import numpy as np
from opencood.loss.ciassd_loss import CiassdLoss, weighted_smooth_l1_loss
class FpvrcnnLoss(nn.Module):
    """Two-stage loss for FPV-RCNN: stage-1 CIA-SSD loss plus a second-stage
    RCNN loss (classification + IoU estimation + box regression)."""
    def __init__(self, args):
        super(FpvrcnnLoss, self).__init__()
        self.ciassd_loss = CiassdLoss(args['stage1'])
        self.cls = args['stage2']['cls']
        self.reg = args['stage2']['reg']
        self.iou = args['stage2']['iou']
        # populated by forward(); read by logging()
        self.loss_dict = {}
    def forward(self, output_dict, label_dict):
        """Return the scalar total loss; stage-2 terms are computed only
        when 'fpvrcnn_out' is present in output_dict."""
        ciassd_loss = self.ciassd_loss(output_dict, label_dict)
        # stage-1 only: no boxes were proposed by the first stage
        if 'fpvrcnn_out' not in output_dict:
            self.loss_dict.update({
                'loss': ciassd_loss,
                'rcnn_loss': torch.Tensor([0]),
                'cls_loss': torch.Tensor([0]),
                'iou_loss': torch.Tensor([0]),
                'reg_loss': torch.Tensor([0]),
            })
            return ciassd_loss
        rcnn_cls = output_dict['fpvrcnn_out']['rcnn_cls'].view(1, -1, 1)
        rcnn_iou = output_dict['fpvrcnn_out']['rcnn_iou'].view(1, -1, 1)
        rcnn_reg = output_dict['fpvrcnn_out']['rcnn_reg'].view(1, -1, 7)
        tgt_cls = output_dict['rcnn_label_dict']['cls_tgt'].view(1, -1, 1)
        tgt_iou = output_dict['rcnn_label_dict']['iou_tgt'].view(1, -1, 1)
        tgt_reg = output_dict['rcnn_label_dict']['reg_tgt'].view(1, -1, 7)
        pos_norm = tgt_cls.sum()  # NOTE(review): computed but unused
        loss_cls = weighted_sigmoid_binary_cross_entropy(rcnn_cls, tgt_cls)
        # IoU regression weighted by the positive mask
        loss_iou = weighted_smooth_l1_loss(rcnn_iou, tgt_iou,
                                           weights=tgt_cls).mean()
        # target resampling: zero weights of excess high-iou positives so the
        # regressor concentrates on low-iou predictions
        weights = torch.ones(tgt_iou.shape, device=tgt_iou.device)
        weights[tgt_cls == 0] = 0
        neg = torch.logical_and(tgt_iou < 0.7, tgt_cls != 0)
        pos = torch.logical_and(tgt_iou >= 0.7, tgt_cls != 0)
        num_neg = int(neg.sum(dim=1))
        num_pos = int(pos.sum(dim=1))
        num_pos_smps = max(num_neg, 2)
        pos_indices = torch.where(pos)[1]
        not_selsected = torch.randperm(num_pos)[:num_pos - num_pos_smps]
        weights[:, pos_indices[not_selsected]] = 0
        loss_reg = weighted_smooth_l1_loss(rcnn_reg, tgt_reg,
                                           weights=weights / max(weights.sum(),
                                                                 1)).sum()
        loss_cls_reduced = loss_cls * self.cls['weight']
        loss_iou_reduced = loss_iou * self.iou['weight']
        loss_reg_reduced = loss_reg * self.reg['weight']
        rcnn_loss = loss_cls_reduced + loss_iou_reduced + loss_reg_reduced
        loss = rcnn_loss + ciassd_loss
        self.loss_dict.update({
            'loss': loss,
            'rcnn_loss': rcnn_loss,
            'cls_loss': loss_cls_reduced,
            'iou_loss': loss_iou_reduced,
            'reg_loss': loss_reg_reduced,
        })
        return loss
    def logging(self, epoch, batch_id, batch_len, writer):
        """Print a console summary every 10 batches and push all loss terms
        to the tensorboard ``writer``."""
        ciassd_loss_dict = self.ciassd_loss.loss_dict
        ciassd_total_loss = ciassd_loss_dict['total_loss']
        reg_loss = ciassd_loss_dict['reg_loss']
        cls_loss = ciassd_loss_dict['cls_loss']
        dir_loss = ciassd_loss_dict['dir_loss']
        iou_loss = ciassd_loss_dict['iou_loss']
        if (batch_id + 1) % 10 == 0:
            print(
                "[epoch %d][%d/%d], || Loss: %.4f || Ciassd: %.4f || Rcnn: %.4f "
                "|| Cls1: %.4f || Loc1: %.4f || Dir1: %.4f || Iou1: %.4f "
                "|| Cls2: %.4f || Loc2: %.4f || Iou2: %.4f" % (
                    epoch, batch_id + 1, batch_len, self.loss_dict['loss'],
                    ciassd_total_loss.item(), self.loss_dict['rcnn_loss'],
                    cls_loss.item(), reg_loss.item(), dir_loss.item(),
                    iou_loss.item(),
                    self.loss_dict['cls_loss'].item(),
                    self.loss_dict['reg_loss'].item(),
                    self.loss_dict['iou_loss'].item(),
                ))
        # stage-1 (CIA-SSD) scalars
        writer.add_scalar('Ciassd_regression_loss', reg_loss.item(),
                          epoch * batch_len + batch_id)
        writer.add_scalar('Ciassd_Confidence_loss', cls_loss.item(),
                          epoch * batch_len + batch_id)
        writer.add_scalar('Ciassd_Direction_loss', dir_loss.item(),
                          epoch * batch_len + batch_id)
        writer.add_scalar('Ciassd_Iou_loss', iou_loss.item(),
                          epoch * batch_len + batch_id)
        writer.add_scalar('Ciassd_loss', ciassd_total_loss.item(),
                          epoch * batch_len + batch_id)
        # stage-2 (RCNN) scalars, skipped when the total loss is exactly 0
        if not self.loss_dict['loss'].item() == 0:
            writer.add_scalar('Rcnn_regression_loss',
                              self.loss_dict['reg_loss'].item(),
                              epoch * batch_len + batch_id)
            writer.add_scalar('Rcnn_Confidence_loss',
                              self.loss_dict['cls_loss'].item(),
                              epoch * batch_len + batch_id)
            writer.add_scalar('Rcnn_Iou_loss',
                              self.loss_dict['iou_loss'].item(),
                              epoch * batch_len + batch_id)
            writer.add_scalar('Rcnn_loss', self.loss_dict['rcnn_loss'].item(),
                              epoch * batch_len + batch_id)
        writer.add_scalar('Total_loss', self.loss_dict['loss'].item(),
                          epoch * batch_len + batch_id)
def weighted_sigmoid_binary_cross_entropy(preds, tgts, weights=None,
                                          class_indices=None):
    """Sigmoid BCE-with-logits, optionally weighted per anchor and masked
    to the classes listed in ``class_indices`` (only when weights given)."""
    if weights is not None:
        weights = weights.unsqueeze(-1)
        if class_indices is not None:
            # zero the weight of every class not listed (in place)
            class_mask = indices_to_dense_vector(class_indices, preds.shape[2])
            weights *= class_mask.view(1, 1, -1).type_as(preds)
    return nn.functional.binary_cross_entropy_with_logits(preds, tgts, weights)
def indices_to_dense_vector(
    indices, size, indices_value=1.0, default_value=0, dtype=np.float32
):
    """Return a 1-D torch tensor of length ``size`` with ``indices`` set to
    ``indices_value`` and every other element set to ``default_value``.

    Bug fix: ``dtype`` was previously accepted but ignored (output was
    always torch.float32); it is now honoured via a NumPy buffer.
    """
    dense = np.full(size, default_value, dtype=dtype)
    if torch.is_tensor(indices):
        indices = indices.cpu().numpy()
    dense[indices] = indices_value
    return torch.from_numpy(dense)
| true | true |
1c38885b0dc3ff66a8b79b9298a5f9ff6c7bc340 | 48,337 | py | Python | pythoms/mzml.py | flowerah/PythoMS | 7d500f20219157657023c8c0a930f580d3768191 | [
"MIT"
] | null | null | null | pythoms/mzml.py | flowerah/PythoMS | 7d500f20219157657023c8c0a930f580d3768191 | [
"MIT"
] | null | null | null | pythoms/mzml.py | flowerah/PythoMS | 7d500f20219157657023c8c0a930f580d3768191 | [
"MIT"
] | null | null | null | """
IGNORE:
CHANGELOG:
-
---2.7 building
to add:
try to extract timepoints and tic from chromatogramList (x values are sorted, so this probably won't work)
IGNORE
"""
import sys
import os
import zlib
import gzip
import base64
import struct
import subprocess
import xml.dom.minidom
import scipy as sci
from random import random
from .progress import Progress
from .spectrum import Spectrum
from .psims import CVParameterSet, stringtodigit
from .tome import resolution, locate_in_list, trimspectrum
# decoding formats for decoding mzML binary data array strings
# maps PSI-MS accession -> [byte order, struct type code] for struct.unpack
decode_formats = {
    'MS:1000519': ['<', 'i'],  # signed 32-bit little-endian integer
    # 'MS:1000520':['',''], # [OBSOLETE] Signed 16-bit float
    'MS:1000521': ['<', 'f'],  # 32-bit precision little-endian floating point conforming to IEEE-754
    # 'q' (not 'l'): with a '<' prefix struct uses standard sizes, where 'l'
    # is only 4 bytes; a 64-bit integer needs the 8-byte 'q' code
    'MS:1000522': ['<', 'q'],  # Signed 64-bit little-endian integer
    'MS:1000523': ['<', 'd'],  # 64-bit precision little-endian floating point conforming to IEEE-754.
}
class BoundsError(Warning):
    """A warning class to handle bounds errors when integrating (used only by PyRSIR)"""
    def __init__(self):
        # per-peak tally of out-of-bounds occurrences
        self.warned = {}

    def printwarns(self):
        """prints the number of warnings if merited"""
        if not self.warned:
            return
        sys.stdout.write('The following peaks exceeded the bounds of the spectrum n number of times:\n')
        for name, count in self.warned.items():
            sys.stdout.write('"%s": %d\n' % (name, count))

    def warn(self, name, intstart, intend, mzstart, mzend):
        """warns the user if there was a mismatch"""
        if name in self.warned:
            self.warned[name] += 1
        else:
            # first occurrence: tell the user, then start counting
            sys.stdout.write(
                '\nThe peak "%s" (%s-%s) is outside of the bounds of the spectrum being summed m/z %.1f-%.1f\n' % (
                    name, str(intstart), str(intend), mzstart, mzend))
            self.warned[name] = 1
def branch_attributes(branch: xml.dom.minidom.Element):
    """
    Pulls all the attributes of an xml.dom.minidom xml branch.
    These are generally things like index, id, etc.

    :param xml.dom.minidom branch: An xml.dom.minidom object.
    :return: A dictionary of attributes with each key being the attribute name and its value being the value of that
        attribute.
    :rtype: dict

    **Notes**

    The script will attempt to convert any values to float or
    integer in order to reduce TypeErrors when trying to use
    the extracted values.
    """
    attributes = {}
    for key, val in branch.attributes.items():
        # convert numeric strings to int/float where possible
        attributes[key] = stringtodigit(val)
    return attributes
def branch_cvparams(branch):
    """
    Interprets an xml branch as CVParams

    :param branch: xml.dom.minidom branch containing cvParam children
    :return: controlled value parameter set with values
    :rtype: CVParameterSet
    """
    params = {}
    for cvParam in branch.getElementsByTagName('cvParam'):
        accession = cvParam.getAttribute('accession')  # accession key
        # collect every attribute except the accession itself, converting
        # numeric strings to int/float where possible
        params[accession] = {
            attribute: stringtodigit(value)
            for attribute, value in cvParam.attributes.items()
            if attribute != 'accession'
        }
    return CVParameterSet(**params)
def file_present(filepath):
    """Return True if *filepath* exists as a file or a directory.

    :param filepath: path relative to the current working directory (or absolute)
    :rtype: bool
    """
    # single boolean expression replaces the mutable-flag pattern
    return os.path.isfile(filepath) or os.path.isdir(filepath)
def decodeformat(p: CVParameterSet, speclen: int):
    """
    Determines the decode format from the accession parameter

    :param p: extracted CVParameterSet of the data array
    :param speclen: length of the spectrum (retrievable from the XML file)
    :return: struct decode format string, or None if no known accession matches
    :rtype: str
    """
    # intersect the known accessions with those present in the parameter set
    matched = set(decode_formats) & p.keys()
    for key in matched:
        byte_order, type_code = decode_formats[key]
        return f'{byte_order}{speclen}{type_code}'
def gettext(nodelist):
    """gets text from a simple XML object"""
    # concatenate the data of every text node, ignoring other node types
    return ''.join(
        node.data for node in nodelist if node.nodeType == node.TEXT_NODE
    )
def extract_spectrum(spectrum: xml.dom.minidom.Element, units: bool = False):
    """
    Extracts and converts binary data to two lists.

    :param spectrum: A spectrum branch element. This element is expected to have two child nodes containing
        binaryDataArrays.
    :param units: whether to extract the units from the spectrum
    :return: list of decoded data arrays (typically [m/z values, intensities]),
        with the corresponding unit CV parameters appended when units is True
    """
    # spectrum length (defined in the spectrum attributes)
    speclen = int(spectrum.getAttribute('defaultArrayLength'))
    out = []
    if units is True:
        units = []  # repurpose the flag as the collected-units list
    for binary in spectrum.getElementsByTagName('binaryDataArray'):
        p = branch_cvparams(binary)  # grab cvparameters
        # determine whether the binary string is zlib compressed
        compressed = True if 'MS:1000574' in p else False
        # determine unpack format
        unpack_format = decodeformat(p, speclen)
        # pull the binary string
        string = gettext(binary.getElementsByTagName('binary')[0].childNodes)
        # decode the string
        decoded = base64.standard_b64decode(string)
        # if the string is compressed, decompress
        if compressed is True:
            decoded = zlib.decompress(decoded)
        # unpack the string
        out.append(list(struct.unpack(unpack_format, decoded)))
        if units is not False:
            # record the first CV parameter of this array that carries a unit
            for cv in p:
                if cv.unit is not None:
                    units.append(cv.unit)
                    break
    if units is not False:  # extends the units onto out
        out.extend(units)
    return out
def pw_convert(filename, bit=64, compression=True, gzip=True, verbose=True):
    r"""
    Runs msconvert.exe from ProteoWizard to convert Waters .RAW format to .mzXML
    which can then be parsed by python.
    module requirements: os, subprocess, sys
    ProteoWizard must be installed for this script to function.
    go to
    http://proteowizard.sourceforge.net/downloads.shtml
    to download
    This script assumes that the ProteoWizard is installed under either
    c:\program files\proteowizard
    or
    c:\program files (x86)\proteowizard
    If you use this python script to convert to mzML, you should cite the paper of the folks who wrote the program
    Chambers, M.C. Nature Biotechnology 2012, 30, 918-920
    doi 10.1038/nbt.2377
    """
    def find_all(fname, path):
        """
        Finds all files of a given name within a specified directory.
        Adapted from http://stackoverflow.com/questions/1724693/find-a-file-in-python
        Module dependancies: os
        """
        locations = []
        for root, dirs, files in os.walk(path):
            if fname in files:
                locations.append(os.path.join(root, fname))
        return locations
    if sys.platform != 'win32':
        raise OSError(
            'The function that converts to mzML is limited to Windows operating systems.\n'
            'You can manually convert to *.mzML using the proteowizard standalone package '
            'and supply that mzML file to this script')
    locs = []
    for val in ['c:\\program files\\proteowizard',
                'c:\\program files (x86)\\proteowizard']:  # searches for msconvert.exe in expected folders
        locs.extend(find_all('msconvert.exe', val))
    if len(locs) == 0:  # if script cannot find msconvert.exe
        raise IOError(
            'The python script could not find msconvert.exe\n'
            'Please ensure that ProteoWizard is installed in either:\n'
            'c:\\program files\\proteowizard\nor\nc:\\program files (x86)\\proteowizard')
    outname = filename[:-4] + '.mzML'  # expected output file name
    # use the last msconvert.exe found and build its command line
    callstring = locs[-1] + ' "' + filename + '" --mzML'
    if bit in [32, 64]:
        callstring += ' --' + str(bit)
    else:
        raise ValueError(
            'ProteoWizard conversion was called with an invalid floating point precision "%s".' % str(bit))
    if compression is True:  # call for compression
        callstring += ' --zlib'
    exten = '*.mzML'
    if gzip is True:  # call to gzip entire mzml
        callstring += ' --gzip'
        outname += '.gz'
        exten += '.gz'
    print('callstring', callstring)
    if verbose is True:
        callstring += ' --verbose'
        sys.stdout.write('Generating %s file from %s' % (exten, filename))
        sys.stdout.flush()
        subprocess.call(callstring)
        sys.stdout.write(' DONE\n')
        sys.stdout.flush()
    else:
        subprocess.call(callstring)
    return outname
def fix_extension(fn):
    """
    Tries to fix invalid or incomplete file extensions of a supplied file name.

    :param str fn: file name, possibly with a missing or truncated extension
    :return: corrected file name pointing at a file present in the working directory
    :rtype: str
    :raises FileNotFoundError: if no matching file could be located
    """
    def complete(partials):
        """returns the completed file name if fn ends with a partial extension and that file exists"""
        for partial, remainder in partials.items():
            if fn.lower().endswith(partial) and file_present(fn + remainder):
                return fn + remainder
        return None

    # candidate completions keyed by the (lowercase) partial extension the name may end with
    oopsx = {'.mzm': 'l', '.mz': 'ml', '.m': 'zml', '.': 'mzml'}  # incomplete mzml extensions
    oopsr = {'.ra': 'w', '.r': 'aw', '.': 'raw'}  # incomplete raw extensions
    oopsg = {'.mzml.g': 'z', '.mzml.': 'gz', '.mzml': '.gz', '.mzm': 'l.gz', '.mz': 'ml.gz', '.m': 'zml.gz',
             '.': 'mzml.gz'}  # incomplete gz extensions
    # looks for entirely missing extensions first
    if file_present(fn + '.mzml.gz'):
        return fn + '.mzml.gz'
    if file_present(fn + '.mzml'):
        return fn + '.mzml'
    # then tries to complete mzml.gz, mzml, and raw shortenings (in that order)
    for partials in (oopsg, oopsx, oopsr):
        completed = complete(partials)
        if completed is not None:
            return completed
    if file_present(fn + '.raw'):  # finally looks for raw file
        return fn + '.raw'
    raise FileNotFoundError(f'The file {fn} could not be located in the current working directory')
def fps(branch):
    """
    Extracts the function #, process #, and scan # from the id string of a spectrum branch.

    :param branch: ``xml.dom.minidom`` spectrum element whose ``id`` attribute has the
        form ``'function=X process=Y scan=Z'``
    :return: ``[function, process, scan]`` as integers
    :rtype: list
    """
    numbers = []
    for token in branch.getAttribute('id').split():  # e.g. 'function=1'
        numbers.append(int(token.split('=')[1]))  # keep the value to the right of '='
    return numbers
def scan_properties(hand):
    """
    Determines the scan properties of the provided spectrum.

    :param hand: a ``CVParameterSet`` instance, or an XML branch from which one can be built
    :return: dictionary of scan properties: accession key, spectrum name, and type,
        plus mode/level/window details for mass spectra
    :rtype: dict
    :raises ValueError: if the spectrum is MSn with n > 2 (not supported)
    :raises KeyError: if the spectrum type is neither MS nor UV-Vis
    """
    mstypes = {  # ms accession keys and their respective names (for spectrum identification)
        'MS:1000928': 'calibration spectrum',
        'MS:1000294': 'mass spectrum',
        'MS:1000322': 'charge inversion mass spectrum',
        'MS:1000325': 'constant neutral gain spectrum',
        'MS:1000326': 'constant neutral loss spectrum',
        'MS:1000328': 'e/2 mass spectrum',
        'MS:1000341': 'precursor ion spectrum',
        'MS:1000343': 'product ion spectrum',
        'MS:1000579': 'MS1 spectrum',
        'MS:1000580': 'MSn spectrum',
        'MS:1000581': 'CRM spectrum',
        'MS:1000582': 'SIM spectrum',
        'MS:1000583': 'SRM spectrum',
    }
    othertypes = {  # other accession keys (non-MS)
        'MS:1000620': 'PDA spectrum',
        'MS:1000804': 'electromagnetic radiation spectrum',
        'MS:1000805': 'emission spectrum',
        'MS:1000806': 'absorption spectrum',
    }
    # build the cv parameter set if not already handed one
    params = hand if isinstance(hand, CVParameterSet) else CVParameterSet(hand)
    for accession in params.keys() & mstypes.keys():  # mass spectrum accession key present
        properties = {
            'acc': accession,  # accession code
            'name': mstypes[accession],  # name of spectrum
            'type': 'MS',  # it is a mass spectrum
            'level': params['MS:1000511'].value,  # ms level
            'window': [params['MS:1000501'].value, params['MS:1000500'].value],  # scan window
        }
        if 'MS:1000129' in params:  # negative scan
            properties['mode'] = '-'
        elif 'MS:1000130' in params:  # positive scan
            properties['mode'] = '+'
        if 'MS:1000827' in params:  # if there is an isolation window target m/z
            properties['target'] = params['MS:1000827'].value
        # if MSn > 2 with no isolation target, this cannot be handled yet
        elif properties['level'] > 2:
            raise ValueError(
                'This script has not been coded to handle MSn > 2, please contact the author of the class')
        return properties
    for accession in params.keys() & othertypes.keys():  # if the scan is something else
        properties = {
            'acc': accession,  # accession code
            'name': othertypes[accession],  # name of spectrum
        }
        if 'MS:1000804' in params:  # if it is a UV-Vis
            properties['type'] = 'UV'
        else:  # other other type (not handled by script)
            raise KeyError(
                'The script has not been coded to handle spectra types other than MS and UV-Vis. '
                'Please contact the authors to get this functionality included.')
        return properties
class mzML(object):
    """
    A class for loading and extracting data from an mzML mass spectrometric data
    file. See ``__init__`` for parameter documentation.
    """

    def __init__(self,
                 filename: str,
                 verbose: bool = True,
                 precision: int = 64,
                 compression: bool = True,
                 gzip_file: bool = True,
                 obo: str = None,
                 ftt: bool = False,
                 **kwargs
                 ):
        """
        A class for loading and extracting data from an mzML file.

        :param str filename: The name of the mzML or mass spectrometric data file. Accepted file types are listed below,
            and this script can automatically convert some proprietary file types to mzML by calling ProteoWizard
            (see notes).
        :param bool verbose: Chatty enable or disable. It can be useful to enable this when processing large files or long
            acquisitions, as many of the methods have progress reporters.
        :param int precision: The floating point precision to use if converting to mzML. Default 64 (although this
            appears to have a minimal effect in the experience of the author). This can be set to 32 to decrease mzML
            file sizes.
        :param bool compression: Whether or not to compress the mzML files when converting. This can decrease file
            sizes at a slight cost in processing time.
        :param bool gzip_file: Whether or not to gzip the mzML files when converting. This substantially decreases file
            sizes (mass spectrometric data compresses very well when gzipped). This will slightly increase processing
            time.
        :param str obo: A specific path or URL to an *.obo file defining the accession keys used in mzML files. If this
            is not specified, the default accession URL will be used to download the required obo file. This should not
            be necessary normally, as most of the commonly encountered accession keys are hard-coded into this
            script. The script will raise an error if it encounters an undefined accession key.
        :param bool ftt: Whether to run the function_timetic() method on initialization. This is useful if you require
            access to the total ion current and time lists for each function in the mzML file. This does increase file
            load times quite significantly (~6x slower).

        **Notes**

        An mzML file is a data format for mass spectrometric data which can be parsed by python (avoiding the pitfalls
        associated with the proprietary files usually generated by the mass spectrometers themselves). The mzML file
        structures are expected to conform to those outlined in the HUPO Proteomics Standards Working Group. More
        information can be found at https://raw.githubusercontent.com/HUPO-PSI/psi-ms-CV/master/psi-ms.obo

        If you wish to use the format conversion functionality of this script, you will need to download and install
        ProteoWizard, which can be found at http://proteowizard.sourceforge.net/
        """
        # store keyword settings
        self.verbose = verbose
        self.precision = precision
        self.compression = compression
        self.gzip_file = gzip_file
        self.obo = obo
        self.filename = self.check_for_file(filename)

        # load file and determine key properties
        if self.verbose is True:
            # todo why is this not an instantiation
            self.Progress = Progress
            sys.stdout.write('Loading %s into memory' % self.filename)
            sys.stdout.flush()
        if self.filename.lower().endswith('.mzml.gz'):  # if mzml is gzipped
            handle = gzip.open(self.filename)  # unzip the file
        else:
            handle = self.filename
        try:
            self.tree = xml.dom.minidom.parse(handle)  # full mzML file
        except Exception:  # narrowed from bare except (which would also trap KeyboardInterrupt)
            raise IOError(
                'The mzML file "%s" could not be loaded. The file is either unsupported, corrupt, or incomplete.' % self.filename)

        self.nscans = int(self.tree.getElementsByTagName('spectrumList')[0].getAttribute('count'))  # number of spectra
        self.nchroms = int(
            self.tree.getElementsByTagName('chromatogramList')[0].getAttribute('count'))  # number of chromatograms

        # survey the spectra to determine the scan index range, scan count, and
        # scan properties of each function in the file
        self.functions = {}
        for spectrum in self.tree.getElementsByTagName('spectrum'):
            func, proc, scan = fps(spectrum)  # extract each value and convert to integer
            if func not in self.functions:  # if function is not defined yet
                p = branch_cvparams(spectrum)  # pull spectrum's cvparameters
                self.functions[func] = {
                    'sr': [int(spectrum.getAttribute('index')), None],  # the scan index range that the function spans
                    'nscans': 1,  # number of scans
                }
                self.functions[func].update(scan_properties(p))  # update with scan properties
            else:
                self.functions[func]['sr'][1] = int(
                    spectrum.getAttribute('index'))  # otherwise set the scan index range to the current index
                self.functions[func]['nscans'] += 1
        p = branch_cvparams(spectrum)  # pull properties of final spectrum
        self.duration = p['MS:1000016'].value  # final start scan time

        if self.verbose is True:
            sys.stdout.write(' DONE\n')

        self.BE = BoundsError()  # load warning instance for integration
        self.ftt = False  # tracks whether function_timetic() has been run
        if ftt is True:
            self.function_timetic()

    def __str__(self):
        """The string that is returned when printed"""
        return f'{self.__class__.__name__} {self.nscans} spectra, {self.nchroms} chromatograms'

    def __repr__(self):
        """The representation that is returned"""
        return "%s('%s')" % (self.__class__.__name__, self.filename)

    def __len__(self):
        return self.nscans

    def __getitem__(self, ind):
        """retrieves a scan or summed scans"""
        if isinstance(ind, slice):  # if getitem is trying to slice
            """
            returns the summed scans with the supplied indicies
            slice will assume that the intended function is 1
            """
            if ind.start is None:  # no start
                start = 0
            else:
                start = ind.start
            if ind.stop is None:  # no stop
                # NOTE(review): assumes function 1 exists in this file — confirm for multi-function data
                stop = self.functions[1]['sr'][1]
            else:
                stop = ind.stop
            return self.sum_scans(start, stop, mute=True)

        elif type(ind) is int:  # scan index number
            """will return the spectrum of the scan index provided"""
            if ind < 0 or ind > self.nscans:
                raise IndexError("The scan index number #%d is outside of the mzML's scan index range (0-%d)" % (
                    ind, self.nscans - 1))
            for spectrum in self.tree.getElementsByTagName('spectrum'):
                attr = branch_attributes(spectrum)
                if attr['index'] == ind:
                    return extract_spectrum(spectrum)

        elif type(ind) is float:  # timepoint in function 1
            """float will assume the intended function was 1"""
            if ind < 0 or ind > self.duration:
                raise ValueError(
                    "The supplied time %.3f is outside of this file's time range (0 - %.3f)" % (ind, self.duration))
            ind = self.scan_index(ind)
            for spectrum in self.tree.getElementsByTagName('spectrum'):
                attr = branch_attributes(spectrum)
                if attr['index'] == ind:
                    return extract_spectrum(spectrum)

    def foreachchrom(self, fn):
        """
        a decorator function that will apply the supplied function to every chromatogram in the mzml file
        the supplied function will be handed the chromatogram XML object as the first argument
        the decorated function will return a list of outputs of the supplied function where each index corresponds to a scan

        e.g.::
            loaded = mzML(filename)

            @loaded.foreachchrom
            def do_this(chrom):
                # extract the attributes using the mzML.attributes() method
                attr = loaded.attributes(chrom)
                return attr['id'] # return the name of the chromatogram

            do_this()
        """
        def foreachchrom(*args, **kwargs):
            """decorates the supplied function to run for every chromatogram"""
            prog = Progress(string='Applying function "%s" to chromatogram' % fn.__name__, last=self.nchroms)
            out = []
            for chromatogram in self.tree.getElementsByTagName('chromatogram'):
                if self.verbose is True:
                    prog.write(int(chromatogram.getAttribute('index')) + 1)
                out.append(fn(chromatogram, *args, **kwargs))
            if self.verbose is True:
                prog.fin()
            return out
        return foreachchrom

    def foreachscan(self, fn):
        """
        a decorator function that will apply the supplied function to every spectrum in the mzml file
        the supplied function will be handed the spectrum XML object as the first argument
        the decorated function will return a list of outputs of the supplied function where each index corresponds to a scan

        e.g.::
            loaded = mzML(filename)

            @loaded.foreachscan
            def do_this(scan):
                p = loaded.cvparam(scan) # pull spectrum's cvparameters
                sst = p['MS:1000016'] # start scan time
                x,y = loaded.extract_spectrum(scan,False) # extract the x,y spectrum
                # return the start scan time, x list, and y list
                return sst,x,y

            do_this() # do it
        """
        def foreachscan(*args, **kwargs):
            """decorates the supplied function to run for every scan"""
            prog = Progress(string='Applying function "%s" to scan' % fn.__name__, last=self.nscans)
            out = []
            for spectrum in self.tree.getElementsByTagName('spectrum'):
                if self.verbose is True:
                    prog.write(int(spectrum.getAttribute('index')) + 1)
                out.append(fn(spectrum, *args, **kwargs))
            if self.verbose is True:
                prog.fin()
            return out
        return foreachscan

    def associate_to_function(self, affin=None, level=None, dct=None):
        """
        Associates a given species to the appropriate function number
        in the mzML data file.

        **Parameters**

        affin: '+', '-', or 'UV'
            The affinity of the species. i.e. to positive mode,
            negative mode, or UV-Vis spectra respectively.

        level: *integer* or None
            If the species is found in an MS/MS function,
            the MS^n level can be specified here.

        dct: *dictionary*
            If details are known about the species' affinity,
            they can be provided in dictionary format.
            Specifically, this function looks for the keys:
            'function', 'affin', and 'level'.

        **Returns**

        function number: *integer*
            Returns the appropriate function number in which
            the given species should be found.

        **Notes**

        If nothing is provided to this method, it will return
        the integer 1 (assuming that the species will be found
        in the first function).
        """
        if dct is not None:  # if function was handed a dictionary
            if 'function' in dct:
                return dct['function']
            if 'affin' in dct:
                affin = dct['affin']
            if 'level' in dct:
                level = dct['level']

        if affin is None and level is None:
            return min(self.functions.keys())  # assume first function

        elif affin == 'UV':  # if UV-Vis affinity
            for fn in self.functions:  # determine which function is UV-Vis
                if self.functions[fn]['acc'] == 'MS:1000804':
                    return fn
            raise ValueError('There is no electromagnetic radiation spectrum function in this mzML file')

        elif affin in ['+', '-']:  # if affinity to mass spectrum
            levelcount = 0  # counter for number of matches to this affinity and level
            for fn in self.functions:
                if self.functions[fn]['type'] == 'MS':  # if fn is ms
                    if self.functions[fn]['mode'] == affin:  # if mode matches
                        # if there is no level specified, assume 1
                        if level is None and self.functions[fn]['level'] == 1:
                            fnout = fn
                            levelcount += 1
                        elif self.functions[fn]['level'] == level:  # if level matches
                            fnout = fn
                            levelcount += 1
            if levelcount > 1:
                raise ValueError(
                    f"The affinity specification of mode: {affin}, level: '{level}' matches more than one function "
                    f"in the mzML file. \nTo process this species, be more specific in your level specification or "
                    f"assign it to a specific function number by adding a 'function' key to its dictionary.")
            return fnout

        else:  # if some other affinity
            raise ValueError('The specified affinity "%s" is not supported.' % affin)

    def auto_resolution(self, n=10, function=None, npeaks=4):
        """
        Attempts to automatically determine the resolution of the spectrometer
        that the provided mzML data file was recorded on.
        The method will find n random samples of the entire spectrum and
        calculate the resolution of each of those samples and return the
        average resolution.

        :param int n: The number of psuedo-random samples of the spectrum to determine
            the resolution of. Default 10.
        :param int function: The mzML function number to calculate the resolution of. Default 1.
        :param int npeaks: number of peaks to to try to find
        :return: Estimated resolution of the spectrum
        :rtype: float
        """
        def findsomepeaks(y):
            """roughly locates npeaks peaks by maximum values in the spectrum and returns their index"""
            split = int(len(y) / npeaks)
            start = 0
            end = start + split
            splity = []
            for i in range(npeaks):
                splity.append(y[start:end])
                start += split
                end += split
            out = []
            for ind, section in enumerate(splity):
                maxy = max(section)
                if maxy == max(section[1:-1]):  # if max is not at the edge of the spectrum
                    # list.index returns the first occurrence, matching the previous
                    # sci.where(...)[0][0] behaviour (scipy.asarray/scipy.where aliases
                    # were removed from the top-level scipy namespace in modern releases)
                    out.append(section.index(maxy) + split * ind)
            return out

        if function is None:  # if no function is provided, use first
            function = self.associate_to_function()
        if self.functions[function]['type'] != 'MS':
            raise ValueError(
                'The auto_resolution function only operates on mass spectrum functions. '
                'Type of specified function %d: %s' % (function, self.functions[function]['type']))
        ranges = []  # list of scan intervals
        if self.functions[function]['nscans'] <= 20:  # if the number of scans is less than 20
            ranges = [[1, self.functions[function]['nscans']]]
        else:
            while len(ranges) < n:  # generate n pseudo-random intervals to sample
                ran = int(random() * self.functions[function]['nscans']) + self.functions[function]['sr'][0]
                if ran - 10 >= self.functions[function]['sr'][0] and ran + 10 <= self.functions[function]['sr'][1]:
                    ranges.append([ran - 10, ran + 10])
        if self.verbose is True:
            prog = Progress(string='Estimating resolution of the instrument', fraction=False, last=n)
        summed = []
        for ind, rng in enumerate(ranges):
            if self.verbose is True:
                prog.write(ind + 1)
            summed.append(self.sum_scans(rng[0], rng[1], function, 2, True))  # sum those scans and append output
        res = []
        for spec in summed:  # calculate resolution for each scan range
            inds = findsomepeaks(spec[1])  # find some peaks
            for ind in inds:  # for each of those peaks
                res.append(resolution(spec[0], spec[1], ind, threshold=10))
        if self.verbose is True:
            prog.fin()
        res = [y for y in res if y is not None]  # removes None values (below S/N)
        return sum(res) / len(res)  # return average

    def check_for_file(self, fn):
        """checks for the mzML file in the working directory and converts it if necessary"""
        def version_input(string):
            """checks the python version and uses the appropriate version of user input"""
            if sys.version.startswith('3.'):
                return input('%s' % string)
            else:
                raise EnvironmentError('The version_input method encountered an unsupported version of python.')

        valid = [  # supported extensions
            '.raw',
            '.mzml.gz',
            '.mzml',
        ]
        if fn.lower().endswith('.raw') is True:  # extension is raw
            if file_present(fn[:-4] + '.mzML.gz') is True:  # if corresponding gzipped mzml is present
                return fn[:-4] + '.mzML.gz'
            if file_present(fn[:-4] + '.mzML') is True:  # if corresponding mzml is present
                return fn[:-4] + '.mzML'
            # otherwise convert and return mzml
            return pw_convert(fn, self.precision, self.compression, self.gzip_file, verbose=self.verbose)
        elif file_present(fn) is True:  # if the specified file is present
            for exten in valid:  # checks for supported extensions
                if fn.lower().endswith(exten) is True:
                    return fn
            # otherwise asks user whether to continue
            if version_input(
                    'The extension of the supplied filename "%s" is unexpected and may not be supported.\n'
                    'Do you wish to proceed with file loading? [Y/N] ' % fn).lower() in ['y', 'yes']:
                return fn
            else:
                sys.exit('The user cancelled mzML loading.')
        else:
            fn = fix_extension(fn)  # try to fix extension
            if fn.lower().endswith('.raw') is True:  # convert if only raw file is found
                return pw_convert(fn, self.precision, self.compression, self.gzip_file, verbose=self.verbose)
            return fn

    def function_timetic(self):
        """
        extracts timepoints and tic lists for each function
        this function is separate from mzml contents because it would increase load times significantly (~6x)
        """
        if self.verbose is True:
            prog = Progress(string='Extracting timepoints and total ion current values from mzML', fraction=False)
        for function in self.functions:  # add timepoint and tic lists
            self.functions[function]['timepoints'] = []  # list for timepoints
            self.functions[function]['tic'] = []  # list for total ion current values
            if 'level' in self.functions[function] and self.functions[function]['level'] > 1:
                self.functions[function]['ce'] = []  # list for collision energies
        for spectrum in self.tree.getElementsByTagName('spectrum'):
            attr = branch_attributes(spectrum)
            function, proc, scan = fps(spectrum)  # determine function, process, and scan numbers
            if self.verbose is True:
                prog.write(attr['index'] + 1)
            p = branch_cvparams(spectrum)  # pull spectrum's cvparameters
            self.functions[function]['timepoints'].append(p['MS:1000016'].value)  # start scan time
            self.functions[function]['tic'].append(p['MS:1000285'].value)  # total ion current
            if 'MS:1000045' in p:
                self.functions[function]['ce'].append(p['MS:1000045'].value)  # collision energy
        self.ftt = True
        if self.verbose is True:
            prog.fin()

    def integrate(self, name, start, end, x, y):
        """
        Integrates y values given x bounds in a paired set of lists (e.g. a m/z list and an intensity list)

        name: name of the peak being integrated (only used for warning purposes)
        start: float
            start x value
        end: float or None
            end x value
            None will return the nearest value to the provided start value
        x: list of x values
        y: list of y values (paired with x)

        returns: integral
        """
        if start > max(x) or start < min(x):  # check that start is within the m/z bounds
            self.BE.warn(name, start, end, min(x), max(x))
        if end is None:  # if only a start value is supplied, return closest to that value
            try:  # try to find the value in the list
                return y[locate_in_list(x, start)]
            except TypeError:  # if the value is not in the list, return 0
                return 0
        if end > max(x):  # check that end is within the m/z bounds
            self.BE.warn(name, start, end, min(x), max(x))
        # bug fix: the integral was previously skipped (implicit None return) whenever the
        # end bound exceeded the spectrum; it now warns above and integrates the overlap
        l = locate_in_list(x, start, 'greater')
        r = locate_in_list(x, end, 'lesser')
        if l <= r:
            return sum(y[l:r])
        else:  # catch for if there are no values in the bounds
            return 0

    def pull_chromatograms(self):
        """
        Pulls mzML chromatograms

        returns:
        dictionary = {'chromatogram 1 id', 'chromatogram 2 id', ...}
        dictionary['chromatogram 1 id'] = {
        'x': list of x values
        'y': list of y values (paired with x)
        'xunit': unit of the x values
        'yunit': unit of the y values
        }
        """
        if self.verbose is True:
            prog = Progress(string='Extracting chromatogram', last=self.nchroms)
        chroms = {}  # dictionary of chromatograms
        for chromatogram in self.tree.getElementsByTagName('chromatogram'):
            attr = branch_attributes(chromatogram)  # pull attributes
            if self.verbose is True:
                prog.write(attr['index'] + 1)
            x, y, xunit, yunit = extract_spectrum(chromatogram, True)  # extract x list, y list, and units
            chroms[attr['id']] = {'x': x, 'y': y, 'xunit': xunit, 'yunit': yunit}
        if self.verbose is True:
            prog.fin()
        return chroms

    def pull_species_data(self, sp, sumspec=False):
        """
        Extracts integrated data at every timepoint for all species specified in the sp dictionary
        This function is intended to by called by PyRSIR.py

        sp: dictionary
        sp = {species1, species2, ...} //one key for every species to track
        sp[species] = {
        'bounds':[species x start, species x end], //start and end x values to integrate between
        'affin':['+' or '-' or 'UV'}, //which spectrum to look for this species in
        'level':integer, //if applicable, the MSn level (optional, but adds specificity)
        'function':integer, //the specific function in which to find this species (optional; overrides affin and level)
        }

        sumspec: bool
            toggles summing of all spectra together (creates an additional output item)
            also sums the spectra of mass spectrum species to generate an isotope pattern used by the bounds

        output:
            filled dictionary, each subkey will have:
            'raw': list of raw integrated values dictacted by the bounds
            'function': the function that the species was associated with

            if sumspec is true, will also output a dictionary of Spectrum objects
            the keys of this dictionary are the function numbers

        explicitly interprets full scan mass spectra and UV species
        """
        if sumspec is True:
            spec = {}
            for function in self.functions:  # create spectrum objects for all MS species
                if self.functions[function]['type'] == 'MS':
                    spec[function] = Spectrum(3)
        for species in sp:  # look for and assign function affinity
            sp[species]['function'] = self.associate_to_function(
                dct=sp[species])  # associate each species in the spectrum with a function
            if 'raw' not in sp[species]:  # look for empty raw list
                sp[species]['raw'] = []
        if self.ftt is False:  # if timepoints and tic values have not been extracted yet, extract those
            self.function_timetic()

        if self.verbose is True:
            prog = self.Progress(  # generate progress instance
                string='Extracting species data from spectrum',
                last=self.nscans,
                writeevery=5
            )
        for spectrum in self.tree.getElementsByTagName('spectrum'):
            function, proc, scan = fps(spectrum)  # pull function, process, and scan numbers
            attr = branch_attributes(spectrum)  # get attributes
            if self.verbose is True:
                prog.write(attr['index'] + 1)  # output progress
            x, y = extract_spectrum(spectrum)  # generate spectrum
            # NOTE(review): only function 1 spectra are accumulated here even though spec
            # has a key for every MS function — confirm whether this is intentional
            if sumspec is True and function == 1:
                spec[function].add_spectrum(x, y)
            for key in sp:  # integrate each peak
                if sp[key]['function'] == function:  # if species is related to this function
                    if self.functions[function]['type'] == 'MS':
                        sp[key]['raw'].append(
                            self.integrate(key, sp[key]['bounds'][0], sp[key]['bounds'][1], x, y))  # integrate
                    if self.functions[function]['type'] == 'UV':
                        sp[key]['raw'].append(self.integrate(key, sp[key]['bounds'][0], sp[key]['bounds'][1], x,
                                                             y) / 1000000.)  # integrates and divides by 1 million bring it into au
        if self.verbose is True:
            prog.fin()  # write done
        self.BE.printwarns()  # print bounds warnings (if any)
        if sumspec is True:
            return sp, spec
        return sp, None

    def retrieve_scans(self, start=None, end=None, mzstart=None, mzend=None, function=None, mute=False, outside=False):
        """
        Retrieves the specified scans or time range from the specified function

        start: integer or float
            the point to start retrieving scans
            if integer, this will be a start scan number
            if float, this will be the start time
        end: (optional) integer or float
            the end point to stop retrieving scans
            same options as start
        mzstart: (optional) integer or float
            left m/z bound
        mzend: (optional) integer or float
            right m/z bound
        function: integer
            the function to pull scans from (default 1)
        mute: bool
            overrides the verbose setting of the mzml instance
        outside: bool
            Whether to include the next point outside of the specified m/z bounds.
            This is useful for line continuity if the spectrum is to be used for
            rendering images.

        returns a list with each index corresponding to a scan, with two sublists for x and y data
        """
        if function is None:  # if not specified, retrieve first function
            function = self.associate_to_function()
        # find spectrum indicies to extract between
        if function not in self.functions:
            raise ValueError('The function "%d" is not in this mzml file.' % function)
        start = self.scan_index(start, function, bias='greater')
        end = self.scan_index(end, function, bias='lesser')
        if self.ftt is False:  # extract the timepoints and etc from the mzml
            self.function_timetic()
        if self.verbose is True and mute is False:
            prog = Progress(string='Extracting scan data from spectrum', last=self.nscans)
        out = []
        for spectrum in self.tree.getElementsByTagName('spectrum'):  # go through each spectrum
            attr = branch_attributes(spectrum)
            if attr['index'] > end:
                break
            if self.verbose is True and mute is False:
                prog.write(attr['index'] + 1)
            if start <= attr['index'] <= end:  # within the index bounds
                x, y = extract_spectrum(spectrum)
                if mzstart is not None or mzend is not None:  # apply m/z bounds if any were specified
                    l = mzstart if mzstart is not None else min(x)
                    r = mzend if mzend is not None else max(x)
                    spec = trimspectrum(x, y, l, r, outside)
                else:  # bug fix: spec was previously unbound (NameError) when no m/z bounds were given
                    spec = [x, y]
                out.append(spec)
        if self.verbose is True and mute is False:
            prog.fin()
        if len(out) == 1:  # bug fix: was len(out) == 0, which indexed into an empty list
            return out[0]  # if only one scan, return that scan
        return out

    def scan_index(self, scan=None, function=1, bias='lesser'):
        """
        Determines the index for a scan or timepoint in a given function

        :param int, float scan: The scan number (int) or time point (float) to find.
        :param int function: The mzml function to look in
        :param str bias: Bias of index finding (options dictacted by locate_in_list() )
        :return: scan index
        :rtype: int
        """
        if function not in self.functions:
            raise KeyError('The function %d is not in this mzML file.' % function)
        if scan is None:  # if no scan number is specified
            if bias == 'greater':  # used for start point
                return self.functions[function]['sr'][0]
            if bias == 'lesser':  # used for end point
                return self.functions[function]['sr'][1]
        if type(scan) is float:  # timepoint
            if self.ftt is False:
                self.function_timetic()
            # return located index plus start of the scan range
            return locate_in_list(self.functions[function]['timepoints'], scan, bias=bias) + self.functions[function]['sr'][0]
        elif type(scan) is int:  # scan number
            if scan < 1:
                raise ValueError('The scan number must be greater or equal to 1 (specified: %d)' % scan)
            if scan > self.functions[function]['nscans']:
                raise ValueError(f'The scan number {scan} exceeds the number of scans in function {function} '
                                 f'({self.functions[function]["nscans"]})')
            # return scan minus 1 (to shift into index domain) plus the start location index
            return scan - 1 + self.functions[function]['sr'][0]
        else:
            raise ValueError(f'An unexpected scan type was handed to the scan_index function ("{scan}", '
                             f'type: {type(scan)})')

    def sum_scans(self,
                  start=None,
                  end=None,
                  function=None,
                  dec=3,
                  mute=False
                  ):
        """
        Sums the specified scans together. If the scan range moves into another function, an error is raised.
        This method has a lower memory overhead than retrieve_scans().

        :param float, int start: start point to begin summing. ``int`` is interpreted as a scan number, ``float`` is
            interpreted as a time point in the acquisition.
        :param float, int end: end point to finish summing. Parameters are the same as with start.
        :param int function: mzML function to sum. If this is not provided, the first function will be used.
        :param int dec: number of decimal places to track in the spectrum (lower values lower memory overhead).
        :param bool mute: override chatty mode of mzML object
        :return: summed spectrum in the format ``[[m/z values], [intensity values]]``
        :rtype: list
        """
        # if no function is specified, use the first function
        if function is None:
            function = min(self.functions.keys())
        elif function not in self.functions:  # if fn is not defined
            raise KeyError(f'The function {function} is not defined in the mzML object. Available options: '
                           f'{", ".join([str(key) for key in self.functions.keys()])}')
        if self.functions[function]['type'] != 'MS':
            raise ValueError(f'The sum_scans function does not have the functionality to sum non-mass spec scans.'
                             f'The specified function {function} is of type {self.functions[function]["type"]}')
        start = self.scan_index(start, function, 'greater')
        end = self.scan_index(end, function, 'lesser')

        spec = Spectrum(dec, start=self.functions[function]['window'][0],
                        end=self.functions[function]['window'][1])  # create Spectrum object

        if self.verbose is True and mute is False:
            prog = Progress(string='Combining spectrum', fraction=False, first=start, last=end)

        for spectrum in self.tree.getElementsByTagName('spectrum'):  # go through each spectrum
            attr = branch_attributes(spectrum)  # get attributes
            if attr['index'] > end:
                break
            if self.verbose is True and mute is False:
                prog.write(attr['index'] + 1)
            if start <= attr['index'] <= end:  # if within the specified bounds
                x, y = extract_spectrum(spectrum)  # pull spectrum
                spec.add_spectrum(x, y)  # add spectrum to Spectrum object
        out = spec.trim()
        if self.verbose is True and mute is False:
            prog.fin()
        return out
if __name__ == '__main__':
    # example usage: load a test acquisition and pre-extract time/TIC data on load
    filename = 'MultiTest'
    mzml = mzML(
        filename,
        verbose=True,
        ftt=True,
    )
    # example species dictionary for pull_species_data():
    # sp = {
    #     'pos':{'bounds':[325,327],'affin':'+','spectrum':Spectrum(3),'raw':[]},
    #     'neg':{'bounds':[348,350],'affin':'-','spectrum':Spectrum(3),'raw':[]},
    #     'uv':{'bounds':[378,None],'affin':'UV','raw':[]}
    # }
| 45.515066 | 175 | 0.596148 | import sys
import os
import zlib
import gzip
import base64
import struct
import subprocess
import xml.dom.minidom
import scipy as sci
from random import random
from .progress import Progress
from .spectrum import Spectrum
from .psims import CVParameterSet, stringtodigit
from .tome import resolution, locate_in_list, trimspectrum
# struct unpack parameters (byte order prefix, type code) keyed by the mzML cvParam
# accession that declares the binary data type of an encoded array
# NOTE(review): with the '<' prefix, struct treats 'l' as 4 bytes — a true 64-bit
# integer array would need 'q'; confirm against the PSI-MS accession definitions
decode_formats = {
    'MS:1000519': ['<', 'i'],
    'MS:1000522': ['<', 'l'],
    'MS:1000523': ['<', 'd'],
}
class BoundsError(Warning):
    """Tracks and reports integration bounds that fall outside a spectrum's m/z range."""

    def __init__(self):
        # maps peak name -> number of times its bounds exceeded the spectrum
        self.warned = {}

    def printwarns(self):
        """prints a summary count of the recorded warnings (if any occurred)"""
        if not self.warned:
            return
        sys.stdout.write('The following peaks exceeded the bounds of the spectrum n number of times:\n')
        for name, count in self.warned.items():
            sys.stdout.write('"%s": %d\n' % (name, count))

    def warn(self, name, intstart, intend, mzstart, mzend):
        """records an out-of-bounds event for the named peak, printing a message on first occurrence"""
        if name in self.warned:
            self.warned[name] += 1
        else:
            sys.stdout.write(
                '\nThe peak "%s" (%s-%s) is outside of the bounds of the spectrum being summed m/z %.1f-%.1f\n' % (
                    name, str(intstart), str(intend), mzstart, mzend))
            self.warned[name] = 1
def branch_attributes(branch: xml.dom.minidom.Element):
    """Return the XML attributes of *branch* as a plain dict.

    Values are passed through ``stringtodigit`` (project helper from .psims;
    presumably converts numeric strings to int/float — confirm there).
    """
    return {key: stringtodigit(val) for key, val in branch.attributes.items()}
def branch_cvparams(branch):
    """Collect every <cvParam> child of *branch* into a CVParameterSet.

    Parameters are keyed by their 'accession' attribute; every other XML
    attribute of the cvParam is stored under that key (values converted via
    ``stringtodigit``).
    """
    out = {}
    for cvParam in branch.getElementsByTagName('cvParam'):
        acc = cvParam.getAttribute('accession')  # controlled-vocabulary accession key
        out[acc] = {}
        for attribute, value in cvParam.attributes.items():
            if attribute != 'accession':
                out[acc][attribute] = stringtodigit(value)
    return CVParameterSet(**out)
def file_present(filepath):
    """Return True when *filepath* exists as either a regular file or a directory."""
    return os.path.isfile(filepath) or os.path.isdir(filepath)
def decodeformat(p: CVParameterSet, speclen: int):
    """Build a struct format string (e.g. '<123d') for a binary data array.

    Uses the first accession in *p* that appears in the module-level
    ``decode_formats`` table (byte order prefix + count + type code).
    NOTE(review): implicitly returns None when no known accession is present,
    which would make the later struct.unpack call fail — confirm intended.
    """
    for key in set(decode_formats) & p.keys():
        return f'{decode_formats[key][0]}{speclen}{decode_formats[key][1]}'
def gettext(nodelist):
    """Concatenate the character data of every text node in *nodelist*."""
    return ''.join(
        node.data for node in nodelist if node.nodeType == node.TEXT_NODE
    )
def extract_spectrum(spectrum: xml.dom.minidom.Element, units: bool = False):
    """Decode the binary data arrays of an mzML <spectrum> (or <chromatogram>) branch.

    Returns a list of decoded value lists (typically [x_values, y_values]).
    When *units* is True, the unit name of each array (first cvParam carrying
    a unit) is appended to the returned list after the data lists.
    """
    speclen = int(spectrum.getAttribute('defaultArrayLength'))  # values per array
    out = []
    if units is True:
        units = []  # the flag is repurposed as an accumulator of per-array unit names
    for binary in spectrum.getElementsByTagName('binaryDataArray'):
        p = branch_cvparams(binary)  # cv parameters describing this array's encoding
        compressed = True if 'MS:1000574' in p else False  # MS:1000574 = zlib compression
        unpack_format = decodeformat(p, speclen)
        string = gettext(binary.getElementsByTagName('binary')[0].childNodes)
        decoded = base64.standard_b64decode(string)
        if compressed is True:
            decoded = zlib.decompress(decoded)
        out.append(list(struct.unpack(unpack_format, decoded)))
        if units is not False:
            for cv in p:
                if cv.unit is not None:
                    units.append(cv.unit)  # keep only the first unit found for this array
                    break
    if units is not False:
        out.extend(units)
    return out
def pw_convert(filename, bit=64, compression=True, gzip=True, verbose=True):
    """Convert a raw instrument file to *.mzML(.gz) with ProteoWizard's msconvert.

    Windows-only (raises OSError elsewhere). Locates msconvert.exe under the
    standard ProteoWizard install directories and shells out to it.

    :param filename: path to the raw file; the last 4 characters are replaced
        by the mzML extension to build the output name
    :param bit: floating point precision for the output, 32 or 64
    :param compression: apply zlib compression to the binary arrays (--zlib)
    :param gzip: gzip the resulting mzML file (--gzip); note this parameter
        shadows the module-level ``gzip`` import inside this function
    :param verbose: write progress messages and pass --verbose to msconvert
    :return: the output file name
    """
    def find_all(fname, path):
        """Locate every file named *fname* anywhere under *path*."""
        locations = []
        for root, dirs, files in os.walk(path):
            if fname in files:
                locations.append(os.path.join(root, fname))
        return locations
    if sys.platform != 'win32':
        raise OSError(
            'The function that converts to mzML is limited to Windows operating systems.\n'
            'You can manually convert to *.mzML using the proteowizard standalone package '
            'and supply that mzML file to this script')
    locs = []
    for val in ['c:\\program files\\proteowizard',
                'c:\\program files (x86)\\proteowizard']:
        locs.extend(find_all('msconvert.exe', val))
    if len(locs) == 0:
        raise IOError(
            'The python script could not find msconvert.exe\n'
            'Please ensure that ProteoWizard is installed in either:\n'
            'c:\\program files\\proteowizard\nor\nc:\\program files (x86)\\proteowizard')
    outname = filename[:-4] + '.mzML'
    # bug fix: quote the executable path — ProteoWizard lives under
    # "Program Files", whose spaces would otherwise split the command string
    callstring = '"' + locs[-1] + '" "' + filename + '" --mzML'
    if bit in [32, 64]:
        callstring += ' --' + str(bit)
    else:
        raise ValueError(
            'ProteoWizard conversion was called with an invalid floating point precision "%s".' % str(bit))
    if compression is True:
        callstring += ' --zlib'
    exten = '*.mzML'
    if gzip is True:
        callstring += ' --gzip'
        outname += '.gz'
        exten += '.gz'
    # (removed a leftover debug print of the command string here)
    if verbose is True:
        callstring += ' --verbose'
        sys.stdout.write('Generating %s file from %s' % (exten, filename))
        sys.stdout.flush()
        subprocess.call(callstring)
        sys.stdout.write(' DONE\n')
        sys.stdout.flush()
    else:
        subprocess.call(callstring)
    return outname
def fix_extension(fn):
    """Complete or repair the extension of *fn* and return a path that exists.

    Handles partially typed extensions (e.g. 'file.mzm', 'file.ra') for the
    supported .mzml.gz, .mzml and .raw formats, preferring mzml.gz over mzml
    over raw. Raises FileNotFoundError when no candidate file is present.
    """
    # each dict maps a truncated ending to the characters that complete it
    oopsx = {'.mzm': 'l', '.mz': 'ml', '.m': 'zml', '.': 'mzml'}
    oopsr = {'.ra': 'w', '.r': 'aw', '.': 'raw'}
    oopsg = {'.mzml.g': 'z', '.mzml.': 'gz', '.mzml': '.gz', '.mzm': 'l.gz', '.mz': 'ml.gz', '.m': 'zml.gz',
             '.': 'mzml.gz'}
    if file_present(fn + '.mzml.gz') is True:
        return fn + '.mzml.gz'
    if file_present(fn + '.mzml') is True:
        return fn + '.mzml'
    for key in oopsg:  # gzipped mzml extensions
        if fn.lower().endswith(key) is True:
            if file_present(fn + oopsg[key]) is True:
                return fn + oopsg[key]
    for key in oopsx:  # mzml extensions
        if fn.lower().endswith(key) is True:
            if file_present(fn + oopsx[key]) is True:
                return fn + oopsx[key]
    for key in oopsr:  # raw extensions
        if fn.lower().endswith(key) is True:
            if file_present(fn + oopsr[key]) is True:
                return fn + oopsr[key]
    if file_present(fn + '.raw') is True:
        return fn + '.raw'
    raise FileNotFoundError(f'The file {fn} could not be located in the current working directory')
def fps(branch):
    """Parse the integers out of a branch's id attribute.

    Ids look like e.g. 'function=1 process=0 scan=42'; the values are
    returned in the order they appear (function, process, scan).
    """
    numbers = []
    for token in branch.getAttribute('id').split():
        numbers.append(int(token.split('=')[1]))
    return numbers
def scan_properties(hand):
    """Determine the spectrum type and key properties from its cv parameters.

    *hand* is a CVParameterSet (or a mapping used to construct one). Returns
    a dict with the matched accession ('acc'), its name, and a 'type' of
    'MS' or 'UV'. MS entries additionally carry 'level', the m/z scan
    'window' as [start, end], the polarity 'mode', and (for MSn) 'target'.
    Raises for MSn > 2 without a target and for unsupported spectrum types.
    """
    mstypes = {  # accessions that identify mass-spectrum scan types
        'MS:1000928': 'calibration spectrum',
        'MS:1000294': 'mass spectrum',
        'MS:1000322': 'charge inversion mass spectrum',
        'MS:1000325': 'constant neutral gain spectrum',
        'MS:1000326': 'constant neutral loss spectrum',
        'MS:1000328': 'e/2 mass spectrum',
        'MS:1000341': 'precursor ion spectrum',
        'MS:1000343': 'product ion spectrum',
        'MS:1000579': 'MS1 spectrum',
        'MS:1000580': 'MSn spectrum',
        'MS:1000581': 'CRM spectrum',
        'MS:1000582': 'SIM spectrum',
        'MS:1000583': 'SRM spectrum',
    }
    othertypes = {  # accessions that identify non-MS spectra
        'MS:1000620': 'PDA spectrum',
        'MS:1000804': 'electromagnetic radiation spectrum',
        'MS:1000805': 'emission spectrum',
        'MS:1000806': 'absorption spectrum',
    }
    out = {}
    if isinstance(hand, CVParameterSet):
        p = hand
    else:
        p = CVParameterSet(hand)
    for acc in p.keys() & mstypes.keys():  # mass-spectrum branch
        out['acc'] = acc
        out['name'] = mstypes[acc]
        out['type'] = 'MS'
        out['level'] = p['MS:1000511'].value  # ms level
        out['window'] = [p['MS:1000501'].value, p['MS:1000500'].value]  # scan window [lower, upper]
        if 'MS:1000129' in p:  # negative scan
            out['mode'] = '-'
        elif 'MS:1000130' in p:  # positive scan
            out['mode'] = '+'
        if 'MS:1000827' in p:  # isolation window target m/z
            out['target'] = p['MS:1000827'].value
        elif out['level'] > 2:
            raise ValueError(
                'This script has not been coded to handle MSn > 2, please contact the author of the class')
        return out
    for acc in p.keys() & othertypes.keys():  # non-MS branch
        out['acc'] = acc
        out['name'] = othertypes[acc]
        if 'MS:1000804' in p:  # electromagnetic radiation (UV-Vis)
            out['type'] = 'UV'
        else:
            raise KeyError(
                'The script has not been coded to handle spectra types other than MS and UV-Vis. '
                'Please contact the authors to get this functionality included.')
        return out
class mzML(object):
def __init__(self,
filename: str,
verbose: bool = True,
precision: int = 64,
compression: bool = True,
gzip_file: bool = True,
obo: str = None,
ftt: bool = False,
**kwargs
):
self.verbose = verbose
self.precision = precision
self.compression = compression
self.gzip_file = gzip_file
self.obo = obo
self.filename = self.check_for_file(filename)
if self.verbose is True:
self.Progress = Progress
sys.stdout.write('Loading %s into memory' % self.filename)
sys.stdout.flush()
if self.filename.lower().endswith('.mzml.gz'):
handle = gzip.open(self.filename)
else:
handle = self.filename
try:
self.tree = xml.dom.minidom.parse(handle)
except:
raise IOError(
'The mzML file "%s" could not be loaded. The file is either unsupported, corrupt, or incomplete.' % self.filename)
self.nscans = int(self.tree.getElementsByTagName('spectrumList')[0].getAttribute('count'))
self.nchroms = int(
self.tree.getElementsByTagName('chromatogramList')[0].getAttribute('count'))
self.functions = {}
for spectrum in self.tree.getElementsByTagName('spectrum'):
func, proc, scan = fps(spectrum)
if func not in self.functions:
p = branch_cvparams(spectrum)
self.functions[func] = {
'sr': [int(spectrum.getAttribute('index')), None], # the scan index range that the function spans
'nscans': 1, # number of scans
}
self.functions[func].update(scan_properties(p)) # update with scan properties
else:
self.functions[func]['sr'][1] = int(
spectrum.getAttribute('index')) # otherwise set the scan index range to the current index
self.functions[func]['nscans'] += 1
p = branch_cvparams(spectrum) # pull properties of final spectrum
self.duration = p['MS:1000016'].value # final start scan time
if self.verbose is True:
sys.stdout.write(' DONE\n')
self.BE = BoundsError() # load warning instance for integration
self.ftt = False
if ftt is True:
self.function_timetic()
def __str__(self):
return f'{self.__class__.__name__} {self.nscans} spectra, {self.nchroms} chromatograms'
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self.filename)
def __len__(self):
return self.nscans
def __getitem__(self, ind):
if isinstance(ind, slice): # if getitem is trying to slice
if ind.start is None: # no start
start = 0
else:
start = ind.start
if ind.stop is None: # no stop
stop = self.functions[1]['sr'][1]
else:
stop = ind.stop
return self.sum_scans(start, stop, mute=True)
elif type(ind) is int: # scan index number
"""will return the spectrum of the scan index provided"""
if ind < 0 or ind > self.nscans:
raise IndexError("The scan index number #%d is outside of the mzML's scan index range (0-%d)" % (
ind, self.nscans - 1))
for spectrum in self.tree.getElementsByTagName('spectrum'):
attr = branch_attributes(spectrum)
if attr['index'] == ind:
return extract_spectrum(spectrum)
elif type(ind) is float:
"""float will assume the intended function was 1"""
if ind < 0 or ind > self.duration:
raise ValueError(
"The supplied time %.3f is outside of this file's time range (0 - %.3f)" % (ind, self.duration))
ind = self.scan_index(ind)
for spectrum in self.tree.getElementsByTagName('spectrum'):
attr = branch_attributes(spectrum)
if attr['index'] == ind:
return extract_spectrum(spectrum)
def foreachchrom(self, fn):
def foreachchrom(*args, **kwargs):
prog = Progress(string='Applying function "%s" to chromatogram' % fn.__name__, last=self.nchroms)
out = []
for chromatogram in self.tree.getElementsByTagName('chromatogram'):
if self.verbose is True:
prog.write(int(chromatogram.getAttribute('index')) + 1)
out.append(fn(chromatogram, *args, **kwargs))
if self.verbose is True:
prog.fin()
return out
return foreachchrom
def foreachscan(self, fn):
def foreachscan(*args, **kwargs):
prog = Progress(string='Applying function "%s" to scan' % fn.__name__, last=self.nscans)
out = []
for spectrum in self.tree.getElementsByTagName('spectrum'):
if self.verbose is True:
prog.write(int(spectrum.getAttribute('index')) + 1)
out.append(fn(spectrum, *args, **kwargs))
if self.verbose is True:
prog.fin()
return out
return foreachscan
def associate_to_function(self, affin=None, level=None, dct=None):
if dct is not None: # if function was handed a dictionary
if 'function' in dct:
return dct['function']
if 'affin' in dct:
affin = dct['affin']
if 'level' in dct:
level = dct['level']
if affin is None and level is None:
return min(self.functions.keys()) # assume first function
elif affin == 'UV': # if UV-Vis affinity
for fn in self.functions: # determine which function is UV-Vis
if self.functions[fn]['acc'] == 'MS:1000804':
return fn
raise ValueError('There is no electromagnetic radiation spectrum function in this mzML file')
elif affin in ['+', '-']: # if affinity to mass spectrum
levelcount = 0 # counter for number of matches to this affinity and level
for fn in self.functions:
if self.functions[fn]['type'] == 'MS': # if fn is ms
if self.functions[fn]['mode'] == affin: # if mode mathes
# if there is no level specified, assume 1
if level is None and self.functions[fn]['level'] == 1:
fnout = fn
levelcount += 1
elif self.functions[fn]['level'] == level: # if level matches
fnout = fn
levelcount += 1
if levelcount > 1:
raise ValueError(
f"There affinity specification of mode: {affin}, level: '{level}' matches more than one function "
f"in the mzML file. \nTo process this species, be more specific in your level specification or "
f"assign it to a specific function number by adding a 'function' key to its dictionary.")
return fnout
else: # if some other affinity
raise ValueError('The specified affinity "%s" is not supported.' % affin)
def auto_resolution(self, n=10, function=None, npeaks=4):
def findsomepeaks(y):
split = int(len(y) / npeaks)
start = 0
end = start + split
splity = []
for i in range(npeaks):
splity.append(sci.asarray(y[start:end]))
start += split
end += split
out = []
for ind, section in enumerate(splity):
maxy = max(section)
if maxy == max(section[1:-1]): # if max is not at the edge of the spectrum
out.append(sci.where(section == maxy)[0][0] + split * ind)
return out
if function is None: # if no function is provided, use first
function = self.associate_to_function()
if self.functions[function]['type'] != 'MS':
raise ValueError(
'The auto_resolution function only operates on mass spectrum functions. '
'Type of specified function %d: %s' % (function, self.functions[function]['type']))
ranges = [] # list of scan intervals
if self.functions[function]['nscans'] <= 20: # if the number of scans is less than 20
ranges = [[1, self.functions[function]['nscans']]]
else:
while len(ranges) < n: # generate 10 pseudo-random intervals to sample
ran = int(random() * self.functions[function]['nscans']) + self.functions[function]['sr'][0]
if ran - 10 >= self.functions[function]['sr'][0] and ran + 10 <= self.functions[function]['sr'][1]:
ranges.append([ran - 10, ran + 10])
if self.verbose is True:
prog = Progress(string='Estimating resolution of the instrument', fraction=False, last=n)
summed = []
for ind, rng in enumerate(ranges):
if self.verbose is True:
prog.write(ind + 1)
summed.append(self.sum_scans(rng[0], rng[1], function, 2, True)) # sum those scans and append output
res = []
for spec in summed: # calculate resolution for each scan range
inds = findsomepeaks(spec[1]) # find some peaks
for ind in inds: # for each of those peaks
res.append(resolution(spec[0], spec[1], ind, threshold=10))
if self.verbose is True:
prog.fin()
res = [y for y in res if y is not None] # removes None values (below S/N)
return sum(res) / len(res) # return average
def check_for_file(self, fn):
def version_input(string):
# if sys.version.startswith('2.7'):
# return raw_input('%s' % string)
if sys.version.startswith('3.'):
return input('%s' % string)
else:
raise EnvironmentError('The version_input method encountered an unsupported version of python.')
valid = [ # supported extensions
'.raw',
'.mzml.gz',
'.mzml',
]
if fn.lower().endswith('.raw') is True: # extension is raw
if file_present(fn[:-4] + '.mzML.gz') is True: # if corresponding gzipped mzml is present
return fn[:-4] + '.mzML.gz'
if file_present(fn[:-4] + '.mzML') is True: # if corresponding mzml is present
return fn[:-4] + '.mzML'
# otherwise convert and return mzml
return pw_convert(fn, self.precision, self.compression, self.gzip_file, verbose=self.verbose)
elif file_present(fn) is True: # if the specified file is present
for exten in valid: # checks for supported extensions
if fn.lower().endswith(exten) is True:
return fn
# otherwise asks user whether to continue
if version_input(
'The extension of the supplied filename "%s" is unexpected and may not be supported.\n'
'Do you wish to proceed with file loading? [Y/N] ' % fn).lower() in ['y', 'yes']:
return fn
else:
sys.exit('The user cancelled mzML loading.')
else:
fn = fix_extension(fn) # try to fix extension
if fn.lower().endswith('.raw') is True: # convert if only raw file is found
return pw_convert(fn, self.precision, self.compression, self.gzip_file, verbose=self.verbose)
return fn
def function_timetic(self):
if self.verbose is True:
prog = Progress(string='Extracting timepoints and total ion current values from mzML', fraction=False)
for function in self.functions: # add timepoint and tic lists
self.functions[function]['timepoints'] = [] # list for timepoints
self.functions[function]['tic'] = [] # list for total ion current values
if 'level' in self.functions[function] and self.functions[function]['level'] > 1:
self.functions[function]['ce'] = [] # list for collision energies
for spectrum in self.tree.getElementsByTagName('spectrum'):
attr = branch_attributes(spectrum)
function, proc, scan = fps(spectrum) # determine function, process, and scan numbers
if self.verbose is True:
prog.write(attr['index'] + 1)
p = branch_cvparams(spectrum) # pull spectrum's cvparameters
self.functions[function]['timepoints'].append(p['MS:1000016'].value)
self.functions[function]['tic'].append(p['MS:1000285'].value)
if 'MS:1000045' in p:
self.functions[function]['ce'].append(p['MS:1000045'].value)
self.ftt = True
if self.verbose is True:
prog.fin()
    def integrate(self, name, start, end, x, y):
        """Sum the *y* values whose *x* values fall within [start, end].

        When *end* is None, returns the single y value at *start* (0 when the
        value cannot be located). Out-of-bounds ranges are recorded through
        self.BE (the BoundsError tracker) under the peak *name*.
        NOTE(review): when end > max(x) this warns and then implicitly
        returns None rather than a number — confirm callers tolerate that.
        """
        if start > max(x) or start < min(x):  # integration start outside the spectrum
            self.BE.warn(name, start, end, min(x), max(x))
        if end is None:  # single-point lookup rather than a range sum
            try:
                return y[locate_in_list(x, start)]
            except TypeError:  # locate_in_list returned None (value not found)
                return 0
        if end > max(x):  # integration end outside the spectrum
            self.BE.warn(name, start, end, min(x), max(x))
        else:
            l = locate_in_list(x, start, 'greater')
            r = locate_in_list(x, end, 'lesser')
            if l <= r:
                return sum(y[l:r])
            else:  # no x values fall inside the requested range
                return 0
def pull_chromatograms(self):
if self.verbose is True:
prog = Progress(string='Extracting chromatogram', last=self.nchroms)
chroms = {}
for chromatogram in self.tree.getElementsByTagName('chromatogram'):
attr = branch_attributes(chromatogram)
if self.verbose is True:
prog.write(attr['index'] + 1)
x, y, xunit, yunit = extract_spectrum(chromatogram, True)
chroms[attr['id']] = {'x': x, 'y': y, 'xunit': xunit, 'yunit': yunit}
if self.verbose is True:
prog.fin()
return chroms
def pull_species_data(self, sp, sumspec=False):
if sumspec is True:
spec = {}
for function in self.functions:
if self.functions[function]['type'] == 'MS':
spec[function] = Spectrum(3)
for species in sp:
sp[species]['function'] = self.associate_to_function(
dct=sp[species])
if 'raw' not in sp[species]:
sp[species]['raw'] = []
if self.ftt is False:
self.function_timetic()
if self.verbose is True:
prog = self.Progress(
string='Extracting species data from spectrum',
last=self.nscans,
writeevery=5
)
for spectrum in self.tree.getElementsByTagName('spectrum'):
function, proc, scan = fps(spectrum)
attr = branch_attributes(spectrum)
if self.verbose is True:
prog.write(attr['index'] + 1)
x, y = extract_spectrum(spectrum)
if sumspec is True and function == 1:
spec[function].add_spectrum(x, y)
for key in sp:
if sp[key]['function'] == function:
if self.functions[function]['type'] == 'MS':
sp[key]['raw'].append(
self.integrate(key, sp[key]['bounds'][0], sp[key]['bounds'][1], x, y))
if self.functions[function]['type'] == 'UV':
sp[key]['raw'].append(self.integrate(key, sp[key]['bounds'][0], sp[key]['bounds'][1], x,
y) / 1000000.)
if self.verbose is True:
prog.fin()
self.BE.printwarns()
if sumspec is True:
return sp, spec
return sp, None
def retrieve_scans(self, start=None, end=None, mzstart=None, mzend=None, function=None, mute=False, outside=False):
if function is None:
function = self.associate_to_function()
if function not in self.functions:
raise ValueError('The function "%d" is not in this mzml file.' % function)
start = self.scan_index(start, function, bias='greater')
end = self.scan_index(end, function, bias='lesser')
if self.ftt is False:
self.function_timetic()
if self.verbose is True and mute is False:
prog = Progress(string='Extracting scan data from spectrum', last=self.nscans)
out = []
for spectrum in self.tree.getElementsByTagName('spectrum'):
attr = branch_attributes(spectrum)
d:
break
if self.verbose is True and mute is False:
prog.write(attr['index'] + 1)
if start <= attr['index'] <= end:
x, y = extract_spectrum(spectrum)
if mzstart is not None or mzend is not None:
if mzstart is None:
l = min(x)
else:
l = mzstart
if mzend is None:
r = max(x)
else:
r = mzend
spec = trimspectrum(x, y, l, r, outside)
out.append(spec)
if self.verbose is True and mute is False:
prog.fin()
if len(out) == 0:
return out[0]
return out
    def scan_index(self, scan=None, function=1, bias='lesser'):
        """Convert a scan specifier into a file-wide scan index.

        *scan* may be None (returns the first index of *function* with
        bias='greater', the last with bias='lesser'), an int (interpreted as
        a 1-based scan number within the function), or a float (interpreted
        as a time point matched against the function's timepoints with the
        given *bias*).
        """
        if function not in self.functions:
            raise KeyError('The function %d is not in this mzML file.' % function)
        if scan is None:  # no scan specified -> return a bound of the function's range
            if bias == 'greater':  # used when resolving a start point
                return self.functions[function]['sr'][0]
            if bias == 'lesser':  # used when resolving an end point
                return self.functions[function]['sr'][1]
        if type(scan) is float:  # time point
            if self.ftt is False:  # timepoints are needed for the lookup
                self.function_timetic()
            # nearest timepoint (per bias) offset into the function's index range
            return locate_in_list(self.functions[function]['timepoints'], scan, bias=bias) + self.functions[function]['sr'][0]
        elif type(scan) is int:  # 1-based scan number within the function
            if scan < 1:
                raise ValueError('The scan number must be greater or equal to 1 (specified: %d)' % scan)
            if scan > self.functions[function]['nscans']:
                raise ValueError(f'The scan number {scan} exceeds the number of scans in function {function} '
                                 f'({self.functions[function]["nscans"]})')
            # offset the 1-based scan number into the function's index range
            return scan - 1 + self.functions[function]['sr'][0]
        else:
            raise ValueError(f'An unexpected scan type was handed to the scan_index function ("{scan}", '
                             f'type: {type(scan)})')
    def sum_scans(self,
                  start=None,
                  end=None,
                  function=None,
                  dec=3,
                  mute=False
                  ):
        """Sum the scans of *function* between *start* and *end* (inclusive).

        *start*/*end* may be scan numbers (int) or times (float); None means
        the function's full range. *dec* is the number of decimal places the
        Spectrum object bins x values to. Only mass-spectrum functions can
        be summed. Returns the trimmed summed spectrum from Spectrum.trim().
        """
        if function is None:  # default to the first function
            function = min(self.functions.keys())
        elif function not in self.functions:
            raise KeyError(f'The function {function} is not defined in the mzML object. Available options: '
                           f'{", ".join([str(key) for key in self.functions.keys()])}')
        if self.functions[function]['type'] != 'MS':
            raise ValueError(f'The sum_scans function does not have the functionality to sum non-mass spec scans.'
                             f'The specified function {function} is of type {self.functions[function]["type"]}')
        start = self.scan_index(start, function, 'greater')
        end = self.scan_index(end, function, 'lesser')
        spec = Spectrum(dec, start=self.functions[function]['window'][0],
                        end=self.functions[function]['window'][1])  # Spectrum spanning the function's scan window
        if self.verbose is True and mute is False:
            prog = Progress(string='Combining spectrum', fraction=False, first=start, last=end)
        for spectrum in self.tree.getElementsByTagName('spectrum'):
            attr = branch_attributes(spectrum)
            if attr['index'] > end:  # scans are in index order; nothing past end can match
                break
            if self.verbose is True and mute is False:
                prog.write(attr['index'] + 1)
            if start <= attr['index'] <= end:  # if within the specified bounds
                x, y = extract_spectrum(spectrum)  # pull spectrum
                spec.add_spectrum(x, y)  # accumulate into the Spectrum object
        out = spec.trim()
        if self.verbose is True and mute is False:
            prog.fin()
        return out
if __name__ == '__main__':
filename = 'MultiTest'
mzml = mzML(filename, verbose=True, ftt=True)
| true | true |
1c3888f0cf285a3a0076e254a6838bcd4e5d38db | 451 | py | Python | src/posts/migrations/0002_post_section.py | kelchidoo/financezone | 0b16179b6028ef061a21b5003bbaffe18db8459e | [
"MIT"
] | null | null | null | src/posts/migrations/0002_post_section.py | kelchidoo/financezone | 0b16179b6028ef061a21b5003bbaffe18db8459e | [
"MIT"
] | null | null | null | src/posts/migrations/0002_post_section.py | kelchidoo/financezone | 0b16179b6028ef061a21b5003bbaffe18db8459e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-08-05 19:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a 'section' CharField to the Post model."""
    dependencies = [
        ('posts', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='section',
            # NOTE(review): default=False on a CharField will store the string
            # 'False' for existing rows — presumably default='' (or a real
            # section name) was intended; confirm against the Post model.
            field=models.CharField(default=False, max_length=20),
        ),
    ]
| 21.47619 | 65 | 0.609756 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='section',
field=models.CharField(default=False, max_length=20),
),
]
| true | true |
1c388a6c6859d6e68629d021830a224fc96d1d4f | 773 | py | Python | tests/test_font_size.py | ehtec/pdfminer.six | 5b1823f25ab998e904fc5d81687732580f23e3b9 | [
"MIT"
] | null | null | null | tests/test_font_size.py | ehtec/pdfminer.six | 5b1823f25ab998e904fc5d81687732580f23e3b9 | [
"MIT"
] | 1 | 2022-01-31T22:58:07.000Z | 2022-01-31T22:58:07.000Z | tests/test_font_size.py | phantomcyber/pdfminer.six | e35a9319a6ae5d310f08f07a5edf16aadc529c1e | [
"MIT"
] | null | null | null | from helpers import absolute_sample_path
from pdfminer.high_level import extract_pages
from pdfminer.layout import LTChar, LTTextBox
def test_font_size():
    """Each digit string in the sample PDF must be rendered at the size it names.

    Lines whose stripped text is all digits encode their expected point size;
    every character of such a line must report that size (rounded to int).
    """
    path = absolute_sample_path('font-size-test.pdf')
    for page in extract_pages(path):
        for element in page:
            if not isinstance(element, LTTextBox):
                continue
            for line in element:
                label = line.get_text().strip()
                if not label.isdigit():
                    continue
                wanted = int(label)
                for glyph in line:
                    if isinstance(glyph, LTChar):
                        assert int(round(glyph.size)) == wanted
| 38.65 | 67 | 0.570505 | from helpers import absolute_sample_path
from pdfminer.high_level import extract_pages
from pdfminer.layout import LTChar, LTTextBox
def test_font_size():
    """Check that each digit line in the sample PDF reports the font size it names.

    The fixture appears to contain digit-only lines whose text value encodes
    the point size they are set in; every character's reported size, rounded
    to an int, must match its own line's digits.
    """
    path = absolute_sample_path('font-size-test.pdf')
    for page in extract_pages(path):
        for text_box in page:
            if isinstance(text_box, LTTextBox):
                for line in text_box:
                    possible_number = line.get_text().strip()
                    if possible_number.isdigit():  # only digit-only lines carry an expected size
                        expected_size = int(possible_number)
                        for char in line:
                            if isinstance(char, LTChar):
                                actual_size = int(round(char.size))
                                assert expected_size == actual_size
| true | true |
1c388b2613d1ec6add292ddb344c67ba4bb59a48 | 256 | py | Python | configs/unet/fcn_unet_s5-d16_256x256_40k_hrf.py | Xlinford/mmsegmentation | 8b444de5e6db2af2538a73a93ac75204f5c3bb2f | [
"Apache-2.0"
] | null | null | null | configs/unet/fcn_unet_s5-d16_256x256_40k_hrf.py | Xlinford/mmsegmentation | 8b444de5e6db2af2538a73a93ac75204f5c3bb2f | [
"Apache-2.0"
] | null | null | null | configs/unet/fcn_unet_s5-d16_256x256_40k_hrf.py | Xlinford/mmsegmentation | 8b444de5e6db2af2538a73a93ac75204f5c3bb2f | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../_base_/models/fcn_unet_s5-d16.py', '../_base_/datasets/hrf.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
test_cfg = dict(crop_size=(256, 256), stride=(170, 170))
evaluation = dict(metric='mDice')
| 36.571429 | 74 | 0.660156 | _base_ = [
'../_base_/models/fcn_unet_s5-d16.py', '../_base_/datasets/hrf.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
test_cfg = dict(crop_size=(256, 256), stride=(170, 170))
evaluation = dict(metric='mDice')
| true | true |
1c388b552035a4b9b2ec413e86da8a0c41e5beb0 | 3,731 | py | Python | bw2io/extractors/exiobase.py | mfastudillo/brightway2-io | dc383ddb6003a46e78259aeb7f87b9d80965d689 | [
"BSD-3-Clause"
] | null | null | null | bw2io/extractors/exiobase.py | mfastudillo/brightway2-io | dc383ddb6003a46e78259aeb7f87b9d80965d689 | [
"BSD-3-Clause"
] | null | null | null | bw2io/extractors/exiobase.py | mfastudillo/brightway2-io | dc383ddb6003a46e78259aeb7f87b9d80965d689 | [
"BSD-3-Clause"
] | null | null | null | from pathlib import Path
from tqdm import tqdm
import csv
import os
import re
def remove_numerics(string):
    """Drop a trailing parenthesised two-digit code from a sector name,
    e.g. 'Tobacco products (16)' -> 'Tobacco products'.
    """
    trailing_code = re.compile(r" \(\d\d\)$")
    return trailing_code.sub("", string)
class Exiobase3DataExtractor(object):
    """Static helpers for reading an unpacked EXIOBASE 3 data directory.

    Expects the flat-file layout of an EXIOBASE download: x.txt, unit.txt,
    A.txt and a satellite/ subdirectory with tab-separated files.
    """
    @classmethod
    def _check_dir(cls, path):
        """Sanity-check that *path* looks like an EXIOBASE data directory."""
        # Note: this assumes industry by industry
        assert os.path.isdir(path), "Must supply path to EXIOBASE data folder"
        assert "x.txt" in os.listdir(path), "Directory path must include Exiobase files"
    @classmethod
    def _get_production_volumes(cls, dirpath):
        """Map (sector, region) to the total output value ('indout') from x.txt."""
        with open(dirpath / "x.txt") as csvfile:
            reader = csv.DictReader(csvfile, delimiter="\t")
            data = {
                (row["sector"], row["region"]): float(row["indout"]) for row in reader
            }
        return data
    @classmethod
    def _get_unit_data(cls, dirpath):
        """Map (sector, region) to a human-readable unit from unit.txt.

        Only 'M.EUR' is currently recognised; any other unit raises KeyError.
        """
        lookup = {"M.EUR": "million €"}
        with open(dirpath / "unit.txt") as csvfile:
            reader = csv.DictReader(csvfile, delimiter="\t")
            data = {
                (row["sector"], row["region"]): lookup[row["unit"]] for row in reader
            }
        return data
    @classmethod
    def get_flows(cls, dirpath):
        """Map each satellite flow name to its unit from satellite/unit.txt (header skipped)."""
        dirpath = Path(dirpath)
        with open(dirpath / "satellite" / "unit.txt") as csvfile:
            reader = csv.reader(csvfile, delimiter="\t")
            next(reader)
            data = {o[0]: o[1] for o in reader}
        return data
    @classmethod
    def get_products(cls, dirpath):
        """Return a list of product dicts: name, location, unit, production volume."""
        dirpath = Path(dirpath)
        cls._check_dir(dirpath)
        units = cls._get_unit_data(dirpath)
        volumes = cls._get_production_volumes(dirpath)
        return [
            {
                "name": key[0],
                "location": key[1],
                "unit": units[key],
                "production volume": volumes[key],
            }
            for key in units
        ]
    @classmethod
    def get_technosphere_iterator(
        cls, dirpath, num_products, ignore_small_balancing_corrections=True
    ):
        """Yield (input, target, value) triples from the A.txt coefficient matrix.

        Each item is ((sector, region), (sector, region), float). Zero cells
        are skipped; when *ignore_small_balancing_corrections* is True, cells
        with |value| < 1e-15 are treated as balancing noise and skipped too.
        NOTE(review): *num_products* is unused — confirm whether it can go.
        """
        dirpath = Path(dirpath)
        with open(dirpath / "A.txt") as f:
            reader = csv.reader(f, delimiter="\t")
            locations = next(reader)[2:]
            names = [remove_numerics(o) for o in next(reader)[2:]]
            for line in tqdm(reader):
                inpt = (remove_numerics(line[1]), line[0])
                for index, elem in enumerate(line[2:]):
                    if elem and float(elem) != 0:
                        if (
                            ignore_small_balancing_corrections
                            and abs(float(elem)) < 1e-15
                        ):
                            continue
                        else:
                            yield (inpt, (names[index], locations[index]), float(elem))
    @classmethod
    def get_biosphere_iterator(cls, dirpath, ignore_small_balancing_corrections=True):
        """Yield (flow, target, value) triples from satellite/S.txt.

        Each item is (flow name, (sector, region), float); zero and (when
        enabled) sub-1e-15 balancing values are skipped, mirroring the
        technosphere iterator.
        """
        dirpath = Path(dirpath)
        with open(dirpath / "satellite" / "S.txt") as f:
            reader = csv.reader(f, delimiter="\t")
            locations = next(reader)[1:]
            names = [remove_numerics(o) for o in next(reader)[1:]]
            for line in tqdm(reader):
                flow = line[0]
                for index, elem in enumerate(line[1:]):
                    if elem and float(elem) != 0:
                        if (
                            ignore_small_balancing_corrections
                            and abs(float(elem)) < 1e-15
                        ):
                            continue
                        else:
                            yield (flow, (names[index], locations[index]), float(elem))
| 33.918182 | 88 | 0.5197 | from pathlib import Path
from tqdm import tqdm
import csv
import os
import re
def remove_numerics(string):
    """Transform names like 'Tobacco products (16)' into 'Tobacco products'."""
    return re.sub(r" \(\d\d\)$", "", string)
class Exiobase3DataExtractor(object):
@classmethod
def _check_dir(cls, path):
assert os.path.isdir(path), "Must supply path to EXIOBASE data folder"
assert "x.txt" in os.listdir(path), "Directory path must include Exiobase files"
@classmethod
def _get_production_volumes(cls, dirpath):
with open(dirpath / "x.txt") as csvfile:
reader = csv.DictReader(csvfile, delimiter="\t")
data = {
(row["sector"], row["region"]): float(row["indout"]) for row in reader
}
return data
@classmethod
def _get_unit_data(cls, dirpath):
lookup = {"M.EUR": "million €"}
with open(dirpath / "unit.txt") as csvfile:
reader = csv.DictReader(csvfile, delimiter="\t")
data = {
(row["sector"], row["region"]): lookup[row["unit"]] for row in reader
}
return data
@classmethod
def get_flows(cls, dirpath):
dirpath = Path(dirpath)
with open(dirpath / "satellite" / "unit.txt") as csvfile:
reader = csv.reader(csvfile, delimiter="\t")
next(reader)
data = {o[0]: o[1] for o in reader}
return data
@classmethod
def get_products(cls, dirpath):
dirpath = Path(dirpath)
cls._check_dir(dirpath)
units = cls._get_unit_data(dirpath)
volumes = cls._get_production_volumes(dirpath)
return [
{
"name": key[0],
"location": key[1],
"unit": units[key],
"production volume": volumes[key],
}
for key in units
]
@classmethod
def get_technosphere_iterator(
cls, dirpath, num_products, ignore_small_balancing_corrections=True
):
dirpath = Path(dirpath)
with open(dirpath / "A.txt") as f:
reader = csv.reader(f, delimiter="\t")
locations = next(reader)[2:]
names = [remove_numerics(o) for o in next(reader)[2:]]
for line in tqdm(reader):
inpt = (remove_numerics(line[1]), line[0])
for index, elem in enumerate(line[2:]):
if elem and float(elem) != 0:
if (
ignore_small_balancing_corrections
and abs(float(elem)) < 1e-15
):
continue
else:
yield (inpt, (names[index], locations[index]), float(elem))
@classmethod
def get_biosphere_iterator(cls, dirpath, ignore_small_balancing_corrections=True):
dirpath = Path(dirpath)
with open(dirpath / "satellite" / "S.txt") as f:
reader = csv.reader(f, delimiter="\t")
locations = next(reader)[1:]
names = [remove_numerics(o) for o in next(reader)[1:]]
for line in tqdm(reader):
flow = line[0]
for index, elem in enumerate(line[1:]):
if elem and float(elem) != 0:
if (
ignore_small_balancing_corrections
and abs(float(elem)) < 1e-15
):
continue
else:
yield (flow, (names[index], locations[index]), float(elem))
| true | true |
1c388bae3fb6fc165c920da9ecd9a70d511f6b52 | 1,188 | py | Python | src/gui/mixins.py | dynaryu/vaws | f6ed9b75408f7ce6100ed59b7754f745e59be152 | [
"BSD-3-Clause"
] | null | null | null | src/gui/mixins.py | dynaryu/vaws | f6ed9b75408f7ce6100ed59b7754f745e59be152 | [
"BSD-3-Clause"
] | null | null | null | src/gui/mixins.py | dynaryu/vaws | f6ed9b75408f7ce6100ed59b7754f745e59be152 | [
"BSD-3-Clause"
] | null | null | null |
from PyQt4.QtCore import QVariant, QSettings
from PyQt4.QtGui import QTableWidget
class PersistSizePosMixin(object):
    """Mixin that saves and restores a window's geometry and state via QSettings.

    Settings are keyed by a per-window *name*. The host class must provide
    saveGeometry/restoreGeometry and saveState/restoreState (i.e. be a
    QMainWindow-like widget).
    """
    def __init__(self, name):
        # key prefix under which this window's settings are stored
        self.name = name
    def initSizePosFromSettings(self):
        """Restore geometry and state previously stored under self.name, if present."""
        settings = QSettings()
        key = self.name + "/Geometry"
        if settings.contains(key):
            self.restoreGeometry(settings.value(key).toByteArray())
        key = self.name + "/State"
        if settings.contains(key):
            val = settings.value(key).toByteArray()
            self.restoreState(val)
    def storeSizePosToSettings(self):
        """Persist the current geometry and state under self.name."""
        settings = QSettings()
        settings.setValue(self.name + "/Geometry", QVariant(self.saveGeometry()))
        settings.setValue(self.name + "/State", QVariant(self.saveState()))
def setupTable(t, l=None):
    """Prepare table widget ``t`` for (re)population.

    Suspends repaints and signals while rows are filled, makes the table
    read-only, optionally sizes it to ``len(l)`` rows, and switches to
    whole-row selection.  Call ``finiTable`` once population is done.
    """
    t.setUpdatesEnabled(False)
    t.blockSignals(True)
    t.setEditTriggers(QTableWidget.NoEditTriggers)
    if l is not None:
        t.setRowCount(len(l))
    t.setSelectionBehavior(QTableWidget.SelectRows)
def finiTable(t):
    """Finish table population started by ``setupTable``.

    Resizes columns to fit their contents and re-enables the repaints
    and signals that were suspended during population.
    """
    t.resizeColumnsToContents()
    t.setUpdatesEnabled(True)
    t.blockSignals(False)
from PyQt4.QtCore import QVariant, QSettings
from PyQt4.QtGui import QTableWidget
class PersistSizePosMixin(object):
    """Persist window geometry/state to ``QSettings`` under ``name``.

    The host class must supply Qt's ``saveGeometry``/``restoreGeometry``
    and ``saveState``/``restoreState`` (e.g. a ``QMainWindow``).
    """

    def __init__(self, name):
        # Prefix for the "<name>/Geometry" and "<name>/State" keys.
        self.name = name

    def initSizePosFromSettings(self):
        """Restore previously stored geometry and window state, if any."""
        settings = QSettings()
        key = self.name + "/Geometry"
        if settings.contains(key):
            self.restoreGeometry(settings.value(key).toByteArray())
        key = self.name + "/State"
        if settings.contains(key):
            val = settings.value(key).toByteArray()
            self.restoreState(val)

    def storeSizePosToSettings(self):
        """Save the current geometry and window state for the next session."""
        settings = QSettings()
        settings.setValue(self.name + "/Geometry", QVariant(self.saveGeometry()))
        settings.setValue(self.name + "/State", QVariant(self.saveState()))
def setupTable(t, l=None):
    """Prepare table widget ``t`` for (re)population.

    Suspends repaints and signals, makes the table read-only, optionally
    sizes it to ``len(l)`` rows, and selects whole rows.  Pair with
    ``finiTable``.
    """
    t.setUpdatesEnabled(False)
    t.blockSignals(True)
    t.setEditTriggers(QTableWidget.NoEditTriggers)
    if l is not None:
        t.setRowCount(len(l))
    t.setSelectionBehavior(QTableWidget.SelectRows)
def finiTable(t):
    """Finish table population: fit columns, re-enable repaints/signals."""
    t.resizeColumnsToContents()
    t.setUpdatesEnabled(True)
    t.blockSignals(False)
1c388c7ef1bcb9c97ba3d2d40f8d2d79a492e919 | 20,373 | py | Python | azure-mgmt-dns/azure/mgmt/dns/v2016_04_01/operations/zones_operations.py | alexeldeib/azure-sdk-for-python | eed1e228847d90d97ca55ded98e10a915b391b61 | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-mgmt-dns/azure/mgmt/dns/v2016_04_01/operations/zones_operations.py | Vinaysaibhogela/azure-sdk-for-python | 3345c2b5ed2b750024f71331a68d7a087157e9a7 | [
"MIT"
] | null | null | null | azure-mgmt-dns/azure/mgmt/dns/v2016_04_01/operations/zones_operations.py | Vinaysaibhogela/azure-sdk-for-python | 3345c2b5ed2b750024f71331a68d7a087157e9a7 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class ZonesOperations(object):
    """ZonesOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Specifies the API version. Constant value: "2016-04-01".
    """
    # Generated-code convention: expose the models module on the class.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2016-04-01"
        self.config = config
    def create_or_update(
            self, resource_group_name, zone_name, parameters, if_match=None, if_none_match=None, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a DNS zone. Does not modify DNS records within the
        zone.
        :param resource_group_name: The name of the resource group. The name
        is case insensitive.
        :type resource_group_name: str
        :param zone_name: The name of the DNS zone (without a terminating
        dot).
        :type zone_name: str
        :param parameters: Parameters supplied to the CreateOrUpdate
        operation.
        :type parameters: ~azure.mgmt.dns.v2016_04_01.models.Zone
        :param if_match: The etag of the DNS zone. Omit this value to always
        overwrite the current zone. Specify the last-seen etag value to
        prevent accidentally overwritting any concurrent changes.
        :type if_match: str
        :param if_none_match: Set to '*' to allow a new DNS zone to be
        created, but to prevent updating an existing zone. Other values will
        be ignored.
        :type if_none_match: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
        deserialized response
        :param operation_config: :ref:`Operation configuration
        overrides<msrest:optionsforoperations>`.
        :return: Zone or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.dns.v2016_04_01.models.Zone or
        ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'zoneName': self._serialize.url("zone_name", zone_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        # Etag preconditions are only sent when explicitly requested.
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if if_none_match is not None:
            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'Zone')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Zone', response)
        if response.status_code == 201:
            deserialized = self._deserialize('Zone', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}'}
    def _delete_initial(
            self, resource_group_name, zone_name, if_match=None, custom_headers=None, raw=False, **operation_config):
        """Send the initial DELETE of the long-running delete operation."""
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'zoneName': self._serialize.url("zone_name", zone_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ZoneDeleteResult', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def delete(
            self, resource_group_name, zone_name, if_match=None, custom_headers=None, raw=False, polling=True, **operation_config):
        """Deletes a DNS zone. WARNING: All DNS records in the zone will also be
        deleted. This operation cannot be undone.
        :param resource_group_name: The name of the resource group. The name
        is case insensitive.
        :type resource_group_name: str
        :param zone_name: The name of the DNS zone (without a terminating
        dot).
        :type zone_name: str
        :param if_match: The etag of the DNS zone. Omit this value to always
        delete the current zone. Specify the last-seen etag value to prevent
        accidentally deleting any concurrent changes.
        :type if_match: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
        direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
        polling object for personal polling strategy
        :return: An instance of LROPoller that returns ZoneDeleteResult or
        ClientRawResponse<ZoneDeleteResult> if raw==True
        :rtype:
        ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.dns.v2016_04_01.models.ZoneDeleteResult]
        or
        ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.dns.v2016_04_01.models.ZoneDeleteResult]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            zone_name=zone_name,
            if_match=if_match,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        # Maps the final polling response onto a ZoneDeleteResult.
        def get_long_running_output(response):
            deserialized = self._deserialize('ZoneDeleteResult', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Select the polling strategy (ARM long-running polling by default).
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}'}
    def get(
            self, resource_group_name, zone_name, custom_headers=None, raw=False, **operation_config):
        """Gets a DNS zone. Retrieves the zone properties, but not the record sets
        within the zone.
        :param resource_group_name: The name of the resource group. The name
        is case insensitive.
        :type resource_group_name: str
        :param zone_name: The name of the DNS zone (without a terminating
        dot).
        :type zone_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
        deserialized response
        :param operation_config: :ref:`Operation configuration
        overrides<msrest:optionsforoperations>`.
        :return: Zone or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.dns.v2016_04_01.models.Zone or
        ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'zoneName': self._serialize.url("zone_name", zone_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Zone', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}'}
    def list_by_resource_group(
            self, resource_group_name, top=None, custom_headers=None, raw=False, **operation_config):
        """Lists the DNS zones within a resource group.
        :param resource_group_name: The name of the resource group. The name
        is case insensitive.
        :type resource_group_name: str
        :param top: The maximum number of record sets to return. If not
        specified, returns up to 100 record sets.
        :type top: int
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
        deserialized response
        :param operation_config: :ref:`Operation configuration
        overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Zone
        :rtype:
        ~azure.mgmt.dns.v2016_04_01.models.ZonePaged[~azure.mgmt.dns.v2016_04_01.models.Zone]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # ZonePaged callback: build the first-page URL, or follow next_link.
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int')
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.ZonePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.ZonePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones'}
    def list(
            self, top=None, custom_headers=None, raw=False, **operation_config):
        """Lists the DNS zones in all resource groups in a subscription.
        :param top: The maximum number of DNS zones to return. If not
        specified, returns up to 100 zones.
        :type top: int
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
        deserialized response
        :param operation_config: :ref:`Operation configuration
        overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Zone
        :rtype:
        ~azure.mgmt.dns.v2016_04_01.models.ZonePaged[~azure.mgmt.dns.v2016_04_01.models.Zone]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # ZonePaged callback: build the first-page URL, or follow next_link.
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int')
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.ZonePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.ZonePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/dnszones'}
| 46.407745 | 169 | 0.653659 |
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class ZonesOperations(object):
    """Operations on Microsoft.Network DNS zones (API version 2016-04-01).

    Thin HTTP wrappers: parameters are serialized, responses are
    deserialized into model objects, and any unexpected HTTP status
    raises ``CloudError``.
    """
    # Expose the generated models module as a class attribute.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        """Keep references to the pipeline client, (de)serializers and config."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2016-04-01"
        self.config = config
    def create_or_update(
            self, resource_group_name, zone_name, parameters, if_match=None, if_none_match=None, custom_headers=None, raw=False, **operation_config):
        """PUT a DNS zone; returns the deserialized ``Zone``.

        ``if_match``/``if_none_match`` become etag precondition headers
        when provided.  Raises ``CloudError`` for statuses other than
        200/201; returns a ``ClientRawResponse`` instead when ``raw``.
        """
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'zoneName': self._serialize.url("zone_name", zone_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if if_none_match is not None:
            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        body_content = self._serialize.body(parameters, 'Zone')
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Zone', response)
        if response.status_code == 201:
            deserialized = self._deserialize('Zone', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}'}
    def _delete_initial(
            self, resource_group_name, zone_name, if_match=None, custom_headers=None, raw=False, **operation_config):
        """Send the initial DELETE of the long-running delete operation.

        Accepts statuses 200/202/204; a 200 body deserializes to
        ``ZoneDeleteResult``.
        """
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'zoneName': self._serialize.url("zone_name", zone_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ZoneDeleteResult', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def delete(
            self, resource_group_name, zone_name, if_match=None, custom_headers=None, raw=False, polling=True, **operation_config):
        """Long-running zone delete.

        Wraps ``_delete_initial`` in an ``LROPoller``; ``polling`` may be
        True (ARM polling), False (no polling), or a custom strategy.
        """
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            zone_name=zone_name,
            if_match=if_match,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # Maps the final polling response onto a ZoneDeleteResult.
            deserialized = self._deserialize('ZoneDeleteResult', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}'}
    def get(
            self, resource_group_name, zone_name, custom_headers=None, raw=False, **operation_config):
        """GET a single zone; returns ``Zone``; raises ``CloudError`` on non-200."""
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'zoneName': self._serialize.url("zone_name", zone_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Zone', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}'}
    def list_by_resource_group(
            self, resource_group_name, top=None, custom_headers=None, raw=False, **operation_config):
        """Return a ``ZonePaged`` iterator over the zones of one resource group.

        ``top`` (when given) is sent as the ``$top`` query parameter.
        """
        def internal_paging(next_link=None, raw=False):
            # ZonePaged callback: build the first-page URL, or follow next_link.
            if not next_link:
                url = self.list_by_resource_group.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int')
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        deserialized = models.ZonePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.ZonePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones'}
    def list(
            self, top=None, custom_headers=None, raw=False, **operation_config):
        """Return a ``ZonePaged`` iterator over all zones in the subscription."""
        def internal_paging(next_link=None, raw=False):
            # ZonePaged callback: build the first-page URL, or follow next_link.
            if not next_link:
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int')
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        deserialized = models.ZonePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.ZonePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/dnszones'}
| true | true |
1c388e1b43dc8a50c0ebf84601ad29804106cc77 | 1,171 | py | Python | sktime/utils/seasonality.py | Mo-Saif/sktime | f3c71977c113d986276fca2bdedf70faaa33f040 | [
"BSD-3-Clause"
] | 1 | 2019-09-29T07:11:33.000Z | 2019-09-29T07:11:33.000Z | sktime/utils/seasonality.py | ClaudiaSanches/sktime | 63e7839e80ca6d5fe5fc4f33389ec3bcacd8aa59 | [
"BSD-3-Clause"
] | null | null | null | sktime/utils/seasonality.py | ClaudiaSanches/sktime | 63e7839e80ca6d5fe5fc4f33389ec3bcacd8aa59 | [
"BSD-3-Clause"
] | 1 | 2019-05-08T10:42:20.000Z | 2019-05-08T10:42:20.000Z | from statsmodels.tsa.stattools import acf
import numpy as np
def seasonality_test(x, freq):
    """Seasonality test used in the M4 competition.

    Port of the M4 benchmark test: the autocorrelation at lag ``freq``
    is compared against a critical limit built from lower-lag
    autocorrelations::

        s     = rho_1 + sum(rho_i ** 2 for i in range(2, freq))
        limit = 1.645 * sqrt((1 + 2 * s) / n)

    (The unsquared ``rho_1`` term mirrors the reference implementation.)

    Parameters
    ----------
    x : ndarray
        Time series
    freq : int
        Frequency, periods per year (ppy)

    Returns
    -------
    test : bool
        Whether or not seasonality is present in data for given frequency

    References
    ----------
    https://github.com/M4Competition/M4-methods/blob/master/ML_benchmarks.py
    """
    x = np.asarray(x)
    crit_val = 1.645  # z-value used by the M4 benchmark code
    n = len(x)
    r = acf(x, nlags=freq)  # autocorrelations for lags 0..freq inclusive
    # BUGFIX: the reference sums squared autocorrelations over
    # ``range(2, ppy)`` i.e. lags 2..freq-1; the previous ``r[2:]`` wrongly
    # included the tested lag ``freq`` itself in the variance estimate.
    s = r[1] + np.sum(r[2:freq] ** 2)
    limit = crit_val * np.sqrt((1 + 2 * s) / n)
    return np.abs(r[freq]) > limit
| 23.42 | 76 | 0.577284 | from statsmodels.tsa.stattools import acf
import numpy as np
def seasonality_test(x, freq):
    """Seasonality test used in the M4 competition.

    Compares the autocorrelation at lag ``freq`` against
    ``1.645 * sqrt((1 + 2 * s) / n)`` where
    ``s = rho_1 + sum(rho_i ** 2 for i in range(2, freq))`` — the
    unsquared ``rho_1`` term mirrors the M4 reference code.

    Parameters
    ----------
    x : ndarray
        Time series
    freq : int
        Frequency, periods per year (ppy)

    Returns
    -------
    test : bool
        Whether or not seasonality is present in data for given frequency
    """
    x = np.asarray(x)
    crit_val = 1.645  # z-value used by the M4 benchmark code
    n = len(x)
    r = acf(x, nlags=freq)  # autocorrelations for lags 0..freq inclusive
    # BUGFIX: the M4 reference sums squared autocorrelations over lags
    # 2..freq-1 (``range(2, ppy)``); ``r[2:]`` wrongly included the tested
    # lag ``freq`` itself in the variance estimate.
    s = r[1] + np.sum(r[2:freq] ** 2)
    limit = crit_val * np.sqrt((1 + 2 * s) / n)
    return np.abs(r[freq]) > limit
| true | true |
1c388eed2ac49e4f17d0ea70be50fbca14318c10 | 17,392 | py | Python | pythonforandroid/bootstrap.py | mishk/python-for-android | 1b887bed09a7ec19bfa00c9fb69a54e3f60a2328 | [
"MIT"
] | 2 | 2020-09-18T17:14:12.000Z | 2021-03-24T11:39:12.000Z | pythonforandroid/bootstrap.py | mishk/python-for-android | 1b887bed09a7ec19bfa00c9fb69a54e3f60a2328 | [
"MIT"
] | null | null | null | pythonforandroid/bootstrap.py | mishk/python-for-android | 1b887bed09a7ec19bfa00c9fb69a54e3f60a2328 | [
"MIT"
] | 1 | 2020-07-23T02:40:40.000Z | 2020-07-23T02:40:40.000Z | import functools
import glob
import importlib
import os
from os.path import (join, dirname, isdir, normpath, splitext, basename)
from os import listdir, walk, sep
import sh
import shlex
import shutil
from pythonforandroid.logger import (shprint, info, logger, debug)
from pythonforandroid.util import (
current_directory, ensure_dir, temp_directory, BuildInterruptingException)
from pythonforandroid.recipe import Recipe
def copy_files(src_root, dest_root, override=True, symlink=False):
for root, dirnames, filenames in walk(src_root):
for filename in filenames:
subdir = normpath(root.replace(src_root, ""))
if subdir.startswith(sep): # ensure it is relative
subdir = subdir[1:]
dest_dir = join(dest_root, subdir)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
src_file = join(root, filename)
dest_file = join(dest_dir, filename)
if os.path.isfile(src_file):
if override and os.path.exists(dest_file):
os.unlink(dest_file)
if not os.path.exists(dest_file):
if symlink:
os.symlink(src_file, dest_file)
else:
shutil.copy(src_file, dest_file)
else:
os.makedirs(dest_file)
default_recipe_priorities = [
"webview", "sdl2", "service_only" # last is highest
]
# ^^ NOTE: these are just the default priorities if no special rules
# apply (which you can find in the code below), so basically if no
# known graphical lib or web lib is used - in which case service_only
# is the most reasonable guess.
def _cmp_bootstraps_by_priority(a, b):
def rank_bootstrap(bootstrap):
""" Returns a ranking index for each bootstrap,
with higher priority ranked with higher number. """
if bootstrap.name in default_recipe_priorities:
return default_recipe_priorities.index(bootstrap.name) + 1
return 0
# Rank bootstraps in order:
rank_a = rank_bootstrap(a)
rank_b = rank_bootstrap(b)
if rank_a != rank_b:
return (rank_b - rank_a)
else:
if a.name < b.name: # alphabetic sort for determinism
return -1
else:
return 1
class Bootstrap:
'''An Android project template, containing recipe stuff for
compilation and templated fields for APK info.
'''
name = ''
jni_subdir = '/jni'
ctx = None
bootstrap_dir = None
build_dir = None
dist_name = None
distribution = None
# All bootstraps should include Python in some way:
recipe_depends = ['python3', 'android']
can_be_chosen_automatically = True
'''Determines whether the bootstrap can be chosen as one that
satisfies user requirements. If False, it will not be returned
from Bootstrap.get_bootstrap_from_recipes.
'''
# Other things a Bootstrap might need to track (maybe separately):
# ndk_main.c
# whitelist.txt
# blacklist.txt
@property
def dist_dir(self):
'''The dist dir at which to place the finished distribution.'''
if self.distribution is None:
raise BuildInterruptingException(
'Internal error: tried to access {}.dist_dir, but {}.distribution '
'is None'.format(self, self))
return self.distribution.dist_dir
@property
def jni_dir(self):
return self.name + self.jni_subdir
def check_recipe_choices(self):
'''Checks what recipes are being built to see which of the alternative
and optional dependencies are being used,
and returns a list of these.'''
recipes = []
built_recipes = self.ctx.recipe_build_order or []
for recipe in self.recipe_depends:
if isinstance(recipe, (tuple, list)):
for alternative in recipe:
if alternative in built_recipes:
recipes.append(alternative)
break
return sorted(recipes)
def get_build_dir_name(self):
choices = self.check_recipe_choices()
dir_name = '-'.join([self.name] + choices)
return dir_name
def get_build_dir(self):
return join(self.ctx.build_dir, 'bootstrap_builds', self.get_build_dir_name())
def get_dist_dir(self, name):
return join(self.ctx.dist_dir, name)
def get_common_dir(self):
return os.path.abspath(join(self.bootstrap_dir, "..", 'common'))
@property
def name(self):
modname = self.__class__.__module__
return modname.split(".", 2)[-1]
def get_bootstrap_dirs(self):
"""get all bootstrap directories, following the MRO path"""
# get all bootstrap names along the __mro__, cutting off Bootstrap and object
classes = self.__class__.__mro__[:-2]
bootstrap_names = [cls.name for cls in classes] + ['common']
bootstrap_dirs = [
join(self.ctx.root_dir, 'bootstraps', bootstrap_name)
for bootstrap_name in reversed(bootstrap_names)
]
return bootstrap_dirs
def _copy_in_final_files(self):
if self.name == "sdl2":
# Get the paths for copying SDL2's java source code:
sdl2_recipe = Recipe.get_recipe("sdl2", self.ctx)
sdl2_build_dir = sdl2_recipe.get_jni_dir()
src_dir = join(sdl2_build_dir, "SDL", "android-project",
"app", "src", "main", "java",
"org", "libsdl", "app")
target_dir = join(self.dist_dir, 'src', 'main', 'java', 'org',
'libsdl', 'app')
# Do actual copying:
info('Copying in SDL2 .java files from: ' + str(src_dir))
if not os.path.exists(target_dir):
os.makedirs(target_dir)
copy_files(src_dir, target_dir, override=True)
def prepare_build_dir(self):
"""Ensure that a build dir exists for the recipe. This same single
dir will be used for building all different archs."""
bootstrap_dirs = self.get_bootstrap_dirs()
# now do a cumulative copy of all bootstrap dirs
self.build_dir = self.get_build_dir()
for bootstrap_dir in bootstrap_dirs:
copy_files(join(bootstrap_dir, 'build'), self.build_dir, symlink=self.ctx.symlink_bootstrap_files)
with current_directory(self.build_dir):
with open('project.properties', 'w') as fileh:
fileh.write('target=android-{}'.format(self.ctx.android_api))
def prepare_dist_dir(self):
ensure_dir(self.dist_dir)
def assemble_distribution(self):
''' Copies all the files into the distribution (this function is
overridden by the specific bootstrap classes to do this)
and add in the distribution info.
'''
self._copy_in_final_files()
self.distribution.save_info(self.dist_dir)
@classmethod
def all_bootstraps(cls):
'''Find all the available bootstraps and return them.'''
forbidden_dirs = ('__pycache__', 'common')
bootstraps_dir = join(dirname(__file__), 'bootstraps')
result = set()
for name in listdir(bootstraps_dir):
if name in forbidden_dirs:
continue
filen = join(bootstraps_dir, name)
if isdir(filen):
result.add(name)
return result
@classmethod
def get_usable_bootstraps_for_recipes(cls, recipes, ctx):
'''Returns all bootstrap whose recipe requirements do not conflict
with the given recipes, in no particular order.'''
info('Trying to find a bootstrap that matches the given recipes.')
bootstraps = [cls.get_bootstrap(name, ctx)
for name in cls.all_bootstraps()]
acceptable_bootstraps = set()
# Find out which bootstraps are acceptable:
for bs in bootstraps:
if not bs.can_be_chosen_automatically:
continue
possible_dependency_lists = expand_dependencies(bs.recipe_depends, ctx)
for possible_dependencies in possible_dependency_lists:
ok = True
# Check if the bootstap's dependencies have an internal conflict:
for recipe in possible_dependencies:
recipe = Recipe.get_recipe(recipe, ctx)
if any(conflict in recipes for conflict in recipe.conflicts):
ok = False
break
# Check if bootstrap's dependencies conflict with chosen
# packages:
for recipe in recipes:
try:
recipe = Recipe.get_recipe(recipe, ctx)
except ValueError:
conflicts = []
else:
conflicts = recipe.conflicts
if any(conflict in possible_dependencies
for conflict in conflicts):
ok = False
break
if ok and bs not in acceptable_bootstraps:
acceptable_bootstraps.add(bs)
info('Found {} acceptable bootstraps: {}'.format(
len(acceptable_bootstraps),
[bs.name for bs in acceptable_bootstraps]))
return acceptable_bootstraps
@classmethod
def get_bootstrap_from_recipes(cls, recipes, ctx):
'''Picks a single recommended default bootstrap out of
all_usable_bootstraps_from_recipes() for the given reicpes,
and returns it.'''
known_web_packages = {"flask"} # to pick webview over service_only
recipes_with_deps_lists = expand_dependencies(recipes, ctx)
acceptable_bootstraps = cls.get_usable_bootstraps_for_recipes(
recipes, ctx
)
def have_dependency_in_recipes(dep):
for dep_list in recipes_with_deps_lists:
if dep in dep_list:
return True
return False
# Special rule: return SDL2 bootstrap if there's an sdl2 dep:
if (have_dependency_in_recipes("sdl2") and
"sdl2" in [b.name for b in acceptable_bootstraps]
):
info('Using sdl2 bootstrap since it is in dependencies')
return cls.get_bootstrap("sdl2", ctx)
# Special rule: return "webview" if we depend on common web recipe:
for possible_web_dep in known_web_packages:
if have_dependency_in_recipes(possible_web_dep):
# We have a web package dep!
if "webview" in [b.name for b in acceptable_bootstraps]:
info('Using webview bootstrap since common web packages '
'were found {}'.format(
known_web_packages.intersection(recipes)
))
return cls.get_bootstrap("webview", ctx)
prioritized_acceptable_bootstraps = sorted(
list(acceptable_bootstraps),
key=functools.cmp_to_key(_cmp_bootstraps_by_priority)
)
if prioritized_acceptable_bootstraps:
info('Using the highest ranked/first of these: {}'
.format(prioritized_acceptable_bootstraps[0].name))
return prioritized_acceptable_bootstraps[0]
return None
@classmethod
def get_bootstrap(cls, name, ctx):
'''Returns an instance of a bootstrap with the given name.
This is the only way you should access a bootstrap class, as
it sets the bootstrap directory correctly.
'''
if name is None:
return None
if not hasattr(cls, 'bootstraps'):
cls.bootstraps = {}
if name in cls.bootstraps:
return cls.bootstraps[name]
mod = importlib.import_module('pythonforandroid.bootstraps.{}'
.format(name))
if len(logger.handlers) > 1:
logger.removeHandler(logger.handlers[1])
bootstrap = mod.bootstrap
bootstrap.bootstrap_dir = join(ctx.root_dir, 'bootstraps', name)
bootstrap.ctx = ctx
return bootstrap
def distribute_libs(self, arch, src_dirs, wildcard='*', dest_dir="libs"):
'''Copy existing arch libs from build dirs to current dist dir.'''
info('Copying libs')
tgt_dir = join(dest_dir, arch.arch)
ensure_dir(tgt_dir)
for src_dir in src_dirs:
libs = glob.glob(join(src_dir, wildcard))
if libs:
shprint(sh.cp, '-a', *libs, tgt_dir)
def distribute_javaclasses(self, javaclass_dir, dest_dir="src"):
'''Copy existing javaclasses from build dir to current dist dir.'''
info('Copying java files')
ensure_dir(dest_dir)
filenames = glob.glob(javaclass_dir)
shprint(sh.cp, '-a', *filenames, dest_dir)
def distribute_aars(self, arch):
'''Process existing .aar bundles and copy to current dist dir.'''
info('Unpacking aars')
for aar in glob.glob(join(self.ctx.aars_dir, '*.aar')):
self._unpack_aar(aar, arch)
def _unpack_aar(self, aar, arch):
'''Unpack content of .aar bundle and copy to current dist dir.'''
with temp_directory() as temp_dir:
name = splitext(basename(aar))[0]
jar_name = name + '.jar'
info("unpack {} aar".format(name))
debug(" from {}".format(aar))
debug(" to {}".format(temp_dir))
shprint(sh.unzip, '-o', aar, '-d', temp_dir)
jar_src = join(temp_dir, 'classes.jar')
jar_tgt = join('libs', jar_name)
debug("copy {} jar".format(name))
debug(" from {}".format(jar_src))
debug(" to {}".format(jar_tgt))
ensure_dir('libs')
shprint(sh.cp, '-a', jar_src, jar_tgt)
so_src_dir = join(temp_dir, 'jni', arch.arch)
so_tgt_dir = join('libs', arch.arch)
debug("copy {} .so".format(name))
debug(" from {}".format(so_src_dir))
debug(" to {}".format(so_tgt_dir))
ensure_dir(so_tgt_dir)
so_files = glob.glob(join(so_src_dir, '*.so'))
shprint(sh.cp, '-a', *so_files, so_tgt_dir)
def strip_libraries(self, arch):
info('Stripping libraries')
env = arch.get_env()
tokens = shlex.split(env['STRIP'])
strip = sh.Command(tokens[0])
if len(tokens) > 1:
strip = strip.bake(tokens[1:])
libs_dir = join(self.dist_dir, '_python_bundle',
'_python_bundle', 'modules')
filens = shprint(sh.find, libs_dir, join(self.dist_dir, 'libs'),
'-iname', '*.so', _env=env).stdout.decode('utf-8')
logger.info('Stripping libraries in private dir')
for filen in filens.split('\n'):
if not filen:
continue # skip the last ''
try:
strip(filen, _env=env)
except sh.ErrorReturnCode_1:
logger.debug('Failed to strip ' + filen)
def fry_eggs(self, sitepackages):
info('Frying eggs in {}'.format(sitepackages))
for d in listdir(sitepackages):
rd = join(sitepackages, d)
if isdir(rd) and d.endswith('.egg'):
info(' ' + d)
files = [join(rd, f) for f in listdir(rd) if f != 'EGG-INFO']
if files:
shprint(sh.mv, '-t', sitepackages, *files)
shprint(sh.rm, '-rf', d)
def expand_dependencies(recipes, ctx):
""" This function expands to lists of all different available
alternative recipe combinations, with the dependencies added in
ONLY for all the not-with-alternative recipes.
(So this is like the deps graph very simplified and incomplete, but
hopefully good enough for most basic bootstrap compatibility checks)
"""
# Add in all the deps of recipes where there is no alternative:
recipes_with_deps = list(recipes)
for entry in recipes:
if not isinstance(entry, (tuple, list)) or len(entry) == 1:
if isinstance(entry, (tuple, list)):
entry = entry[0]
try:
recipe = Recipe.get_recipe(entry, ctx)
recipes_with_deps += recipe.depends
except ValueError:
# it's a pure python package without a recipe, so we
# don't know the dependencies...skipping for now
pass
# Split up lists by available alternatives:
recipe_lists = [[]]
for recipe in recipes_with_deps:
if isinstance(recipe, (tuple, list)):
new_recipe_lists = []
for alternative in recipe:
for old_list in recipe_lists:
new_list = [i for i in old_list]
new_list.append(alternative)
new_recipe_lists.append(new_list)
recipe_lists = new_recipe_lists
else:
for existing_list in recipe_lists:
existing_list.append(recipe)
return recipe_lists
| 39.259594 | 110 | 0.594181 | import functools
import glob
import importlib
import os
from os.path import (join, dirname, isdir, normpath, splitext, basename)
from os import listdir, walk, sep
import sh
import shlex
import shutil
from pythonforandroid.logger import (shprint, info, logger, debug)
from pythonforandroid.util import (
current_directory, ensure_dir, temp_directory, BuildInterruptingException)
from pythonforandroid.recipe import Recipe
def copy_files(src_root, dest_root, override=True, symlink=False):
for root, dirnames, filenames in walk(src_root):
for filename in filenames:
subdir = normpath(root.replace(src_root, ""))
if subdir.startswith(sep):
subdir = subdir[1:]
dest_dir = join(dest_root, subdir)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
src_file = join(root, filename)
dest_file = join(dest_dir, filename)
if os.path.isfile(src_file):
if override and os.path.exists(dest_file):
os.unlink(dest_file)
if not os.path.exists(dest_file):
if symlink:
os.symlink(src_file, dest_file)
else:
shutil.copy(src_file, dest_file)
else:
os.makedirs(dest_file)
default_recipe_priorities = [
"webview", "sdl2", "service_only"
]
def _cmp_bootstraps_by_priority(a, b):
def rank_bootstrap(bootstrap):
if bootstrap.name in default_recipe_priorities:
return default_recipe_priorities.index(bootstrap.name) + 1
return 0
rank_a = rank_bootstrap(a)
rank_b = rank_bootstrap(b)
if rank_a != rank_b:
return (rank_b - rank_a)
else:
if a.name < b.name:
return -1
else:
return 1
class Bootstrap:
name = ''
jni_subdir = '/jni'
ctx = None
bootstrap_dir = None
build_dir = None
dist_name = None
distribution = None
recipe_depends = ['python3', 'android']
can_be_chosen_automatically = True
@property
def dist_dir(self):
if self.distribution is None:
raise BuildInterruptingException(
'Internal error: tried to access {}.dist_dir, but {}.distribution '
'is None'.format(self, self))
return self.distribution.dist_dir
@property
def jni_dir(self):
return self.name + self.jni_subdir
def check_recipe_choices(self):
recipes = []
built_recipes = self.ctx.recipe_build_order or []
for recipe in self.recipe_depends:
if isinstance(recipe, (tuple, list)):
for alternative in recipe:
if alternative in built_recipes:
recipes.append(alternative)
break
return sorted(recipes)
def get_build_dir_name(self):
choices = self.check_recipe_choices()
dir_name = '-'.join([self.name] + choices)
return dir_name
def get_build_dir(self):
return join(self.ctx.build_dir, 'bootstrap_builds', self.get_build_dir_name())
def get_dist_dir(self, name):
return join(self.ctx.dist_dir, name)
def get_common_dir(self):
return os.path.abspath(join(self.bootstrap_dir, "..", 'common'))
@property
def name(self):
modname = self.__class__.__module__
return modname.split(".", 2)[-1]
def get_bootstrap_dirs(self):
classes = self.__class__.__mro__[:-2]
bootstrap_names = [cls.name for cls in classes] + ['common']
bootstrap_dirs = [
join(self.ctx.root_dir, 'bootstraps', bootstrap_name)
for bootstrap_name in reversed(bootstrap_names)
]
return bootstrap_dirs
def _copy_in_final_files(self):
if self.name == "sdl2":
sdl2_recipe = Recipe.get_recipe("sdl2", self.ctx)
sdl2_build_dir = sdl2_recipe.get_jni_dir()
src_dir = join(sdl2_build_dir, "SDL", "android-project",
"app", "src", "main", "java",
"org", "libsdl", "app")
target_dir = join(self.dist_dir, 'src', 'main', 'java', 'org',
'libsdl', 'app')
# Do actual copying:
info('Copying in SDL2 .java files from: ' + str(src_dir))
if not os.path.exists(target_dir):
os.makedirs(target_dir)
copy_files(src_dir, target_dir, override=True)
def prepare_build_dir(self):
bootstrap_dirs = self.get_bootstrap_dirs()
# now do a cumulative copy of all bootstrap dirs
self.build_dir = self.get_build_dir()
for bootstrap_dir in bootstrap_dirs:
copy_files(join(bootstrap_dir, 'build'), self.build_dir, symlink=self.ctx.symlink_bootstrap_files)
with current_directory(self.build_dir):
with open('project.properties', 'w') as fileh:
fileh.write('target=android-{}'.format(self.ctx.android_api))
def prepare_dist_dir(self):
ensure_dir(self.dist_dir)
def assemble_distribution(self):
self._copy_in_final_files()
self.distribution.save_info(self.dist_dir)
@classmethod
def all_bootstraps(cls):
forbidden_dirs = ('__pycache__', 'common')
bootstraps_dir = join(dirname(__file__), 'bootstraps')
result = set()
for name in listdir(bootstraps_dir):
if name in forbidden_dirs:
continue
filen = join(bootstraps_dir, name)
if isdir(filen):
result.add(name)
return result
@classmethod
def get_usable_bootstraps_for_recipes(cls, recipes, ctx):
info('Trying to find a bootstrap that matches the given recipes.')
bootstraps = [cls.get_bootstrap(name, ctx)
for name in cls.all_bootstraps()]
acceptable_bootstraps = set()
# Find out which bootstraps are acceptable:
for bs in bootstraps:
if not bs.can_be_chosen_automatically:
continue
possible_dependency_lists = expand_dependencies(bs.recipe_depends, ctx)
for possible_dependencies in possible_dependency_lists:
ok = True
# Check if the bootstap's dependencies have an internal conflict:
for recipe in possible_dependencies:
recipe = Recipe.get_recipe(recipe, ctx)
if any(conflict in recipes for conflict in recipe.conflicts):
ok = False
break
# packages:
for recipe in recipes:
try:
recipe = Recipe.get_recipe(recipe, ctx)
except ValueError:
conflicts = []
else:
conflicts = recipe.conflicts
if any(conflict in possible_dependencies
for conflict in conflicts):
ok = False
break
if ok and bs not in acceptable_bootstraps:
acceptable_bootstraps.add(bs)
info('Found {} acceptable bootstraps: {}'.format(
len(acceptable_bootstraps),
[bs.name for bs in acceptable_bootstraps]))
return acceptable_bootstraps
@classmethod
def get_bootstrap_from_recipes(cls, recipes, ctx):
known_web_packages = {"flask"} # to pick webview over service_only
recipes_with_deps_lists = expand_dependencies(recipes, ctx)
acceptable_bootstraps = cls.get_usable_bootstraps_for_recipes(
recipes, ctx
)
def have_dependency_in_recipes(dep):
for dep_list in recipes_with_deps_lists:
if dep in dep_list:
return True
return False
# Special rule: return SDL2 bootstrap if there's an sdl2 dep:
if (have_dependency_in_recipes("sdl2") and
"sdl2" in [b.name for b in acceptable_bootstraps]
):
info('Using sdl2 bootstrap since it is in dependencies')
return cls.get_bootstrap("sdl2", ctx)
for possible_web_dep in known_web_packages:
if have_dependency_in_recipes(possible_web_dep):
if "webview" in [b.name for b in acceptable_bootstraps]:
info('Using webview bootstrap since common web packages '
'were found {}'.format(
known_web_packages.intersection(recipes)
))
return cls.get_bootstrap("webview", ctx)
prioritized_acceptable_bootstraps = sorted(
list(acceptable_bootstraps),
key=functools.cmp_to_key(_cmp_bootstraps_by_priority)
)
if prioritized_acceptable_bootstraps:
info('Using the highest ranked/first of these: {}'
.format(prioritized_acceptable_bootstraps[0].name))
return prioritized_acceptable_bootstraps[0]
return None
@classmethod
def get_bootstrap(cls, name, ctx):
if name is None:
return None
if not hasattr(cls, 'bootstraps'):
cls.bootstraps = {}
if name in cls.bootstraps:
return cls.bootstraps[name]
mod = importlib.import_module('pythonforandroid.bootstraps.{}'
.format(name))
if len(logger.handlers) > 1:
logger.removeHandler(logger.handlers[1])
bootstrap = mod.bootstrap
bootstrap.bootstrap_dir = join(ctx.root_dir, 'bootstraps', name)
bootstrap.ctx = ctx
return bootstrap
def distribute_libs(self, arch, src_dirs, wildcard='*', dest_dir="libs"):
info('Copying libs')
tgt_dir = join(dest_dir, arch.arch)
ensure_dir(tgt_dir)
for src_dir in src_dirs:
libs = glob.glob(join(src_dir, wildcard))
if libs:
shprint(sh.cp, '-a', *libs, tgt_dir)
def distribute_javaclasses(self, javaclass_dir, dest_dir="src"):
info('Copying java files')
ensure_dir(dest_dir)
filenames = glob.glob(javaclass_dir)
shprint(sh.cp, '-a', *filenames, dest_dir)
def distribute_aars(self, arch):
info('Unpacking aars')
for aar in glob.glob(join(self.ctx.aars_dir, '*.aar')):
self._unpack_aar(aar, arch)
def _unpack_aar(self, aar, arch):
with temp_directory() as temp_dir:
name = splitext(basename(aar))[0]
jar_name = name + '.jar'
info("unpack {} aar".format(name))
debug(" from {}".format(aar))
debug(" to {}".format(temp_dir))
shprint(sh.unzip, '-o', aar, '-d', temp_dir)
jar_src = join(temp_dir, 'classes.jar')
jar_tgt = join('libs', jar_name)
debug("copy {} jar".format(name))
debug(" from {}".format(jar_src))
debug(" to {}".format(jar_tgt))
ensure_dir('libs')
shprint(sh.cp, '-a', jar_src, jar_tgt)
so_src_dir = join(temp_dir, 'jni', arch.arch)
so_tgt_dir = join('libs', arch.arch)
debug("copy {} .so".format(name))
debug(" from {}".format(so_src_dir))
debug(" to {}".format(so_tgt_dir))
ensure_dir(so_tgt_dir)
so_files = glob.glob(join(so_src_dir, '*.so'))
shprint(sh.cp, '-a', *so_files, so_tgt_dir)
def strip_libraries(self, arch):
info('Stripping libraries')
env = arch.get_env()
tokens = shlex.split(env['STRIP'])
strip = sh.Command(tokens[0])
if len(tokens) > 1:
strip = strip.bake(tokens[1:])
libs_dir = join(self.dist_dir, '_python_bundle',
'_python_bundle', 'modules')
filens = shprint(sh.find, libs_dir, join(self.dist_dir, 'libs'),
'-iname', '*.so', _env=env).stdout.decode('utf-8')
logger.info('Stripping libraries in private dir')
for filen in filens.split('\n'):
if not filen:
continue
try:
strip(filen, _env=env)
except sh.ErrorReturnCode_1:
logger.debug('Failed to strip ' + filen)
def fry_eggs(self, sitepackages):
info('Frying eggs in {}'.format(sitepackages))
for d in listdir(sitepackages):
rd = join(sitepackages, d)
if isdir(rd) and d.endswith('.egg'):
info(' ' + d)
files = [join(rd, f) for f in listdir(rd) if f != 'EGG-INFO']
if files:
shprint(sh.mv, '-t', sitepackages, *files)
shprint(sh.rm, '-rf', d)
def expand_dependencies(recipes, ctx):
recipes_with_deps = list(recipes)
for entry in recipes:
if not isinstance(entry, (tuple, list)) or len(entry) == 1:
if isinstance(entry, (tuple, list)):
entry = entry[0]
try:
recipe = Recipe.get_recipe(entry, ctx)
recipes_with_deps += recipe.depends
except ValueError:
# don't know the dependencies...skipping for now
pass
recipe_lists = [[]]
for recipe in recipes_with_deps:
if isinstance(recipe, (tuple, list)):
new_recipe_lists = []
for alternative in recipe:
for old_list in recipe_lists:
new_list = [i for i in old_list]
new_list.append(alternative)
new_recipe_lists.append(new_list)
recipe_lists = new_recipe_lists
else:
for existing_list in recipe_lists:
existing_list.append(recipe)
return recipe_lists
| true | true |
1c388f83b0135bdf4b3681bd6fab412577e4207e | 3,528 | py | Python | testsite/settings.py | deepaksingh1/testsite | a36737f49d6264c0c38f7b8c4b96de248e8e3f41 | [
"MIT"
] | null | null | null | testsite/settings.py | deepaksingh1/testsite | a36737f49d6264c0c38f7b8c4b96de248e8e3f41 | [
"MIT"
] | null | null | null | testsite/settings.py | deepaksingh1/testsite | a36737f49d6264c0c38f7b8c4b96de248e8e3f41 | [
"MIT"
] | null | null | null | """
Django settings for testsite project.
Generated by 'django-admin startproject' using Django 3.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_)@s9t+_-xj-0v&-@o1u8pzv53)idi&+jym4)9@g#42-oa(5il'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['http://deepaks135.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
'corsheaders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
]
ROOT_URLCONF = 'testsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'testsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# corsheaders
CORS_ORIGIN_ALLOW_ALL = False
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = (
'http://localhost:8080',
'http://127.0.0.1:8080',
'http://deepaks135.pythonanywhere.com',
)
CORS_ALLOW_METHODS = (
'GET',
'POST',
'PUT',
'DELETE',
)
| 24.5 | 91 | 0.694728 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '_)@s9t+_-xj-0v&-@o1u8pzv53)idi&+jym4)9@g#42-oa(5il'
DEBUG = True
ALLOWED_HOSTS = ['http://deepaks135.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
'corsheaders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
]
ROOT_URLCONF = 'testsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'testsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# corsheaders
CORS_ORIGIN_ALLOW_ALL = False
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = (
'http://localhost:8080',
'http://127.0.0.1:8080',
'http://deepaks135.pythonanywhere.com',
)
CORS_ALLOW_METHODS = (
'GET',
'POST',
'PUT',
'DELETE',
)
| true | true |
1c38903631724b2f4bcc64d6a35b37d5295b7e23 | 1,576 | py | Python | pandas/tests/indexes/base_class/test_indexing.py | advatar/pandas | 14b84b45d6c891fd8954ba9bb0c493cd9ec2a662 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-01-14T15:16:34.000Z | 2021-01-14T15:16:34.000Z | pandas/tests/indexes/base_class/test_indexing.py | Tian-Jionglu/pandas | 6b3618de9c5deae3bcb4c7d5dfbd35e1f9eeaf15 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/indexes/base_class/test_indexing.py | Tian-Jionglu/pandas | 6b3618de9c5deae3bcb4c7d5dfbd35e1f9eeaf15 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pytest
from pandas.compat import is_numpy_dev
from pandas import Index
import pandas._testing as tm
class TestGetSliceBounds:
@pytest.mark.parametrize("kind", ["getitem", "loc", None])
@pytest.mark.parametrize("side, expected", [("left", 4), ("right", 5)])
def test_get_slice_bounds_within(self, kind, side, expected):
index = Index(list("abcdef"))
result = index.get_slice_bound("e", kind=kind, side=side)
assert result == expected
@pytest.mark.xfail(is_numpy_dev, reason="GH#39089 Numpy changed dtype inference")
@pytest.mark.parametrize("kind", ["getitem", "loc", None])
@pytest.mark.parametrize("side", ["left", "right"])
@pytest.mark.parametrize(
"data, bound, expected", [(list("abcdef"), "x", 6), (list("bcdefg"), "a", 0)]
)
def test_get_slice_bounds_outside(self, kind, side, expected, data, bound):
index = Index(data)
result = index.get_slice_bound(bound, kind=kind, side=side)
assert result == expected
def test_get_slice_bounds_invalid_side(self):
with pytest.raises(ValueError, match="Invalid value for side kwarg"):
Index([]).get_slice_bound("a", kind=None, side="middle")
class TestGetIndexerNonUnique:
def test_get_indexer_non_unique_dtype_mismatch(self):
# GH#25459
indexes, missing = Index(["A", "B"]).get_indexer_non_unique(Index([0]))
tm.assert_numpy_array_equal(np.array([-1], dtype=np.intp), indexes)
tm.assert_numpy_array_equal(np.array([0], dtype=np.intp), missing)
| 39.4 | 85 | 0.673223 | import numpy as np
import pytest
from pandas.compat import is_numpy_dev
from pandas import Index
import pandas._testing as tm
class TestGetSliceBounds:
@pytest.mark.parametrize("kind", ["getitem", "loc", None])
@pytest.mark.parametrize("side, expected", [("left", 4), ("right", 5)])
def test_get_slice_bounds_within(self, kind, side, expected):
index = Index(list("abcdef"))
result = index.get_slice_bound("e", kind=kind, side=side)
assert result == expected
@pytest.mark.xfail(is_numpy_dev, reason="GH#39089 Numpy changed dtype inference")
@pytest.mark.parametrize("kind", ["getitem", "loc", None])
@pytest.mark.parametrize("side", ["left", "right"])
@pytest.mark.parametrize(
"data, bound, expected", [(list("abcdef"), "x", 6), (list("bcdefg"), "a", 0)]
)
def test_get_slice_bounds_outside(self, kind, side, expected, data, bound):
index = Index(data)
result = index.get_slice_bound(bound, kind=kind, side=side)
assert result == expected
def test_get_slice_bounds_invalid_side(self):
with pytest.raises(ValueError, match="Invalid value for side kwarg"):
Index([]).get_slice_bound("a", kind=None, side="middle")
class TestGetIndexerNonUnique:
def test_get_indexer_non_unique_dtype_mismatch(self):
indexes, missing = Index(["A", "B"]).get_indexer_non_unique(Index([0]))
tm.assert_numpy_array_equal(np.array([-1], dtype=np.intp), indexes)
tm.assert_numpy_array_equal(np.array([0], dtype=np.intp), missing)
| true | true |
1c3893b343a7fc290d6d36d72be638b7ffade06e | 1,943 | py | Python | astropy/sphinx/ext/tests/test_automodsumm.py | xiaomi1122/astropy | 8876e902f5efa02a3fc27d82fe15c16001d4df5e | [
"BSD-3-Clause"
] | null | null | null | astropy/sphinx/ext/tests/test_automodsumm.py | xiaomi1122/astropy | 8876e902f5efa02a3fc27d82fe15c16001d4df5e | [
"BSD-3-Clause"
] | null | null | null | astropy/sphinx/ext/tests/test_automodsumm.py | xiaomi1122/astropy | 8876e902f5efa02a3fc27d82fe15c16001d4df5e | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from ....tests.helper import pytest
pytest.importorskip('sphinx') # skips these tests if sphinx not present
class FakeEnv(object):
"""
Mocks up a sphinx env setting construct for automodapi tests
"""
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
class FakeBuilder(object):
"""
Mocks up a sphinx builder setting construct for automodapi tests
"""
def __init__(self, **kwargs):
self.env = FakeEnv(**kwargs)
class FakeApp(object):
"""
Mocks up a `sphinx.application.Application` object for automodapi tests
"""
def __init__(self, srcdir, automodapipresent=True):
self.builder = FakeBuilder(srcdir=srcdir)
self.info = []
self.warnings = []
self._extensions = []
if automodapipresent:
self._extensions.append('astropy.sphinx.ext.automodapi')
def info(self, msg, loc):
self.info.append((msg, loc))
def warn(self, msg, loc):
self.warnings.append((msg, loc))
ams_to_asmry_str = """
Before
.. automodsumm:: astropy.sphinx.ext.automodsumm
:p:
And After
"""
ams_to_asmry_expected = """.. autosummary::
:p:
~astropy.sphinx.ext.automodsumm.Automoddiagram
~astropy.sphinx.ext.automodsumm.Automodsumm
~astropy.sphinx.ext.automodsumm.automodsumm_to_autosummary_lines
~astropy.sphinx.ext.automodsumm.generate_automodsumm_docs
~astropy.sphinx.ext.automodsumm.process_automodsumm_generation
~astropy.sphinx.ext.automodsumm.setup"""
def test_ams_to_asmry(tmpdir):
from ..automodsumm import automodsumm_to_autosummary_lines
fi = tmpdir.join('automodsumm.rst')
fi.write(ams_to_asmry_str)
fakeapp = FakeApp(srcdir='')
resultlines = automodsumm_to_autosummary_lines(str(fi), fakeapp)
assert '\n'.join(resultlines) == ams_to_asmry_expected
| 26.986111 | 75 | 0.687597 |
from ....tests.helper import pytest
pytest.importorskip('sphinx')
class FakeEnv(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
class FakeBuilder(object):
def __init__(self, **kwargs):
self.env = FakeEnv(**kwargs)
class FakeApp(object):
def __init__(self, srcdir, automodapipresent=True):
self.builder = FakeBuilder(srcdir=srcdir)
self.info = []
self.warnings = []
self._extensions = []
if automodapipresent:
self._extensions.append('astropy.sphinx.ext.automodapi')
def info(self, msg, loc):
self.info.append((msg, loc))
def warn(self, msg, loc):
self.warnings.append((msg, loc))
ams_to_asmry_str = """
Before
.. automodsumm:: astropy.sphinx.ext.automodsumm
:p:
And After
"""
ams_to_asmry_expected = """.. autosummary::
:p:
~astropy.sphinx.ext.automodsumm.Automoddiagram
~astropy.sphinx.ext.automodsumm.Automodsumm
~astropy.sphinx.ext.automodsumm.automodsumm_to_autosummary_lines
~astropy.sphinx.ext.automodsumm.generate_automodsumm_docs
~astropy.sphinx.ext.automodsumm.process_automodsumm_generation
~astropy.sphinx.ext.automodsumm.setup"""
def test_ams_to_asmry(tmpdir):
from ..automodsumm import automodsumm_to_autosummary_lines
fi = tmpdir.join('automodsumm.rst')
fi.write(ams_to_asmry_str)
fakeapp = FakeApp(srcdir='')
resultlines = automodsumm_to_autosummary_lines(str(fi), fakeapp)
assert '\n'.join(resultlines) == ams_to_asmry_expected
| true | true |
1c3894096729807804f491952c37d47fec2bd279 | 3,209 | py | Python | stock/models.py | Singosgu/Elvis_WMS | e6911b7daae76be640ece8946104af24b6cf0fa6 | [
"MIT"
] | 3 | 2020-10-19T05:55:28.000Z | 2020-11-12T03:55:06.000Z | stock/models.py | Singosgu/Django_WMS | 8cf5f5282a5619d6f488372020f95441cf89f868 | [
"MIT"
] | 1 | 2020-07-24T07:34:36.000Z | 2020-07-24T07:34:36.000Z | stock/models.py | Singosgu/Elvis_WMS | e6911b7daae76be640ece8946104af24b6cf0fa6 | [
"MIT"
] | 4 | 2020-09-04T13:35:15.000Z | 2020-10-16T15:10:38.000Z | from django.db import models
class StockListModel(models.Model):
goods_code = models.CharField(max_length=32, verbose_name="Goods Code")
goods_desc = models.CharField(max_length=255, verbose_name="Goods Description")
goods_qty = models.BigIntegerField(default=0, verbose_name="Total Qty")
onhand_stock = models.BigIntegerField(default=0, verbose_name='On Hand Stock')
can_order_stock = models.BigIntegerField(default=0, verbose_name='Can Order Stock')
ordered_stock = models.BigIntegerField(default=0, verbose_name='Ordered Stock')
inspect_stock = models.BigIntegerField(default=0, verbose_name='Inspect Stock')
hold_stock = models.BigIntegerField(default=0, verbose_name='Holding Stock')
damage_stock = models.BigIntegerField(default=0, verbose_name='Damage Stock')
asn_stock = models.BigIntegerField(default=0, verbose_name='ASN Stock')
dn_stock = models.BigIntegerField(default=0, verbose_name='DN Stock')
pre_load_stock = models.BigIntegerField(default=0, verbose_name='Pre Load Stock')
pre_sort_stock = models.BigIntegerField(default=0, verbose_name='Pre Sort Stock')
sorted_stock = models.BigIntegerField(default=0, verbose_name='Sorted Stock')
pick_stock = models.BigIntegerField(default=0, verbose_name='Pick Stock')
picked_stock = models.BigIntegerField(default=0, verbose_name='Picked Stock')
back_order_stock = models.BigIntegerField(default=0, verbose_name='Back Order Stock')
supplier = models.CharField(default='', max_length=255, verbose_name='Goods Supplier')
openid = models.CharField(max_length=255, verbose_name="Openid")
create_time = models.DateTimeField(auto_now_add=True, verbose_name="Create Time")
update_time = models.DateTimeField(auto_now=True, blank=True, null=True, verbose_name="Update Time")
class Meta:
db_table = 'stocklist'
verbose_name = 'Stock_List'
verbose_name_plural = "Stock_List"
ordering = ['-id']
def __str__(self):
return self.pk
class StockBinModel(models.Model):
bin_name = models.CharField(max_length=255, verbose_name="Bin Name")
goods_code = models.CharField(max_length=255, verbose_name="Goods Code")
goods_desc = models.CharField(max_length=255, verbose_name="Goods Description")
goods_qty = models.BigIntegerField(default=0, verbose_name="Binstock Qty")
pick_qty = models.BigIntegerField(default=0, verbose_name="BinPick Qty")
picked_qty = models.BigIntegerField(default=0, verbose_name="BinPicked Qty")
bin_size = models.CharField(max_length=255, verbose_name="Bin size")
bin_property = models.CharField(max_length=255, verbose_name="Bin Property")
t_code = models.CharField(max_length=255, verbose_name="Transaction Code")
openid = models.CharField(max_length=255, verbose_name="Openid")
create_time = models.DateTimeField(auto_now_add=False, verbose_name="Create Time")
update_time = models.DateTimeField(auto_now=True, blank=True, null=True, verbose_name="Update Time")
class Meta:
db_table = 'stockbin'
verbose_name = 'Stock_Bin'
verbose_name_plural = "Stock_Bin"
ordering = ['-id']
def __str__(self):
return self.pk
| 56.298246 | 104 | 0.747897 | from django.db import models
class StockListModel(models.Model):
goods_code = models.CharField(max_length=32, verbose_name="Goods Code")
goods_desc = models.CharField(max_length=255, verbose_name="Goods Description")
goods_qty = models.BigIntegerField(default=0, verbose_name="Total Qty")
onhand_stock = models.BigIntegerField(default=0, verbose_name='On Hand Stock')
can_order_stock = models.BigIntegerField(default=0, verbose_name='Can Order Stock')
ordered_stock = models.BigIntegerField(default=0, verbose_name='Ordered Stock')
inspect_stock = models.BigIntegerField(default=0, verbose_name='Inspect Stock')
hold_stock = models.BigIntegerField(default=0, verbose_name='Holding Stock')
damage_stock = models.BigIntegerField(default=0, verbose_name='Damage Stock')
asn_stock = models.BigIntegerField(default=0, verbose_name='ASN Stock')
dn_stock = models.BigIntegerField(default=0, verbose_name='DN Stock')
pre_load_stock = models.BigIntegerField(default=0, verbose_name='Pre Load Stock')
pre_sort_stock = models.BigIntegerField(default=0, verbose_name='Pre Sort Stock')
sorted_stock = models.BigIntegerField(default=0, verbose_name='Sorted Stock')
pick_stock = models.BigIntegerField(default=0, verbose_name='Pick Stock')
picked_stock = models.BigIntegerField(default=0, verbose_name='Picked Stock')
back_order_stock = models.BigIntegerField(default=0, verbose_name='Back Order Stock')
supplier = models.CharField(default='', max_length=255, verbose_name='Goods Supplier')
openid = models.CharField(max_length=255, verbose_name="Openid")
create_time = models.DateTimeField(auto_now_add=True, verbose_name="Create Time")
update_time = models.DateTimeField(auto_now=True, blank=True, null=True, verbose_name="Update Time")
class Meta:
db_table = 'stocklist'
verbose_name = 'Stock_List'
verbose_name_plural = "Stock_List"
ordering = ['-id']
def __str__(self):
return self.pk
class StockBinModel(models.Model):
bin_name = models.CharField(max_length=255, verbose_name="Bin Name")
goods_code = models.CharField(max_length=255, verbose_name="Goods Code")
goods_desc = models.CharField(max_length=255, verbose_name="Goods Description")
goods_qty = models.BigIntegerField(default=0, verbose_name="Binstock Qty")
pick_qty = models.BigIntegerField(default=0, verbose_name="BinPick Qty")
picked_qty = models.BigIntegerField(default=0, verbose_name="BinPicked Qty")
bin_size = models.CharField(max_length=255, verbose_name="Bin size")
bin_property = models.CharField(max_length=255, verbose_name="Bin Property")
t_code = models.CharField(max_length=255, verbose_name="Transaction Code")
openid = models.CharField(max_length=255, verbose_name="Openid")
create_time = models.DateTimeField(auto_now_add=False, verbose_name="Create Time")
update_time = models.DateTimeField(auto_now=True, blank=True, null=True, verbose_name="Update Time")
class Meta:
db_table = 'stockbin'
verbose_name = 'Stock_Bin'
verbose_name_plural = "Stock_Bin"
ordering = ['-id']
def __str__(self):
return self.pk
| true | true |
1c38941befd420085bd5935ca9e675d2d2bbcea6 | 7,359 | py | Python | caserec/recommenders/item_recommendation/item_attribute_knn.py | eduardofressato/CaseRecommender | 0e5675f3afdb14111b14e02a511527647b66aa66 | [
"MIT"
] | 1 | 2019-01-05T02:14:47.000Z | 2019-01-05T02:14:47.000Z | caserec/recommenders/item_recommendation/item_attribute_knn.py | guedes-joaofelipe/CaseRecommender | 5606db93f0296d0b9b5eeaba2bd48787c5ff5625 | [
"MIT"
] | null | null | null | caserec/recommenders/item_recommendation/item_attribute_knn.py | guedes-joaofelipe/CaseRecommender | 5606db93f0296d0b9b5eeaba2bd48787c5ff5625 | [
"MIT"
] | null | null | null | # coding=utf-8
""""
Item Based Collaborative Filtering Recommender with Attributes (Item Attribute KNN)
[Item Recommendation (Ranking)]
Its philosophy is as follows: in order to determine the rating of User u on item m, we can find other movies that
are similar to item m, and based on User u’s ratings on those similar movies we infer his rating on item m.
However, instead of traditional ItemKNN, this approach uses a metadata or pre-computed similarity matrix.
"""
# © 2018. Case Recommender (MIT License)
from collections import defaultdict
import numpy as np
from caserec.recommenders.item_recommendation.itemknn import ItemKNN
from caserec.utils.process_data import ReadFile
__author__ = 'Arthur Fortes <fortes.arthur@gmail.com>'
class ItemAttributeKNN(ItemKNN):
def __init__(self, train_file=None, test_file=None, output_file=None, metadata_file=None, similarity_file=None,
k_neighbors=30, rank_length=10, as_binary=False, as_similar_first=True, metadata_as_binary=False,
metadata_similarity_sep='\t', similarity_metric="cosine", sep='\t', output_sep='\t'):
"""
Item Attribute KNN for Item Recommendation
This algorithm predicts a rank for each user based on the similar items that he/her consumed,
using a metadata or similarity pre-computed file
Usage::
>> ItemAttributeKNN(train, test, similarity_file=sim_matrix, as_similar_first=True).compute()
>> ItemAttributeKNN(train, test, metadata_file=metadata, as_similar_first=True).compute()
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param output_file: File with dir to write the final predictions
:type output_file: str, default None
:param metadata_file: File which contains the metadata set. This file needs to have at least 2 columns
(item metadata).
:type metadata_file: str, default None
:param similarity_file: File which contains the similarity set. This file needs to have at least 3 columns
(item item similarity).
:type similarity_file: str, default None
:param k_neighbors: Number of neighbors to use. If None, k_neighbor = int(sqrt(n_users))
:type k_neighbors: int, default None
:param rank_length: Size of the rank that must be generated by the predictions of the recommender algorithm
:type rank_length: int, default 10
:param as_binary: If True, the explicit feedback will be transform to binary
:type as_binary: bool, default False
:param as_similar_first: If True, for each unknown item, which will be predicted, we first look for its k
most similar users and then take the intersection with the users that
seen that item.
:type as_similar_first: bool, default True
:param metadata_as_binary: f True, the explicit value will be transform to binary
:type metadata_as_binary: bool, default False
:param metadata_similarity_sep: Delimiter for similarity or metadata file
:type metadata_similarity_sep: str, default '\t'
:param similarity_metric: Pairwise metric to compute the similarity between the items. Reference about
distances: http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.distance.pdist.html
:type similarity_metric: str, default cosine
:param sep: Delimiter for input files file
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
"""
super(ItemAttributeKNN, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file,
k_neighbors=k_neighbors, rank_length=rank_length, as_binary=as_binary,
as_similar_first=as_similar_first, similarity_metric=similarity_metric,
sep=sep, output_sep=output_sep)
self.recommender_name = 'Item Attribute KNN Algorithm'
self.metadata_file = metadata_file
self.similarity_file = similarity_file
self.metadata_as_binary = metadata_as_binary
self.metadata_similarity_sep = metadata_similarity_sep
def init_model(self):
"""
Method to fit the model. Create and calculate a similarity matrix by metadata file or a pre-computed similarity
matrix
"""
self.similar_items = defaultdict(list)
# Set the value for k
if self.k_neighbors is None:
self.k_neighbors = int(np.sqrt(len(self.items)))
if self.metadata_file is not None:
metadata = ReadFile(self.metadata_file, sep=self.metadata_similarity_sep, as_binary=self.metadata_as_binary
).read_metadata_or_similarity()
self.matrix = np.zeros((len(self.items), len(metadata['col_2'])))
meta_to_meta_id = {}
for m, data in enumerate(metadata['col_2']):
meta_to_meta_id[data] = m
for item in metadata['col_1']:
for m in metadata['dict'][item]:
self.matrix[self.item_to_item_id[item], meta_to_meta_id[m]] = metadata['dict'][item][m]
# create header info for metadata
sparsity = (1 - (metadata['number_interactions'] / (len(metadata['col_1']) * len(metadata['col_2'])))) * 100
self.extra_info_header = ">> metadata:: %d items and %d metadata (%d interactions) | sparsity:: %.2f%%" % \
(len(metadata['col_1']), len(metadata['col_2']), metadata['number_interactions'],
sparsity)
# Create similarity matrix based on metadata or similarity file. Transpose=False, because it is an
# item x metadata matrix
self.si_matrix = self.compute_similarity(transpose=False)
elif self.similarity_file is not None:
similarity = ReadFile(self.similarity_file, sep=self.metadata_similarity_sep, as_binary=False
).read_metadata_or_similarity()
self.si_matrix = np.zeros((len(self.items), len(self.items)))
# Fill similarity matrix
for i in similarity['col_1']:
for i_j in similarity['dict'][i]:
self.si_matrix[self.item_to_item_id[i], self.item_to_item_id[int(i_j)]] = similarity['dict'][i][i_j]
# Remove NaNs
self.si_matrix[np.isnan(self.si_matrix)] = 0.0
else:
raise ValueError("This algorithm needs a similarity matrix or a metadata file!")
# Create original matrix user x item for prediction process
self.create_matrix()
for i_id, item in enumerate(self.items):
self.similar_items[i_id] = sorted(range(len(self.si_matrix[i_id])),
key=lambda k: -self.si_matrix[i_id][k])[1:self.k_neighbors + 1]
| 45.99375 | 120 | 0.655524 |
from collections import defaultdict
import numpy as np
from caserec.recommenders.item_recommendation.itemknn import ItemKNN
from caserec.utils.process_data import ReadFile
__author__ = 'Arthur Fortes <fortes.arthur@gmail.com>'
class ItemAttributeKNN(ItemKNN):
def __init__(self, train_file=None, test_file=None, output_file=None, metadata_file=None, similarity_file=None,
k_neighbors=30, rank_length=10, as_binary=False, as_similar_first=True, metadata_as_binary=False,
metadata_similarity_sep='\t', similarity_metric="cosine", sep='\t', output_sep='\t'):
super(ItemAttributeKNN, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file,
k_neighbors=k_neighbors, rank_length=rank_length, as_binary=as_binary,
as_similar_first=as_similar_first, similarity_metric=similarity_metric,
sep=sep, output_sep=output_sep)
self.recommender_name = 'Item Attribute KNN Algorithm'
self.metadata_file = metadata_file
self.similarity_file = similarity_file
self.metadata_as_binary = metadata_as_binary
self.metadata_similarity_sep = metadata_similarity_sep
def init_model(self):
self.similar_items = defaultdict(list)
if self.k_neighbors is None:
self.k_neighbors = int(np.sqrt(len(self.items)))
if self.metadata_file is not None:
metadata = ReadFile(self.metadata_file, sep=self.metadata_similarity_sep, as_binary=self.metadata_as_binary
).read_metadata_or_similarity()
self.matrix = np.zeros((len(self.items), len(metadata['col_2'])))
meta_to_meta_id = {}
for m, data in enumerate(metadata['col_2']):
meta_to_meta_id[data] = m
for item in metadata['col_1']:
for m in metadata['dict'][item]:
self.matrix[self.item_to_item_id[item], meta_to_meta_id[m]] = metadata['dict'][item][m]
sparsity = (1 - (metadata['number_interactions'] / (len(metadata['col_1']) * len(metadata['col_2'])))) * 100
self.extra_info_header = ">> metadata:: %d items and %d metadata (%d interactions) | sparsity:: %.2f%%" % \
(len(metadata['col_1']), len(metadata['col_2']), metadata['number_interactions'],
sparsity)
self.si_matrix = self.compute_similarity(transpose=False)
elif self.similarity_file is not None:
similarity = ReadFile(self.similarity_file, sep=self.metadata_similarity_sep, as_binary=False
).read_metadata_or_similarity()
self.si_matrix = np.zeros((len(self.items), len(self.items)))
for i in similarity['col_1']:
for i_j in similarity['dict'][i]:
self.si_matrix[self.item_to_item_id[i], self.item_to_item_id[int(i_j)]] = similarity['dict'][i][i_j]
self.si_matrix[np.isnan(self.si_matrix)] = 0.0
else:
raise ValueError("This algorithm needs a similarity matrix or a metadata file!")
self.create_matrix()
for i_id, item in enumerate(self.items):
self.similar_items[i_id] = sorted(range(len(self.si_matrix[i_id])),
key=lambda k: -self.si_matrix[i_id][k])[1:self.k_neighbors + 1]
| true | true |
1c38942cb70c69cf580c8bc3fbbe42ea9fe08095 | 302 | py | Python | test/test_kind_section.py | JianxingHuang/ecint | 4c41d9d7c77ee85dd3af69aa0999d093b6a8c7a3 | [
"MIT"
] | null | null | null | test/test_kind_section.py | JianxingHuang/ecint | 4c41d9d7c77ee85dd3af69aa0999d093b6a8c7a3 | [
"MIT"
] | null | null | null | test/test_kind_section.py | JianxingHuang/ecint | 4c41d9d7c77ee85dd3af69aa0999d093b6a8c7a3 | [
"MIT"
] | 1 | 2021-04-10T08:43:02.000Z | 2021-04-10T08:43:02.000Z | from aiida.orm import StructureData
from ase.io import read
from ecint.preprocessor.kind import SetsFromYaml
structure_path = './data/h2o.xyz'
atoms = read(structure_path)
structure = StructureData(ase=atoms)
sets = SetsFromYaml(structure, None)
kind_section = sets.kind_section
print(kind_section)
| 23.230769 | 48 | 0.807947 | from aiida.orm import StructureData
from ase.io import read
from ecint.preprocessor.kind import SetsFromYaml
structure_path = './data/h2o.xyz'
atoms = read(structure_path)
structure = StructureData(ase=atoms)
sets = SetsFromYaml(structure, None)
kind_section = sets.kind_section
print(kind_section)
| true | true |
1c3895135dfb43dda73ddd7725e49c81497da474 | 13,247 | py | Python | test/unit/models/test_proposals.py | RottenCoin/sentinel | baae290dae92f393db3f884180bf1f2a47a605ce | [
"MIT"
] | null | null | null | test/unit/models/test_proposals.py | RottenCoin/sentinel | baae290dae92f393db3f884180bf1f2a47a605ce | [
"MIT"
] | null | null | null | test/unit/models/test_proposals.py | RottenCoin/sentinel | baae290dae92f393db3f884180bf1f2a47a605ce | [
"MIT"
] | null | null | null | import pytest
import sys
import os
import time
os.environ['SENTINEL_ENV'] = 'test'
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../../lib')))
import misc
import config
from models import GovernanceObject, Proposal, Vote
# clear DB tables before each execution
def setup():
# clear tables first
Vote.delete().execute()
Proposal.delete().execute()
GovernanceObject.delete().execute()
def teardown():
pass
# list of proposal govobjs to import for testing
@pytest.fixture
def go_list_proposals():
items = [
{u'AbsoluteYesCount': 1000,
u'AbstainCount': 7,
u'CollateralHash': u'acb67ec3f3566c9b94a26b70b36c1f74a010a37c0950c22d683cc50da324fdca',
u'DataHex': u'5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20323132323532303430302c20226e616d65223a20226465616e2d6d696c6c65722d35343933222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2032352e37352c202273746172745f65706f6368223a20313437343236313038362c202274797065223a20312c202275726c223a2022687474703a2f2f6461736863656e7472616c2e6f72672f6465616e2d6d696c6c65722d35343933227d5d5d',
u'DataString': u'[["proposal", {"end_epoch": 2122520400, "name": "dean-miller-5493", "payment_address": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amount": 25.75, "start_epoch": 1474261086, "type": 1, "url": "http://omegacentral.org/dean-miller-5493"}]]',
u'Hash': u'dfd7d63979c0b62456b63d5fc5306dbec451180adee85876cbf5b28c69d1a86c',
u'IsValidReason': u'',
u'NoCount': 25,
u'YesCount': 1025,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
{u'AbsoluteYesCount': 1000,
u'AbstainCount': 29,
u'CollateralHash': u'3efd23283aa98c2c33f80e4d9ed6f277d195b72547b6491f43280380f6aac810',
u'DataHex': u'5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20323132323532303430302c20226e616d65223a20226665726e616e64657a2d37363235222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2032352e37352c202273746172745f65706f6368223a20313437343236313038362c202274797065223a20312c202275726c223a2022687474703a2f2f6461736863656e7472616c2e6f72672f6665726e616e64657a2d37363235227d5d5d',
u'DataString': u'[["proposal", {"end_epoch": 2122520400, "name": "fernandez-7625", "payment_address": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amount": 25.75, "start_epoch": 1474261086, "type": 1, "url": "http://omegacentral.org/fernandez-7625"}]]',
u'Hash': u'0523445762025b2e01a2cd34f1d10f4816cf26ee1796167e5b029901e5873630',
u'IsValidReason': u'',
u'NoCount': 56,
u'YesCount': 1056,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
]
return items
# Proposal
@pytest.fixture
def proposal():
    """Return a built -- but deliberately *unsaved* -- Proposal instance."""
    # NOTE: no governance_object_id is set
    fields = {
        'start_epoch': 1483250400,  # 2017-01-01
        'end_epoch': 2122520400,
        'name': "wine-n-cheeze-party",
        'url': "https://omegacentral.com/wine-n-cheeze-party",
        'payment_address': "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui",
        'payment_amount': 13,
    }
    # NOTE: the object is intentionally not saved; tests want a built,
    # but unsaved, instance.
    return Proposal(**fields)
def test_proposal_is_valid(proposal):
    """Exercise Proposal.is_valid() across its validation rules:
    date ordering, proposal name, payment address, URL, and payment amount.

    Fix: the second `omegacoinlib.deserialise(...)` argument had been split
    across two physical lines (a line-wrap artifact), which broke the string
    literal; it is rejoined onto a single line here.
    """
    from omegacoind import OmegaDaemon
    import omegacoinlib
    # NOTE(review): the daemon handle is never used below -- this call only
    # exercises conf parsing/connection setup; confirm whether it can be
    # dropped.
    omegacoind = OmegaDaemon.from_omegacoin_conf(config.omegacoin_conf)
    orig = Proposal(**proposal.get_dict())  # make a copy

    # fixture as-is should be valid
    assert proposal.is_valid() is True

    # ============================================================
    # ensure end_date not greater than start_date
    # ============================================================
    proposal.end_epoch = proposal.start_epoch
    assert proposal.is_valid() is False

    proposal.end_epoch = proposal.start_epoch - 1
    assert proposal.is_valid() is False

    proposal.end_epoch = proposal.start_epoch + 0
    assert proposal.is_valid() is False

    proposal.end_epoch = proposal.start_epoch + 1
    assert proposal.is_valid() is True

    # reset
    proposal = Proposal(**orig.get_dict())

    # ============================================================
    # ensure valid proposal name
    # ============================================================
    proposal.name = ' heya!@209h '
    assert proposal.is_valid() is False

    proposal.name = "anything' OR 'x'='x"
    assert proposal.is_valid() is False

    proposal.name = ' '
    assert proposal.is_valid() is False

    proposal.name = ''
    assert proposal.is_valid() is False

    proposal.name = '0'
    assert proposal.is_valid() is True

    proposal.name = 'R66-Y'
    assert proposal.is_valid() is True

    # binary gibberish
    proposal.name = omegacoinlib.deserialise('22385c7530303933375c75303363375c75303232395c75303138635c75303064335c75303163345c75303264385c75303236615c75303134625c75303163335c75303063335c75303362385c75303266615c75303261355c75303266652f2b5c75303065395c75303164655c75303136655c75303338645c75303062385c75303138635c75303064625c75303064315c75303038325c75303133325c753032333222')
    assert proposal.is_valid() is False

    # reset
    proposal = Proposal(**orig.get_dict())

    # ============================================================
    # ensure valid payment address
    # ============================================================
    proposal.payment_address = '7'
    assert proposal.is_valid() is False

    proposal.payment_address = 'YYE8KWYAUU5YSWSYMB3Q3RYX8XTUU9Y7UI'
    assert proposal.is_valid() is False

    proposal.payment_address = 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Uj'
    assert proposal.is_valid() is False

    proposal.payment_address = '221 B Baker St., London, United Kingdom'
    assert proposal.is_valid() is False

    # this is actually the Omega foundation multisig address...
    proposal.payment_address = '7gnwGHt17heGpG9Crfeh4KGpYNFugPhJdh'
    assert proposal.is_valid() is False

    proposal.payment_address = 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui'
    assert proposal.is_valid() is True

    # reset
    proposal = Proposal(**orig.get_dict())

    # validate URL
    proposal.url = ' '
    assert proposal.is_valid() is False

    proposal.url = '  '
    assert proposal.is_valid() is False

    proposal.url = 'http://bit.ly/1e1EYJv'
    assert proposal.is_valid() is True

    proposal.url = 'https://example.com/resource.ext?param=1&other=2'
    assert proposal.is_valid() is True

    proposal.url = 'www.com'
    assert proposal.is_valid() is True

    proposal.url = 'v.ht/'
    assert proposal.is_valid() is True

    proposal.url = 'ipfs:///ipfs/QmPwwoytFU3gZYk5tSppumxaGbHymMUgHsSvrBdQH69XRx/'
    assert proposal.is_valid() is True

    proposal.url = '/ipfs/QmPwwoytFU3gZYk5tSppumxaGbHymMUgHsSvrBdQH69XRx/'
    assert proposal.is_valid() is True

    proposal.url = 's3://bucket/thing/anotherthing/file.pdf'
    assert proposal.is_valid() is True

    proposal.url = 'http://zqktlwi4fecvo6ri.onion/wiki/index.php/Main_Page'
    assert proposal.is_valid() is True

    proposal.url = 'ftp://ftp.funet.fi/pub/standards/RFC/rfc959.txt'
    assert proposal.is_valid() is True

    # gibberish URL (rejoined onto one line -- see docstring)
    proposal.url = omegacoinlib.deserialise('22687474703a2f2f5c75303330385c75303065665c75303362345c75303362315c75303266645c75303331345c625c75303134655c75303031615c75303139655c75303133365c75303264315c75303238655c75303364395c75303230665c75303363355c75303030345c75303336665c75303238355c75303165375c75303063635c75303139305c75303262615c75303239316a5c75303130375c75303362365c7530306562645c75303133335c75303335665c7530326562715c75303038655c75303332645c75303362645c75303064665c75303135654f365c75303237335c75303363645c7530333539275c75303165345c75303339615c75303365385c75303334345c75303130615c75303265662e5c75303231625c75303164356a5c75303232345c75303163645c75303336365c75303064625c75303339665c75303230305c75303337615c75303138395c75303263325c75303038345c75303066615c75303031335c75303233655c75303135345c75303165395c75303139635c75303239375c75303039355c75303038345c75303362305c7530306233435c75303135345c75303063665c75303163345c75303261335c75303362655c75303136305c75303139365c75303263665c75303131305c7530313031475c75303162645c75303338645c75303363325c75303138625c75303235625c75303266325c75303264635c75303139335c75303066665c75303066645c75303133625c75303234305c75303137615c75303062355c75303031645c75303238655c75303166315c75303232315c75303161615c75303265325c75303335625c75303333665c75303239345c75303335315c75303038345c75303339395c75303262385c75303132375c75303330357a5c75303263625c75303066305c75303062355c75303164335c75303338385c75303364385c75303130625c75303266325c75303137305c75303335315c75303030305c75303136385c75303039646d5c75303331315c75303236615c75303330375c75303332635c75303361635c665c75303363335c75303264365c75303238645c75303136395c7530323438635c75303163385c75303261355c75303164615c75303165375c75303337355c75303332645c7530333165755c75303131665c75303338375c75303135325c75303065325c75303135326c5c75303164325c75303164615c75303136645c75303061665c75303333375c75303264375c75303339375c75303139395c75303134635c75303165385c75303234315c75303336635c75303130645c75303230635c75303161615c75303339355c75303133315c75303064615c75303165615c75303336645c75303064325c75303337365c75303363315c75303132645c75303266305c75303064364f255c75303263635c75303162645c75303062385c75303238365c75303136395c75303337335c75303232335c75303336655c75303037665c75303062616b5c75303132365c75303233305c75303330645c75303362385c75303164355c75303166615c75303338395c75303062635c75303135325c75303334365c75303139645c75303135615c75303031395c75303061385c75303133615c75303338635c75303339625c75303261655c75303065395c75303362635c75303166385c75303031665c75303230615c75303263355c75303134335c75303361635c75303334355c75303236645c75303139365c75303362665c75303135615c75303137305c75303165395c75303231395c75303332665c75303232645c75303030365c75303066305c75303134665c75303337375c75303234325d5c75303164325c75303337655c75303265665c75303331395c75303261355c75303265385c75303338395c75303235645c75303334315c75303338395c7530323230585c75303062645c75303166365c75303238645c75303231375c75303066665c75303130385c75303331305c75303330335c75303031395c75303039635c75303363315c75303039615c75303334355c75303331305c75303162335c75303263315c75303132395c75303234335c75303038627c5c75303361335c75303261635c75303165655c75303030305c75303237615c75303038385c75303066355c75303232375c75303236635c75303236355c7530336336205c75303038615c7530333561787c735c75303336305c75303362655c75303235385c75303334345c75303264365c75303262355c75303361315c75303135345c75303131625c75303061625c75303038615c75303332655c75303238325c75303031393d5c75303263335c75303332655c75303163645c75303139305c75303231305c75303131365c75303334305c75303234665c75303162635c75303333645c75303135305c75303132335c75303233645c75303133345c75303062327a5c75303331635c75303136312a5c753032316522')
    assert proposal.is_valid() is False

    # reset
    proposal = Proposal(**orig.get_dict())

    # ============================================================
    # ensure proposal can't request negative omegacoin
    # ============================================================
    proposal.payment_amount = -1
    assert proposal.is_valid() is False
def test_proposal_is_expired(proposal):
    """is_expired() flips only once end_epoch falls outside the expiry
    window implied by the superblock cycle (here: 24-block testnet cycle)."""
    cycle = 24 # testnet
    now = misc.now()
    proposal.start_epoch = now - (86400 * 2)  # two days ago
    proposal.end_epoch = now - (60 * 60)  # expired one hour ago
    # Recently ended: still inside the grace window, not yet "expired".
    assert proposal.is_expired(superblockcycle=cycle) is False
    # fudge factor + a 24-block cycle == an expiry window of 9086, so...
    proposal.end_epoch = now - 9085  # one second inside the window
    assert proposal.is_expired(superblockcycle=cycle) is False
    proposal.end_epoch = now - 9087  # one second past the window
    assert proposal.is_expired(superblockcycle=cycle) is True
def test_proposal_is_deletable(proposal):
    """A proposal becomes deletable only ~30 days after its end_epoch."""
    now = misc.now()
    assert proposal.is_deletable() is False
    proposal.end_epoch = now - (86400 * 29)  # ended 29 days ago: too recent
    assert proposal.is_deletable() is False
    # add a couple seconds for time variance
    proposal.end_epoch = now - ((86400 * 30) + 2)
    assert proposal.is_deletable() is True
# deterministic ordering
def test_approved_and_ranked(go_list_proposals):
    """Importing the fixture governance objects and ranking them must
    produce a stable, deterministic order (highest-ranked first)."""
    from omegacoind import OmegaDaemon
    omegacoind = OmegaDaemon.from_omegacoin_conf(config.omegacoin_conf)
    for item in go_list_proposals:
        (go, subobj) = GovernanceObject.import_gobject_from_omegacoind(omegacoind, item)
    prop_list = Proposal.approved_and_ranked(proposal_quorum=1, next_superblock_max_budget=60)
    # Object hashes pin the expected rank order of the two fixture proposals.
    assert prop_list[0].object_hash == u'dfd7d63979c0b62456b63d5fc5306dbec451180adee85876cbf5b28c69d1a86c'
    assert prop_list[1].object_hash == u'0523445762025b2e01a2cd34f1d10f4816cf26ee1796167e5b029901e5873630'
| 52.988 | 3,633 | 0.780479 | import pytest
import sys
import os
import time
os.environ['SENTINEL_ENV'] = 'test'
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../../lib')))
import misc
import config
from models import GovernanceObject, Proposal, Vote
def setup():
Vote.delete().execute()
Proposal.delete().execute()
GovernanceObject.delete().execute()
def teardown():
pass
@pytest.fixture
def go_list_proposals():
items = [
{u'AbsoluteYesCount': 1000,
u'AbstainCount': 7,
u'CollateralHash': u'acb67ec3f3566c9b94a26b70b36c1f74a010a37c0950c22d683cc50da324fdca',
u'DataHex': u'5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20323132323532303430302c20226e616d65223a20226465616e2d6d696c6c65722d35343933222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2032352e37352c202273746172745f65706f6368223a20313437343236313038362c202274797065223a20312c202275726c223a2022687474703a2f2f6461736863656e7472616c2e6f72672f6465616e2d6d696c6c65722d35343933227d5d5d',
u'DataString': u'[["proposal", {"end_epoch": 2122520400, "name": "dean-miller-5493", "payment_address": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amount": 25.75, "start_epoch": 1474261086, "type": 1, "url": "http://omegacentral.org/dean-miller-5493"}]]',
u'Hash': u'dfd7d63979c0b62456b63d5fc5306dbec451180adee85876cbf5b28c69d1a86c',
u'IsValidReason': u'',
u'NoCount': 25,
u'YesCount': 1025,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
{u'AbsoluteYesCount': 1000,
u'AbstainCount': 29,
u'CollateralHash': u'3efd23283aa98c2c33f80e4d9ed6f277d195b72547b6491f43280380f6aac810',
u'DataHex': u'5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20323132323532303430302c20226e616d65223a20226665726e616e64657a2d37363235222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2032352e37352c202273746172745f65706f6368223a20313437343236313038362c202274797065223a20312c202275726c223a2022687474703a2f2f6461736863656e7472616c2e6f72672f6665726e616e64657a2d37363235227d5d5d',
u'DataString': u'[["proposal", {"end_epoch": 2122520400, "name": "fernandez-7625", "payment_address": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amount": 25.75, "start_epoch": 1474261086, "type": 1, "url": "http://omegacentral.org/fernandez-7625"}]]',
u'Hash': u'0523445762025b2e01a2cd34f1d10f4816cf26ee1796167e5b029901e5873630',
u'IsValidReason': u'',
u'NoCount': 56,
u'YesCount': 1056,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
]
return items
@pytest.fixture
def proposal():
pobj = Proposal(
start_epoch=1483250400,
end_epoch=2122520400,
name="wine-n-cheeze-party",
url="https://omegacentral.com/wine-n-cheeze-party",
payment_address="yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui",
payment_amount=13
)
return pobj
def test_proposal_is_valid(proposal):
from omegacoind import OmegaDaemon
import omegacoinlib
omegacoind = OmegaDaemon.from_omegacoin_conf(config.omegacoin_conf)
orig = Proposal(**proposal.get_dict())
assert proposal.is_valid() is True
proposal.end_epoch = proposal.start_epoch
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch - 1
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch + 0
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch + 1
assert proposal.is_valid() is True
proposal = Proposal(**orig.get_dict())
proposal.name = ' heya!@209h '
assert proposal.is_valid() is False
proposal.name = "anything' OR 'x'='x"
assert proposal.is_valid() is False
proposal.name = ' '
assert proposal.is_valid() is False
proposal.name = ''
assert proposal.is_valid() is False
proposal.name = '0'
assert proposal.is_valid() is True
proposal.name = 'R66-Y'
assert proposal.is_valid() is True
proposal.name = omegacoinlib.deserialise('22385c7530303933375c75303363375c75303232395c75303138635c75303064335c75303163345c75303264385c75303236615c75303134625c75303163335c75303063335c75303362385c75303266615c75303261355c75303266652f2b5c75303065395c75303164655c75303136655c75303338645c75303062385c75303138635c75303064625c75303064315c75303038325c75303133325c753032333222')
assert proposal.is_valid() is False
proposal = Proposal(**orig.get_dict())
proposal.payment_address = '7'
assert proposal.is_valid() is False
proposal.payment_address = 'YYE8KWYAUU5YSWSYMB3Q3RYX8XTUU9Y7UI'
assert proposal.is_valid() is False
proposal.payment_address = 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Uj'
assert proposal.is_valid() is False
proposal.payment_address = '221 B Baker St., London, United Kingdom'
assert proposal.is_valid() is False
proposal.payment_address = '7gnwGHt17heGpG9Crfeh4KGpYNFugPhJdh'
assert proposal.is_valid() is False
proposal.payment_address = 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui'
assert proposal.is_valid() is True
proposal = Proposal(**orig.get_dict())
proposal.url = ' '
assert proposal.is_valid() is False
proposal.url = ' '
assert proposal.is_valid() is False
proposal.url = 'http://bit.ly/1e1EYJv'
assert proposal.is_valid() is True
proposal.url = 'https://example.com/resource.ext?param=1&other=2'
assert proposal.is_valid() is True
proposal.url = 'www.com'
assert proposal.is_valid() is True
proposal.url = 'v.ht/'
assert proposal.is_valid() is True
proposal.url = 'ipfs:///ipfs/QmPwwoytFU3gZYk5tSppumxaGbHymMUgHsSvrBdQH69XRx/'
assert proposal.is_valid() is True
proposal.url = '/ipfs/QmPwwoytFU3gZYk5tSppumxaGbHymMUgHsSvrBdQH69XRx/'
assert proposal.is_valid() is True
proposal.url = 's3://bucket/thing/anotherthing/file.pdf'
assert proposal.is_valid() is True
proposal.url = 'http://zqktlwi4fecvo6ri.onion/wiki/index.php/Main_Page'
assert proposal.is_valid() is True
proposal.url = 'ftp://ftp.funet.fi/pub/standards/RFC/rfc959.txt'
assert proposal.is_valid() is True
proposal.url = omegacoinlib.deserialise('22687474703a2f2f5c75303330385c75303065665c75303362345c75303362315c75303266645c75303331345c625c75303134655c75303031615c75303139655c75303133365c75303264315c75303238655c75303364395c75303230665c75303363355c75303030345c75303336665c75303238355c75303165375c75303063635c75303139305c75303262615c75303239316a5c75303130375c75303362365c7530306562645c75303133335c75303335665c7530326562715c75303038655c75303332645c75303362645c75303064665c75303135654f365c75303237335c75303363645c7530333539275c75303165345c75303339615c75303365385c75303334345c75303130615c75303265662e5c75303231625c75303164356a5c75303232345c75303163645c75303336365c75303064625c75303339665c75303230305c75303337615c75303138395c75303263325c75303038345c75303066615c75303031335c75303233655c75303135345c75303165395c75303139635c75303239375c75303039355c75303038345c75303362305c7530306233435c75303135345c75303063665c75303163345c75303261335c75303362655c75303136305c75303139365c75303263665c75303131305c7530313031475c75303162645c75303338645c75303363325c75303138625c75303235625c75303266325c75303264635c75303139335c75303066665c75303066645c75303133625c75303234305c75303137615c75303062355c75303031645c75303238655c75303166315c75303232315c75303161615c75303265325c75303335625c75303333665c75303239345c75303335315c75303038345c75303339395c75303262385c75303132375c75303330357a5c75303263625c75303066305c75303062355c75303164335c75303338385c75303364385c75303130625c75303266325c75303137305c75303335315c75303030305c75303136385c75303039646d5c75303331315c75303236615c75303330375c75303332635c75303361635c665c75303363335c75303264365c75303238645c75303136395c7530323438635c75303163385c75303261355c75303164615c75303165375c75303337355c75303332645c7530333165755c75303131665c75303338375c75303135325c75303065325c75303135326c5c75303164325c75303164615c75303136645c75303061665c75303333375c75303264375c75303339375c75303139395c75303134635c75303165385c75303234315c75303336635c75303130645c75303230635c75303161615c75303339355c75303133315c75303064615c7530316
5615c75303336645c75303064325c75303337365c75303363315c75303132645c75303266305c75303064364f255c75303263635c75303162645c75303062385c75303238365c75303136395c75303337335c75303232335c75303336655c75303037665c75303062616b5c75303132365c75303233305c75303330645c75303362385c75303164355c75303166615c75303338395c75303062635c75303135325c75303334365c75303139645c75303135615c75303031395c75303061385c75303133615c75303338635c75303339625c75303261655c75303065395c75303362635c75303166385c75303031665c75303230615c75303263355c75303134335c75303361635c75303334355c75303236645c75303139365c75303362665c75303135615c75303137305c75303165395c75303231395c75303332665c75303232645c75303030365c75303066305c75303134665c75303337375c75303234325d5c75303164325c75303337655c75303265665c75303331395c75303261355c75303265385c75303338395c75303235645c75303334315c75303338395c7530323230585c75303062645c75303166365c75303238645c75303231375c75303066665c75303130385c75303331305c75303330335c75303031395c75303039635c75303363315c75303039615c75303334355c75303331305c75303162335c75303263315c75303132395c75303234335c75303038627c5c75303361335c75303261635c75303165655c75303030305c75303237615c75303038385c75303066355c75303232375c75303236635c75303236355c7530336336205c75303038615c7530333561787c735c75303336305c75303362655c75303235385c75303334345c75303264365c75303262355c75303361315c75303135345c75303131625c75303061625c75303038615c75303332655c75303238325c75303031393d5c75303263335c75303332655c75303163645c75303139305c75303231305c75303131365c75303334305c75303234665c75303162635c75303333645c75303135305c75303132335c75303233645c75303133345c75303062327a5c75303331635c75303136312a5c753032316522')
assert proposal.is_valid() is False
proposal = Proposal(**orig.get_dict())
# ============================================================
proposal.payment_amount = -1
assert proposal.is_valid() is False
def test_proposal_is_expired(proposal):
cycle = 24 # testnet
now = misc.now()
proposal.start_epoch = now - (86400 * 2) # two days ago
proposal.end_epoch = now - (60 * 60) # expired one hour ago
assert proposal.is_expired(superblockcycle=cycle) is False
# fudge factor + a 24-block cycle == an expiry window of 9086, so...
proposal.end_epoch = now - 9085
assert proposal.is_expired(superblockcycle=cycle) is False
proposal.end_epoch = now - 9087
assert proposal.is_expired(superblockcycle=cycle) is True
def test_proposal_is_deletable(proposal):
now = misc.now()
assert proposal.is_deletable() is False
proposal.end_epoch = now - (86400 * 29)
assert proposal.is_deletable() is False
# add a couple seconds for time variance
proposal.end_epoch = now - ((86400 * 30) + 2)
assert proposal.is_deletable() is True
# deterministic ordering
def test_approved_and_ranked(go_list_proposals):
from omegacoind import OmegaDaemon
omegacoind = OmegaDaemon.from_omegacoin_conf(config.omegacoin_conf)
for item in go_list_proposals:
(go, subobj) = GovernanceObject.import_gobject_from_omegacoind(omegacoind, item)
prop_list = Proposal.approved_and_ranked(proposal_quorum=1, next_superblock_max_budget=60)
assert prop_list[0].object_hash == u'dfd7d63979c0b62456b63d5fc5306dbec451180adee85876cbf5b28c69d1a86c'
assert prop_list[1].object_hash == u'0523445762025b2e01a2cd34f1d10f4816cf26ee1796167e5b029901e5873630'
| true | true |
1c389685ff52c912f2612efea656ffd0b097758d | 7,565 | py | Python | model/deeplab.py | Shang-XH/BAFTT | 62392325342f48b8a89f0c2bf71e48026dd90629 | [
"MIT"
] | 4 | 2021-09-07T03:29:38.000Z | 2021-09-07T04:24:31.000Z | model/deeplab.py | Shang-XH/BAFTT | 62392325342f48b8a89f0c2bf71e48026dd90629 | [
"MIT"
] | null | null | null | model/deeplab.py | Shang-XH/BAFTT | 62392325342f48b8a89f0c2bf71e48026dd90629 | [
"MIT"
] | null | null | null | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch
import numpy as np
affine_par = True
def outS(i):
    """Map a DeepLab input spatial size to the network's output size.

    Models the three resolution-halving stages visible in ResNet.__init__
    (conv1 stride 2, ceil-mode maxpool stride 2, layer2 stride 2), each with
    ceil-style rounding.  e.g. outS(321) == 41.

    Fixed: the original used Python-2 style ``(i+1)/2`` which yields a float
    under Python 3; integer arithmetic restores the intended int result.
    """
    i = int(i)
    i = (i + 1) // 2
    i = int(np.ceil((i + 1) / 2.0))  # maxpool uses ceil_mode=True
    i = (i + 1) // 2
    return i
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1 ("same" for stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-conv residual block (the shallow-ResNet building block).

    The first 3x3 conv may downsample via ``stride``; ``downsample``, when
    given, projects the shortcut to the matching shape.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, affine=affine_par)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, affine=affine_par)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)

        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu(y)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 (optionally dilated) -> 1x1 residual bottleneck block.

    NOTE(review): unlike torchvision's ResNet, ``stride`` is applied on the
    first 1x1 conv (the "# change" lines), matching the DeepLab port.
    """
    expansion = 4  # output channels = planes * expansion
    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)  # change
        self.bn1 = nn.BatchNorm2d(planes,affine = affine_par)
        # for i in self.bn1.parameters():
        #     i.requires_grad = False
        padding = dilation  # keeps spatial size constant for the dilated 3x3
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,  # change
                               padding=padding, bias=False, dilation = dilation)
        self.bn2 = nn.BatchNorm2d(planes,affine = affine_par)
        # NOTE(review): only bn2's parameters are frozen here; freezing of
        # bn1/bn3 is commented out above/below — confirm this asymmetry is
        # intentional.
        for i in self.bn2.parameters():
            i.requires_grad = False
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4, affine = affine_par)
        # for i in self.bn3.parameters():
        #     i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        # Projection shortcut when the block changes shape.
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Classifier_Module(nn.Module):
    """ASPP-style head: parallel dilated 3x3 convs over 2048-channel input,
    summed into per-class logits.

    ``dilation_series`` and ``padding_series`` are zipped pairwise, one conv
    branch per pair.  Weights are initialized N(0, 0.01); biases keep the
    Conv2d default init.
    """

    def __init__(self, dilation_series, padding_series, num_classes):
        super(Classifier_Module, self).__init__()
        self.conv2d_list = nn.ModuleList()
        for dilation, padding in zip(dilation_series, padding_series):
            self.conv2d_list.append(
                nn.Conv2d(2048, num_classes, kernel_size=3, stride=1,
                          padding=padding, dilation=dilation, bias=True))
        for branch in self.conv2d_list:
            branch.weight.data.normal_(0, 0.01)

    def forward(self, x):
        # Sum of all parallel branches applied to the same input.
        logits = self.conv2d_list[0](x)
        for branch in self.conv2d_list[1:]:
            logits = logits + branch(x)
        return logits
class ResNet(nn.Module):
    """DeepLab backbone: ResNet whose last two stages are dilated instead of
    strided, followed by a multi-dilation classification head (layer5).

    ``layers`` gives the residual-block count per stage (e.g. [3, 4, 23, 3]
    for ResNet-101); ``block`` is the residual block class (Bottleneck).
    """
    def __init__(self, block, layers, num_classes):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, affine = affine_par)
        # Stem batchnorm is frozen (excluded from optimization).
        for i in self.bn1.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)  # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        # stride=1 + dilation preserves spatial resolution in the deep stages.
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        self.layer5 = self._make_pred_layer(Classifier_Module, [6,12,18,24],[6,12,18,24],num_classes)

        # Re-initialize every conv/batchnorm in the whole network.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # NOTE(review): `n` is unused — leftover from a fan-out
                # (He-style) initialization; N(0, 0.01) is used instead.
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        #        for i in m.parameters():
        #            i.requires_grad = False

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Stack ``blocks`` residual blocks; adds a 1x1 projection shortcut
        whenever the shape changes or dilation is 2/4 (first block only)."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion,affine = affine_par))
        # for i in downsample._modules['1'].parameters():
        #     i.requires_grad = False
        layers = []
        layers.append(block(self.inplanes, planes, stride,dilation=dilation, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))

        return nn.Sequential(*layers)

    def _make_pred_layer(self,block, dilation_series, padding_series,num_classes):
        """Instantiate the classification head (``block`` is Classifier_Module)."""
        return block(dilation_series,padding_series,num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)  # per-pixel class logits (at reduced resolution)
        return x

    def get_1x_lr_params_NOscale(self):
        """
        Yield all backbone parameters (everything except the layer5 head).
        Parameters with requires_grad=False (the frozen batchnorms) are
        skipped, so they are never handed to the optimizer.
        """
        b = []
        b.append(self.conv1)
        b.append(self.bn1)
        b.append(self.layer1)
        b.append(self.layer2)
        b.append(self.layer3)
        b.append(self.layer4)

        for i in range(len(b)):
            for j in b[i].modules():
                jj = 0
                for k in j.parameters():
                    jj+=1
                    if k.requires_grad:
                        yield k

    def get_10x_lr_params(self):
        """
        Yield the classifier-head (layer5) parameters, which are trained at
        10x the base learning rate.
        """
        b = []
        b.append(self.layer5.parameters())

        for j in range(len(b)):
            for i in b[j]:
                yield i

    def optim_parameters(self, args):
        """Optimizer parameter groups: backbone at lr, head at 10*lr."""
        return [{'params': self.get_1x_lr_params_NOscale(), 'lr': args.learning_rate},
                {'params': self.get_10x_lr_params(), 'lr': 10*args.learning_rate}]
def Res_Deeplab(num_classes=21):
    """Build a DeepLab ResNet-101 model (Bottleneck layout 3-4-23-3)."""
    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes)
| 33.325991 | 139 | 0.590482 | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch
import numpy as np
affine_par = True
def outS(i):
i = int(i)
i = (i+1)/2
i = int(np.ceil((i+1)/2.0))
i = (i+1)/2
return i
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, affine = affine_par)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, affine = affine_par)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)
self.bn1 = nn.BatchNorm2d(planes,affine = affine_par)
padding = dilation
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
padding=padding, bias=False, dilation = dilation)
self.bn2 = nn.BatchNorm2d(planes,affine = affine_par)
for i in self.bn2.parameters():
i.requires_grad = False
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4, affine = affine_par)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Classifier_Module(nn.Module):
def __init__(self, dilation_series, padding_series, num_classes):
super(Classifier_Module, self).__init__()
self.conv2d_list = nn.ModuleList()
for dilation, padding in zip(dilation_series, padding_series):
self.conv2d_list.append(nn.Conv2d(2048, num_classes, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias = True))
for m in self.conv2d_list:
m.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.conv2d_list[0](x)
for i in range(len(self.conv2d_list)-1):
out += self.conv2d_list[i+1](x)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64, affine = affine_par)
for i in self.bn1.parameters():
i.requires_grad = False
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
self.layer5 = self._make_pred_layer(Classifier_Module, [6,12,18,24],[6,12,18,24],num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion,affine = affine_par))
layers = []
layers.append(block(self.inplanes, planes, stride,dilation=dilation, downsample=downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def _make_pred_layer(self,block, dilation_series, padding_series,num_classes):
return block(dilation_series,padding_series,num_classes)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
return x
def get_1x_lr_params_NOscale(self):
b = []
b.append(self.conv1)
b.append(self.bn1)
b.append(self.layer1)
b.append(self.layer2)
b.append(self.layer3)
b.append(self.layer4)
for i in range(len(b)):
for j in b[i].modules():
jj = 0
for k in j.parameters():
jj+=1
if k.requires_grad:
yield k
def get_10x_lr_params(self):
b = []
b.append(self.layer5.parameters())
for j in range(len(b)):
for i in b[j]:
yield i
def optim_parameters(self, args):
return [{'params': self.get_1x_lr_params_NOscale(), 'lr': args.learning_rate},
{'params': self.get_10x_lr_params(), 'lr': 10*args.learning_rate}]
def Res_Deeplab(num_classes=21):
model = ResNet(Bottleneck,[3, 4, 23, 3], num_classes)
return model
| true | true |
1c38968752d7f0fcd0cd4b1a3ffa0328cb1afaf8 | 1,374 | py | Python | poolink_backend/bases/models.py | jaethewiederholen/Poolink_backend | 3a1b28856bc8916aedb7735de8b64fef3269ef1b | [
"MIT"
] | null | null | null | poolink_backend/bases/models.py | jaethewiederholen/Poolink_backend | 3a1b28856bc8916aedb7735de8b64fef3269ef1b | [
"MIT"
] | null | null | null | poolink_backend/bases/models.py | jaethewiederholen/Poolink_backend | 3a1b28856bc8916aedb7735de8b64fef3269ef1b | [
"MIT"
] | null | null | null | import timeago
from annoying.fields import AutoOneToOneField as _AutoOneToOneField
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from model_utils.models import TimeStampedModel
class AutoOneToOneField(_AutoOneToOneField):
    """Project-local re-export of django-annoying's AutoOneToOneField."""
    pass
class Manager(models.Manager):
    """Default manager: no extra filtering, returns every row."""
    pass
class AvailableManager(Manager):
    """Manager restricted to active rows (is_active=True)."""
    def get_queryset(self):
        return super().get_queryset().filter(is_active=True)
class UpdateMixin(object):
    """Adds an ``update(**fields)`` helper that sets the given attributes
    and persists only those fields via ``save(update_fields=...)``."""

    def update(self, **kwargs):
        # Refuse to update an instance that was never saved to the database.
        if self._state.adding:
            raise self.DoesNotExist
        for name, value in kwargs.items():
            setattr(self, name, value)
        self.save(update_fields=kwargs.keys())
class Model(UpdateMixin, TimeStampedModel, models.Model):
    """Abstract project base model.

    Combines TimeStampedModel's created/modified tracking with a soft
    activation flag and a free-form remark field.  ``objects`` returns all
    rows; ``available`` filters to is_active=True rows.
    """
    # "비고" = remark / free-form note
    remark = models.TextField(blank=True, null=True, verbose_name="비고")
    # "활성화 여부" = activation flag (soft-delete style; AvailableManager filters on it)
    is_active = models.BooleanField("활성화 여부", default=True, blank=True, null=True)

    class Meta:
        abstract = True

    objects = Manager()
    available = AvailableManager()

    @property
    def time(self):
        # Korean "time ago" string relative to now, based on `created`.
        return timeago.format(self.created, timezone.now(), "ko")

    def __init__(self, *args, **kwargs):
        super(Model, self).__init__(*args, **kwargs)
        # Relabel the inherited timestamp fields with Korean verbose names
        # ("생성일" = creation date, "수정일" = modification date).
        self._meta.get_field("created").verbose_name = _("생성일")
        self._meta.get_field("modified").verbose_name = _("수정일")
| 26.941176 | 82 | 0.69869 | import timeago
from annoying.fields import AutoOneToOneField as _AutoOneToOneField
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from model_utils.models import TimeStampedModel
class AutoOneToOneField(_AutoOneToOneField):
pass
class Manager(models.Manager):
pass
class AvailableManager(Manager):
def get_queryset(self):
return super().get_queryset().filter(is_active=True)
class UpdateMixin(object):
def update(self, **kwargs):
if self._state.adding:
raise self.DoesNotExist
for field, value in kwargs.items():
setattr(self, field, value)
self.save(update_fields=kwargs.keys())
class Model(UpdateMixin, TimeStampedModel, models.Model):
remark = models.TextField(blank=True, null=True, verbose_name="비고")
is_active = models.BooleanField("활성화 여부", default=True, blank=True, null=True)
class Meta:
abstract = True
objects = Manager()
available = AvailableManager()
@property
def time(self):
return timeago.format(self.created, timezone.now(), "ko")
def __init__(self, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
self._meta.get_field("created").verbose_name = _("생성일")
self._meta.get_field("modified").verbose_name = _("수정일")
| true | true |
1c389742a4897a973a09502694d1595d5bc5011e | 1,727 | py | Python | pyobjc_framework_Quartz-3.3a0-py3.6-macosx-10.13-x86_64.egg/Quartz/__init__.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 4 | 2019-03-11T18:05:49.000Z | 2021-05-22T21:09:09.000Z | pyobjc_framework_Quartz-3.3a0-py3.6-macosx-10.13-x86_64.egg/Quartz/__init__.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | pyobjc_framework_Quartz-3.3a0-py3.6-macosx-10.13-x86_64.egg/Quartz/__init__.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1 | 2019-03-18T18:53:36.000Z | 2019-03-18T18:53:36.000Z | """
Helper module that makes it easier to import all of Quartz
"""
import sys
import objc
import Foundation
import AppKit
def _load():
    """Create the lazy 'Quartz' module object and attach every Quartz
    submodule that is available on this system.

    Each submodule import is optional: a framework missing from the host
    OS simply causes that submodule to be skipped.
    """
    submods = []
    sys.modules['Quartz'] = mod = objc.ObjCLazyModule('Quartz',
        None, None, {}, None, {
            '__doc__': __doc__,
            'objc': objc,
            '__path__': __path__,
            '__loader__': globals().get('__loader__', None),
        }, submods)

    try:
        from Quartz import CoreGraphics as m
        submods.append(m)
        mod.CoreGraphics = m
    except ImportError:
        pass

    try:
        from Quartz import ImageIO as m
        submods.append(m)
        mod.ImageIO = m
    except ImportError:
        pass

    try:
        from Quartz import ImageKit as m
        submods.append(m)
        # Fixed: this block previously clobbered mod.ImageIO instead of
        # setting mod.ImageKit, so the ImageKit attribute was never exposed.
        mod.ImageKit = m
    except ImportError:
        pass

    try:
        from Quartz import CoreVideo as m
        submods.append(m)
        mod.CoreVideo = m
    except ImportError:
        pass

    try:
        from Quartz import QuartzCore as m
        submods.append(m)
        mod.QuartzCore = m
        # Keep the historical misspelling as an alias for backward
        # compatibility with code that referenced Quartz.QuartCore.
        mod.QuartCore = m
    except ImportError:
        pass

    # Note: a second, redundant ImageIO import block was removed here; the
    # attribute is already set (and the module already appended) above.

    try:
        from Quartz import PDFKit as m
        submods.append(m)
        mod.PDFKit = m
    except ImportError:
        pass

    try:
        from Quartz import QuartzFilters as m
        submods.append(m)
        mod.QuartzFilters = m
    except ImportError:
        pass

    try:
        from Quartz import QuickLookUI as m
        submods.append(m)
        mod.QuickLookUI = m
    except ImportError:
        pass

_load()
| 20.559524 | 64 | 0.557035 | import sys
import objc
import Foundation
import AppKit
def _load():
submods = []
sys.modules['Quartz'] = mod = objc.ObjCLazyModule('Quartz',
None, None, {}, None, {
'__doc__': __doc__,
'objc': objc,
'__path__': __path__,
'__loader__': globals().get('__loader__', None),
}, submods)
try:
from Quartz import CoreGraphics as m
submods.append(m)
mod.CoreGraphics = m
except ImportError:
pass
try:
from Quartz import ImageIO as m
submods.append(m)
mod.ImageIO = m
except ImportError:
pass
try:
from Quartz import ImageKit as m
submods.append(m)
mod.ImageIO = m
except ImportError:
pass
try:
from Quartz import CoreVideo as m
submods.append(m)
mod.CoreVideo = m
except ImportError:
pass
try:
from Quartz import QuartzCore as m
submods.append(m)
mod.QuartCore = m
except ImportError:
pass
try:
from Quartz import ImageIO as m
submods.append(m)
mod.ImageIO = m
except ImportError:
pass
try:
from Quartz import PDFKit as m
submods.append(m)
mod.PDFKit = m
except ImportError:
pass
try:
from Quartz import QuartzFilters as m
submods.append(m)
mod.QuartzFilters = m
except ImportError:
pass
try:
from Quartz import QuickLookUI as m
submods.append(m)
mod.QuickLookUI = m
except ImportError:
pass
_load()
| true | true |
1c38977b569e372892bc6e1bb21a826b7739d05d | 2,373 | py | Python | example/custom_log_format.py | hoangvx/json-logging-python | 6c43871fce2659de88827a1f8b690be80e55c328 | [
"Apache-2.0"
] | null | null | null | example/custom_log_format.py | hoangvx/json-logging-python | 6c43871fce2659de88827a1f8b690be80e55c328 | [
"Apache-2.0"
] | null | null | null | example/custom_log_format.py | hoangvx/json-logging-python | 6c43871fce2659de88827a1f8b690be80e55c328 | [
"Apache-2.0"
] | null | null | null | # This example shows how the logger can be set up to use a custom JSON format.
import logging
import json
import traceback
from datetime import datetime
import copy
import json_logging
import sys
json_logging.ENABLE_JSON_LOGGING = True
def extra(**kw):
    """Wrap keyword arguments in the nested ``extra.props`` layer that the
    logging ``extra=`` mechanism and CustomJSONLog expect."""
    return {"extra": {"props": kw}}
class CustomJSONLog(logging.Formatter):
    """Formatter that renders each log record as a single JSON document.

    Standard LogRecord attributes go under ``data`` with a ``python.``
    prefix; anything attached via ``extra={'props': ...}`` is merged into
    ``data`` as-is, and exception info (when present) is added under
    ``python.exc_info``.
    """

    python_log_prefix = 'python.'

    def get_exc_fields(self, record):
        """Build the ``python.exc_info`` entry from the record's exception data."""
        # Prefer the live exc_info tuple; fall back to the pre-formatted
        # exc_text (which may be None when neither was set).
        formatted = (self.format_exception(record.exc_info)
                     if record.exc_info else record.exc_text)
        return {f'{self.python_log_prefix}exc_info': formatted}

    @classmethod
    def format_exception(cls, exc_info):
        """Render an exc_info tuple as a traceback string ('' when falsy)."""
        if not exc_info:
            return ''
        return ''.join(traceback.format_exception(*exc_info))

    def format(self, record):
        """Serialize *record* to a JSON string."""
        prefix = self.python_log_prefix
        document = {
            "@timestamp": datetime.utcnow().isoformat(),
            "level": record.levelname,
            "message": record.getMessage(),
            # Plain concatenation kept (not an f-string): a None funcName
            # raises here, exactly as in the original implementation.
            "caller": record.filename + '::' + record.funcName,
        }
        document['data'] = {
            f'{prefix}logger_name': record.name,
            f'{prefix}module': record.module,
            f'{prefix}funcName': record.funcName,
            f'{prefix}filename': record.filename,
            f'{prefix}lineno': record.lineno,
            f'{prefix}thread': f'{record.threadName}[{record.thread}]',
            f'{prefix}pid': record.process,
        }
        if hasattr(record, 'props'):
            document['data'].update(record.props)
        if record.exc_info or record.exc_text:
            document['data'].update(self.get_exc_fields(record))
        return json.dumps(document)
def logger_init():
    """Initialise json-logging so all output is rendered by :class:`CustomJSONLog`."""
    json_logging.init(custom_formatter=CustomJSONLog)
# You would normally import logger_init and setup the logger in your main
# module - e.g. main.py
logger_init()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stderr))
logger.info('Starting')
try:
    1 / 0
except ZeroDivisionError:  # narrowed from a bare except: only this error is expected
    # Fix: the original message used '\t', which embedded a literal tab
    # ("You can<TAB> divide by zero"); the intended text is "can't".
    logger.exception("You can't divide by zero")
| 31.64 | 87 | 0.653182 |
import logging
import json
import traceback
from datetime import datetime
import copy
import json_logging
import sys
json_logging.ENABLE_JSON_LOGGING = True
def extra(**kw):
return {'extra': {'props': kw}}
class CustomJSONLog(logging.Formatter):
python_log_prefix = 'python.'
def get_exc_fields(self, record):
if record.exc_info:
exc_info = self.format_exception(record.exc_info)
else:
exc_info = record.exc_text
return {f'{self.python_log_prefix}exc_info': exc_info}
@classmethod
def format_exception(cls, exc_info):
return ''.join(traceback.format_exception(*exc_info)) if exc_info else ''
def format(self, record):
json_log_object = {"@timestamp": datetime.utcnow().isoformat(),
"level": record.levelname,
"message": record.getMessage(),
"caller": record.filename + '::' + record.funcName
}
json_log_object['data'] = {
f'{self.python_log_prefix}logger_name': record.name,
f'{self.python_log_prefix}module': record.module,
f'{self.python_log_prefix}funcName': record.funcName,
f'{self.python_log_prefix}filename': record.filename,
f'{self.python_log_prefix}lineno': record.lineno,
f'{self.python_log_prefix}thread': f'{record.threadName}[{record.thread}]',
f'{self.python_log_prefix}pid': record.process
}
if hasattr(record, 'props'):
json_log_object['data'].update(record.props)
if record.exc_info or record.exc_text:
json_log_object['data'].update(self.get_exc_fields(record))
return json.dumps(json_log_object)
def logger_init():
json_logging.init(custom_formatter=CustomJSONLog)
logger_init()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stderr))
logger.info('Starting')
try:
1/0
except:
logger.exception('You can\t divide by zero')
| true | true |
1c389798083410ad4a61a2080241e05412c16cb9 | 6,038 | py | Python | hubspot/crm/quotes/api/search_api.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 117 | 2020-04-06T08:22:53.000Z | 2022-03-18T03:41:29.000Z | hubspot/crm/quotes/api/search_api.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 62 | 2020-04-06T16:21:06.000Z | 2022-03-17T16:50:44.000Z | hubspot/crm/quotes/api/search_api.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 45 | 2020-04-06T16:13:52.000Z | 2022-03-30T21:33:17.000Z | # coding: utf-8
"""
Quotes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from hubspot.crm.quotes.api_client import ApiClient
from hubspot.crm.quotes.exceptions import ApiTypeError, ApiValueError # noqa: F401
class SearchApi(object):
    """Client for the CRM quotes search endpoint.

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def do_search(self, public_object_search_request, **kwargs):  # noqa: E501
        """Search quote objects.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.do_search(public_object_search_request, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param PublicObjectSearchRequest public_object_search_request: (required)
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: CollectionResponseWithTotalSimplePublicObjectForwardPaging
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # This convenience wrapper always returns just the deserialized body.
        kwargs["_return_http_data_only"] = True
        return self.do_search_with_http_info(public_object_search_request, **kwargs)  # noqa: E501

    def do_search_with_http_info(self, public_object_search_request, **kwargs):  # noqa: E501
        """Search quote objects, returning (data, status_code, headers).

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.do_search_with_http_info(public_object_search_request, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param PublicObjectSearchRequest public_object_search_request: (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(CollectionResponseWithTotalSimplePublicObjectForwardPaging, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Must run first: snapshots self, the request object and kwargs
        # before any other local variable is defined.
        local_var_params = locals()

        all_params = ["public_object_search_request"]
        all_params.extend(["async_req", "_return_http_data_only", "_preload_content", "_request_timeout"])

        # Reject unknown keyword arguments; fold the known ones into the
        # parameter mapping used below.
        for key, val in six.iteritems(local_var_params["kwargs"]):
            if key not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method do_search" % key)
            local_var_params[key] = val
        del local_var_params["kwargs"]
        # verify the required parameter 'public_object_search_request' is set
        if self.api_client.client_side_validation and ("public_object_search_request" not in local_var_params or local_var_params["public_object_search_request"] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `public_object_search_request` when calling `do_search`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The search request object is sent as the JSON request body.
        body_params = None
        if "public_object_search_request" in local_var_params:
            body_params = local_var_params["public_object_search_request"]
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(["application/json", "*/*"])  # noqa: E501

        # HTTP header `Content-Type`
        header_params["Content-Type"] = self.api_client.select_header_content_type(["application/json"])  # noqa: E501

        # Authentication setting
        auth_settings = ["hapikey"]  # noqa: E501

        return self.api_client.call_api(
            "/crm/v3/objects/quotes/search",
            "POST",
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type="CollectionResponseWithTotalSimplePublicObjectForwardPaging",  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get("async_req"),
            _return_http_data_only=local_var_params.get("_return_http_data_only"),  # noqa: E501
            _preload_content=local_var_params.get("_preload_content", True),
            _request_timeout=local_var_params.get("_request_timeout"),
            collection_formats=collection_formats,
        )
| 43.753623 | 199 | 0.649884 |
from __future__ import absolute_import
import re
import six
from hubspot.crm.quotes.api_client import ApiClient
from hubspot.crm.quotes.exceptions import ApiTypeError, ApiValueError
class SearchApi(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def do_search(self, public_object_search_request, **kwargs):
kwargs["_return_http_data_only"] = True
return self.do_search_with_http_info(public_object_search_request, **kwargs)
def do_search_with_http_info(self, public_object_search_request, **kwargs):
local_var_params = locals()
all_params = ["public_object_search_request"]
all_params.extend(["async_req", "_return_http_data_only", "_preload_content", "_request_timeout"])
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method do_search" % key)
local_var_params[key] = val
del local_var_params["kwargs"]
if self.api_client.client_side_validation and ("public_object_search_request" not in local_var_params or local_var_params["public_object_search_request"] is None): raise ApiValueError("Missing the required parameter `public_object_search_request` when calling `do_search`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "public_object_search_request" in local_var_params:
body_params = local_var_params["public_object_search_request"]
header_params["Accept"] = self.api_client.select_header_accept(["application/json", "*/*"])
header_params["Content-Type"] = self.api_client.select_header_content_type(["application/json"]) auth_settings = ["hapikey"]
return self.api_client.call_api(
"/crm/v3/objects/quotes/search",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="CollectionResponseWithTotalSimplePublicObjectForwardPaging",
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get("_return_http_data_only"),
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
| true | true |
1c3897f1c9439d5c963e8334e6f36bff19799a70 | 2,234 | py | Python | alpafa/cli.py | timothybazalgette/alpafa | b11486719ff9b411ca4a405685302ab3f1155702 | [
"MIT"
] | 3 | 2017-07-14T15:10:34.000Z | 2018-10-01T03:28:15.000Z | alpafa/cli.py | timothybazalgette/alpafa | b11486719ff9b411ca4a405685302ab3f1155702 | [
"MIT"
] | null | null | null | alpafa/cli.py | timothybazalgette/alpafa | b11486719ff9b411ca4a405685302ab3f1155702 | [
"MIT"
] | null | null | null | '''Defines the command line interface for ALPALFA.'''
import argparse
from .alpafa import Lexicon
from .parse import parse_file, ParserError
def set_args():
    '''Parse the command-line parameters for ALPAFA and return them as a tuple
    of (input_file, output_file, uf, cselect, log, cats, dependents).'''
    parser = argparse.ArgumentParser(prog='alpafa',
                                     description='Applies the algorithm from AAFP to a correctly \
                                     formatted input file.')
    parser.add_argument('input_file', help='correctly formatted UTF-8 input file')
    parser.add_argument('output_file', help='name of file to output')
    # The --no_* flags store False, so uf and cselect default to True.
    parser.add_argument('--no_uf', dest='uf', action='store_false',
                        help='do not implement unvalued features')
    parser.add_argument('--no_cselect', dest='cselect', action='store_false',
                        help='do not implement c-selection')
    parser.add_argument('--log', dest='log', action='store_true',
                        help='include a log of algorithm operations')
    parser.add_argument('--categories', dest='cats', action='store_true',
                        help='list all categories before heads')
    parser.add_argument('--dependents', dest='dependents', action='store_true',
                        help='list all dependent features below their relevant categories (implies \
                        --categories)')
    args = parser.parse_args()
    # --dependents only makes sense with the category listing, so force it on.
    if args.dependents:
        args.cats = True
    return (args.input_file, args.output_file, args.uf, args.cselect, args.log, args.cats,
            args.dependents)
def run_alpafa(input_file, output_file, uf, cselect, log, cats, dependents):
    """Parse ``input_file``, apply ALPAFA to its contents, write the resulting
    lexicon to ``output_file`` and print summary statistics.

    Parse or I/O failures are reported on stdout and abort the run.
    """
    try:
        prominence, heads = parse_file(input_file)
    except FileNotFoundError as err:
        # Drop the 10-character "[Errno N] " prefix from the OS message.
        print('alpafa: input failure: ' + str(err)[10:])
        return
    except ParserError as err:
        print('alpafa: parsing failure: {}'.format(err))
        return
    lexicon = Lexicon(prominence, heads, uf, cselect)
    with open(output_file, 'w', encoding='utf-8') as out:
        out.write(lexicon.display(log, cats, dependents))
    print(lexicon.stats())
def main():
    """Console entry point: parse the CLI arguments and run ALPAFA with them."""
    run_alpafa(*set_args())
| 40.618182 | 100 | 0.628021 |
import argparse
from .alpafa import Lexicon
from .parse import parse_file, ParserError
def set_args():
parser = argparse.ArgumentParser(prog='alpafa',
description='Applies the algorithm from AAFP to a correctly \
formatted input file.')
parser.add_argument('input_file', help='correctly formatted UTF-8 input file')
parser.add_argument('output_file', help='name of file to output')
parser.add_argument('--no_uf', dest='uf', action='store_false',
help='do not implement unvalued features')
parser.add_argument('--no_cselect', dest='cselect', action='store_false',
help='do not implement c-selection')
parser.add_argument('--log', dest='log', action='store_true',
help='include a log of algorithm operations')
parser.add_argument('--categories', dest='cats', action='store_true',
help='list all categories before heads')
parser.add_argument('--dependents', dest='dependents', action='store_true',
help='list all dependent features below their relevant categories (implies \
--categories)')
args = parser.parse_args()
if args.dependents:
args.cats = True
return (args.input_file, args.output_file, args.uf, args.cselect, args.log, args.cats,
args.dependents)
def run_alpafa(input_file, output_file, uf, cselect, log, cats, dependents):
try:
prominence, heads = parse_file(input_file)
except FileNotFoundError as e:
print('alpafa: input failure: ' + str(e)[10:])
return
except ParserError as e:
print('alpafa: parsing failure: {}'.format(e))
return
lex = Lexicon(prominence, heads, uf, cselect)
with open(output_file, 'w', encoding='utf-8') as f:
f.write(lex.display(log, cats, dependents))
print(lex.stats())
def main():
run_alpafa(*set_args())
| true | true |
1c38982d038dc6812349ad9b132b32e7a77b8f6d | 216 | py | Python | backend/utils/basefilters.py | zerlee/open-cmdb | e05eeab70bf2c2e14603597bf99c45b6c3330d1e | [
"BSD-3-Clause"
] | 126 | 2019-09-17T17:49:35.000Z | 2022-03-31T13:34:35.000Z | backend/utils/basefilters.py | tom2jack/open-cmdb | 68bc028d5d6162dbfa724d7bbf17363f65e44557 | [
"BSD-3-Clause"
] | 5 | 2020-01-19T08:43:38.000Z | 2021-06-10T21:58:30.000Z | backend/utils/basefilters.py | tom2jack/open-cmdb | 68bc028d5d6162dbfa724d7bbf17363f65e44557 | [
"BSD-3-Clause"
] | 52 | 2019-09-20T06:10:32.000Z | 2022-03-31T13:34:28.000Z | # -*- coding: utf-8 -*-
import django_filters
class BaseFilter(django_filters.FilterSet):
    """Common filter base: adds a ``?sort=`` query parameter ordering by ``create_time``."""
    # Use "sort=create_time" / "sort=-create_time" in the query string.
    sort = django_filters.OrderingFilter(fields=('create_time',))
    class Meta:
        model = None  # subclasses must set the concrete model
        fields = {}
| 19.636364 | 65 | 0.652778 |
import django_filters
class BaseFilter(django_filters.FilterSet):
sort = django_filters.OrderingFilter(fields=('create_time',))
class Meta:
model = None
fields = {}
| true | true |
1c389af6bf03ce9aac0253b5095c78e8a33575ef | 1,676 | py | Python | out/pulp/pulp_python/.github_workflows_scripts_stage-changelog-for-master.py | heyjoakim/munaiah-analyser | d76056cecd3b7f4a6cd72d7fd526cea18aa671d6 | [
"MIT"
] | 1 | 2022-01-03T17:47:20.000Z | 2022-01-03T17:47:20.000Z | out/pulp/pulp_python/.github_workflows_scripts_stage-changelog-for-master.py | heyjoakim/munaiah-analyser | d76056cecd3b7f4a6cd72d7fd526cea18aa671d6 | [
"MIT"
] | null | null | null | out/pulp/pulp_python/.github_workflows_scripts_stage-changelog-for-master.py | heyjoakim/munaiah-analyser | d76056cecd3b7f4a6cd72d7fd526cea18aa671d6 | [
"MIT"
] | 1 | 2021-12-22T13:59:34.000Z | 2021-12-22T13:59:34.000Z | # WARNING: DO NOT EDIT!
#
# This file was generated by plugin_template, and is managed by it. Please use
# './plugin-template --github pulp_python' to update this file.
#
# For more info visit https://github.com/pulp/plugin_template
import argparse
import os
import textwrap

from git import Repo
from git.exc import GitCommandError

helper = textwrap.dedent(
    """\
    Stage the changelog for a release on master branch.
    Example:
    $ python .github/workflows/scripts/stage-changelog-for-master.py 3.4.0
    """
)

parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=helper)
parser.add_argument(
    "release_version",
    type=str,
    help="The version string for the release.",
)

args = parser.parse_args()
release_version_arg = args.release_version

# The plugin checkout root is everything before the /.github directory.
release_path = os.path.dirname(os.path.abspath(__file__))
plugin_path = release_path.split("/.github")[0]
print(f"\n\nRepo path: {plugin_path}")
repo = Repo(plugin_path)

changelog_commit = None
# Look for a commit whose subject line is "<version> changelog".
for commit in repo.iter_commits():
    if f"{release_version_arg} changelog" == commit.message.split("\n")[0]:
        changelog_commit = commit
        break

if not changelog_commit:
    # Fix: the original string lacked the f-prefix, so the literal text
    # "{release_version_arg}" was printed instead of the requested version.
    raise RuntimeError(f"Changelog commit for {release_version_arg} was not found.")

git = repo.git
git.stash()
git.checkout("origin/master")
try:
    git.cherry_pick(changelog_commit.hexsha)
except GitCommandError:
    # The pick may conflict on the changelog files; stage them and finish.
    git.add("CHANGES/")
    # Don't try opening an editor for the commit message
    with git.custom_environment(GIT_EDITOR="true"):
        git.cherry_pick("--continue")
git.reset("origin/master")
| 25.784615 | 99 | 0.727924 |
import argparse
import os
import textwrap
from git import Repo
from git.exc import GitCommandError
helper = textwrap.dedent(
"""\
Stage the changelog for a release on master branch.
Example:
$ python .github/workflows/scripts/stage-changelog-for-master.py 3.4.0
"""
)
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=helper)
parser.add_argument(
"release_version",
type=str,
help="The version string for the release.",
)
args = parser.parse_args()
release_version_arg = args.release_version
release_path = os.path.dirname(os.path.abspath(__file__))
plugin_path = release_path.split("/.github")[0]
print(f"\n\nRepo path: {plugin_path}")
repo = Repo(plugin_path)
changelog_commit = None
for commit in repo.iter_commits():
if f"{release_version_arg} changelog" == commit.message.split("\n")[0]:
changelog_commit = commit
break
if not changelog_commit:
raise RuntimeError("Changelog commit for {release_version_arg} was not found.")
git = repo.git
git.stash()
git.checkout("origin/master")
try:
git.cherry_pick(changelog_commit.hexsha)
except GitCommandError:
git.add("CHANGES/")
with git.custom_environment(GIT_EDITOR="true"):
git.cherry_pick("--continue")
git.reset("origin/master")
| true | true |
1c389b6847ff9e816da0201b051797f00f747eee | 3,547 | py | Python | config.py | 0xN1nja/Instagram-DM-Bot | 70ec33c3e64635bc5f9c20b841a6c6fe8bdb66b3 | [
"MIT"
] | 1 | 2022-02-12T12:00:41.000Z | 2022-02-12T12:00:41.000Z | config.py | N1nja0p/Instagram-DM-Bot | 70ec33c3e64635bc5f9c20b841a6c6fe8bdb66b3 | [
"MIT"
] | null | null | null | config.py | N1nja0p/Instagram-DM-Bot | 70ec33c3e64635bc5f9c20b841a6c6fe8bdb66b3 | [
"MIT"
] | null | null | null | # Run This Script Before Running bot.py
import getpass
import re
import os
# Constants
WELCOME_MESSAGE = '''
###
# # # #### ##### ## #### ##### ## # #
# ## # # # # # # # # # # # ## ##
# # # # #### # # # # # # # # # ## #
# # # # # # ###### # ### ##### ###### # #
# # ## # # # # # # # # # # # # #
### # # #### # # # #### # # # # # #
##### # #
# # ## ##
# # # ## #
# # # #
# # # #
##### # #
##### #### #####
# # # # #
##### # # #
# # # # #
# # # # #
##### #### #
'''
print(WELCOME_MESSAGE)
def validate_schedule_input(scheduling_time: str) -> bool:
    """Return True if *scheduling_time* is a valid 24-hour ``HH:MM`` string.

    Fixes the original check, which was unanchored at the end and did not
    range-check the digits, so inputs such as "99:99" or "12:30pm" were
    accepted and later fed to the scheduler.
    """
    # Hours 00-23, minutes 00-59; fullmatch rejects trailing junk.
    return re.fullmatch(r"([01]\d|2[0-3]):([0-5]\d)", scheduling_time) is not None
def validate_webhook_url(webhook_url: str):
    """Return True when the URL begins with an http:// or https:// scheme."""
    return webhook_url.startswith(("https://", "http://"))
# --- Instagram credentials -------------------------------------------------
username = input("Enter Your Instagram Username : ").lower()
if len(username) > 0:
    USERNAME = username
else:
    print("Invalid Username!")
    exit()
# getpass reads the password without echoing it to the terminal.
password = getpass.getpass("Enter Your Instagram Password : ")
if len(password) > 0:
    PASSWORD = password
else:
    print("Invalid Password!")
    exit()
# --- Target account --------------------------------------------------------
t_username = input("Enter Target's Username : ").lower()
if len(t_username) > 0:
    TARGET_USERNAME = t_username
else:
    print("Enter Target's Username Correctly!")
    exit()
# --- Optional message scheduling -------------------------------------------
# NOTE(review): the prompt says "(Case Sensitive)" but the answer is
# lower-cased, so "Y" and "y" are both accepted — confirm the intended text.
schedule_message = input("Do You Want To Schedule Message (Y/N) (Case Sensitive) : ").lower()
if schedule_message == "y":
    s_time = input("Enter Sending Time (24hr) Eg : 00:00 : ")
    if validate_schedule_input(s_time):
        SENDING_TIME = s_time
        SCHEDULE_MESSAGE = True
        DONT_SCHEDULE = False
    else:
        print("Invalid Time Format.")
        exit()
elif schedule_message == "n":
    SENDING_TIME = None
    DONT_SCHEDULE = True
    SCHEDULE_MESSAGE = False
else:
    print("Please Enter Value Correctly!")
    exit()
# --- Shutdown after sending ------------------------------------------------
shutdown_pc = input("Do You Want To Shutdown PC After Sending Message (Y/N) (Case Sensitive) : ").lower()
if shutdown_pc == "y":
    SHUTDOWN = True
elif shutdown_pc == "n":
    SHUTDOWN = False
else:
    print("Please Enter Value Correctly!")
    exit()
# --- Selenium chromedriver --------------------------------------------------
# The path must contain "chromedriver" and point at an existing file.
chromedriver_path = input(
    "Enter Chrome Driver Path (Download From https://chromedriver.chromium.org/ According To Your Chrome Version) : ")
if "chromedriver" in chromedriver_path and os.path.isfile(chromedriver_path):
    CHROME_DRIVER_PATH = chromedriver_path
else:
    print("Invalid Chrome Driver Path!")
    exit()
# --- Message text and Discord webhook ---------------------------------------
message = input("Type Message To Send : ")
if len(message) > 0:
    MESSAGE = message
else:
    print("Please Enter Message Correctly!")
    exit()
webhook_url = input("Enter Discord Webhook URL : ")
if len(webhook_url) > 0 and validate_webhook_url(webhook_url):
    WEBHOOK_URL = webhook_url
else:
    print("Invalid Webhook URL!")
    exit()
# --- Persist configuration ---------------------------------------------------
# One value per line, in the order bot.py presumably reads them back —
# verify against bot.py before reordering.
# NOTE(review): the password is written in plain text — confirm acceptable.
with open("config.txt", "w") as f:
    f.write(str(USERNAME) + "\n")
    f.write(str(PASSWORD) + "\n")
    f.write(str(TARGET_USERNAME) + "\n")
    f.write(str(MESSAGE) + "\n")
    f.write(str(SHUTDOWN) + "\n")
    f.write(str(SENDING_TIME) + "\n")
    f.write(str(CHROME_DRIVER_PATH) + "\n")
    f.write(str(DONT_SCHEDULE) + "\n")
    f.write(str(SCHEDULE_MESSAGE) + "\n")
    f.write(str(WEBHOOK_URL) + "\n")
print("Done! Now Run bot.py")
| 28.837398 | 118 | 0.540175 |
import getpass
import re
import os
WELCOME_MESSAGE = '''
###
# # # #### ##### ## #### ##### ## # #
# ## # # # # # # # # # # # ## ##
# # # # #### # # # # # # # # # ## #
# # # # # # ###### # ### ##### ###### # #
# # ## # # # # # # # # # # # # #
### # # #### # # # #### # # # # # #
##### # #
# # ## ##
# # # ## #
# # # #
# # # #
##### # #
##### #### #####
# # # # #
##### # # #
# # # # #
# # # # #
##### #### #
'''
print(WELCOME_MESSAGE)
def validate_schedule_input(scheduling_time: str):
if not re.match(r"\d\d:\d\d", scheduling_time):
return False
else:
return True
def validate_webhook_url(webhook_url: str):
if webhook_url.startswith("https://") or webhook_url.startswith("http://"):
return True
else:
return False
username = input("Enter Your Instagram Username : ").lower()
if len(username) > 0:
USERNAME = username
else:
print("Invalid Username!")
exit()
password = getpass.getpass("Enter Your Instagram Password : ")
if len(password) > 0:
PASSWORD = password
else:
print("Invalid Password!")
exit()
t_username = input("Enter Target's Username : ").lower()
if len(t_username) > 0:
TARGET_USERNAME = t_username
else:
print("Enter Target's Username Correctly!")
exit()
schedule_message = input("Do You Want To Schedule Message (Y/N) (Case Sensitive) : ").lower()
if schedule_message == "y":
s_time = input("Enter Sending Time (24hr) Eg : 00:00 : ")
if validate_schedule_input(s_time):
SENDING_TIME = s_time
SCHEDULE_MESSAGE = True
DONT_SCHEDULE = False
else:
print("Invalid Time Format.")
exit()
elif schedule_message == "n":
SENDING_TIME = None
DONT_SCHEDULE = True
SCHEDULE_MESSAGE = False
else:
print("Please Enter Value Correctly!")
exit()
shutdown_pc = input("Do You Want To Shutdown PC After Sending Message (Y/N) (Case Sensitive) : ").lower()
if shutdown_pc == "y":
SHUTDOWN = True
elif shutdown_pc == "n":
SHUTDOWN = False
else:
print("Please Enter Value Correctly!")
exit()
chromedriver_path = input(
"Enter Chrome Driver Path (Download From https://chromedriver.chromium.org/ According To Your Chrome Version) : ")
if "chromedriver" in chromedriver_path and os.path.isfile(chromedriver_path):
CHROME_DRIVER_PATH = chromedriver_path
else:
print("Invalid Chrome Driver Path!")
exit()
message = input("Type Message To Send : ")
if len(message) > 0:
MESSAGE = message
else:
print("Please Enter Message Correctly!")
exit()
webhook_url = input("Enter Discord Webhook URL : ")
if len(webhook_url) > 0 and validate_webhook_url(webhook_url):
WEBHOOK_URL = webhook_url
else:
print("Invalid Webhook URL!")
exit()
with open("config.txt", "w") as f:
f.write(str(USERNAME) + "\n")
f.write(str(PASSWORD) + "\n")
f.write(str(TARGET_USERNAME) + "\n")
f.write(str(MESSAGE) + "\n")
f.write(str(SHUTDOWN) + "\n")
f.write(str(SENDING_TIME) + "\n")
f.write(str(CHROME_DRIVER_PATH) + "\n")
f.write(str(DONT_SCHEDULE) + "\n")
f.write(str(SCHEDULE_MESSAGE) + "\n")
f.write(str(WEBHOOK_URL) + "\n")
print("Done! Now Run bot.py")
| true | true |
1c389b74884a98f17aa63cfe4af08f81c6037768 | 11,506 | py | Python | pysteps/utils/transformation.py | tjniemi/pysteps | 76324d8f315f63c6723887f4c99d155749a31e83 | [
"BSD-3-Clause"
] | 285 | 2018-07-11T10:42:43.000Z | 2022-03-23T13:44:54.000Z | pysteps/utils/transformation.py | tjniemi/pysteps | 76324d8f315f63c6723887f4c99d155749a31e83 | [
"BSD-3-Clause"
] | 246 | 2018-07-16T06:17:12.000Z | 2022-03-22T15:45:08.000Z | pysteps/utils/transformation.py | tjniemi/pysteps | 76324d8f315f63c6723887f4c99d155749a31e83 | [
"BSD-3-Clause"
] | 97 | 2018-07-12T12:05:45.000Z | 2022-03-31T14:56:31.000Z | # -*- coding: utf-8 -*-
"""
pysteps.utils.transformation
============================
Methods for transforming data values.
.. autosummary::
:toctree: ../generated/
boxcox_transform
dB_transform
NQ_transform
sqrt_transform
"""
import numpy as np
import scipy.stats as scipy_stats
import warnings
from scipy.interpolate import interp1d
warnings.filterwarnings(
"ignore", category=RuntimeWarning
) # To deactivate warnings for comparison operators with NaNs
def boxcox_transform(
    R, metadata=None, Lambda=None, threshold=None, zerovalue=None, inverse=False
):
    """The one-parameter Box-Cox transformation.

    Applies T(x) = ln(x) when ``Lambda`` is 0 (the default) and
    T(x) = (x**Lambda - 1) / Lambda otherwise (Box and Cox, 1964).
    Values below ``threshold`` are mapped to ``zerovalue``.

    Parameters
    ----------
    R: array-like
        Array of any shape to be (back-)transformed.
    metadata: dict, optional
        Metadata dictionary with the transform, zerovalue and threshold
        attributes; an updated copy is returned.
    Lambda: float, optional
        Box-Cox exponent. Defaults to the value stored in the metadata,
        else 0 (the log transform). Choose Lambda < 1 for positively
        skewed data, Lambda > 1 for negatively skewed data.
    threshold: float, optional
        No-rain threshold in the units of R. Defaults to the metadata
        value, else 0.1 (forward) / -10 (inverse).
    zerovalue: float, optional
        Value assigned to the no-rain pixels defined by the threshold.
        Defaults to threshold - 1 (forward) / 0 (inverse).
    inverse: bool, optional
        If True, perform the inverse transform. False by default.

    Returns
    -------
    R: array-like
        Array of any shape containing the (back-)transformed units.
    metadata: dict
        The metadata with updated attributes.

    References
    ----------
    Box, G. E. and Cox, D. R. (1964), An Analysis of Transformations.
    Journal of the Royal Statistical Society B, 26: 211-243.
    doi:10.1111/j.2517-6161.1964.tb00553.x
    """
    R = R.copy()

    if metadata is None:
        metadata = {"transform": "BoxCox" if inverse else None}
    else:
        metadata = metadata.copy()

    if not inverse:
        # Already in Box-Cox space: nothing to do.
        if metadata["transform"] == "BoxCox":
            return R, metadata

        if Lambda is None:
            Lambda = metadata.get("BoxCox_lambda", 0.0)
        if threshold is None:
            threshold = metadata.get("threshold", 0.1)

        # Mask no-rain pixels before the values are transformed.
        no_rain = R < threshold

        if Lambda == 0.0:
            # Lambda == 0 reduces to the natural-log transform.
            R[~no_rain] = np.log(R[~no_rain])
            threshold = np.log(threshold)
        else:
            R[~no_rain] = (R[~no_rain] ** Lambda - 1) / Lambda
            threshold = (threshold ** Lambda - 1) / Lambda

        if zerovalue is None:
            zerovalue = threshold - 1  # TODO: set to a more meaningful value
        R[no_rain] = zerovalue

        metadata.update(
            transform="BoxCox",
            BoxCox_lambda=Lambda,
            zerovalue=zerovalue,
            threshold=threshold,
        )
    else:
        # Only Box-Cox/log data can be back-transformed here.
        if metadata["transform"] not in ["BoxCox", "log"]:
            return R, metadata

        if Lambda is None:
            Lambda = metadata.pop("BoxCox_lambda", 0.0)
        if threshold is None:
            threshold = metadata.get("threshold", -10.0)
        if zerovalue is None:
            zerovalue = 0.0

        if Lambda == 0.0:
            R = np.exp(R)
            threshold = np.exp(threshold)
        else:
            # Inverse power transform: (Lambda*x + 1) ** (1/Lambda).
            R = np.exp(np.log(Lambda * R + 1) / Lambda)
            threshold = np.exp(np.log(Lambda * threshold + 1) / Lambda)

        R[R < threshold] = zerovalue

        metadata.update(transform=None, zerovalue=zerovalue, threshold=threshold)

    return R, metadata
def dB_transform(R, metadata=None, threshold=None, zerovalue=None, inverse=False):
    """Transform precipitation intensities to/from dB units.

    Forward: values at or above ``threshold`` become 10*log10(x) and values
    below are set to ``zerovalue``. Inverse: 10**(x/10), with values below
    the back-transformed threshold set to ``zerovalue``.

    Parameters
    ----------
    R: array-like
        Array of any shape to be (back-)transformed.
    metadata: dict, optional
        Metadata dictionary with the transform, zerovalue and threshold
        attributes; an updated copy is returned.
    threshold: float, optional
        No-rain threshold in the units of R. Defaults to the metadata
        value, else 0.1 (forward) / -10 (inverse).
    zerovalue: float, optional
        Value assigned to the no-rain pixels defined by the threshold.
        Defaults to threshold - 5 (forward) / 0 (inverse).
    inverse: bool, optional
        If True, perform the inverse transform. False by default.

    Returns
    -------
    R: array-like
        Array of any shape containing the (back-)transformed units.
    metadata: dict
        The metadata with updated attributes.
    """
    R = R.copy()

    if metadata is None:
        metadata = {"transform": "dB" if inverse else None}
    else:
        metadata = metadata.copy()

    if not inverse:
        # Already in dB: nothing to do.
        if metadata["transform"] == "dB":
            return R, metadata

        if threshold is None:
            threshold = metadata.get("threshold", 0.1)
        below = R < threshold

        # Convert rainy pixels to decibels.
        R[~below] = 10.0 * np.log10(R[~below])
        threshold = 10.0 * np.log10(threshold)

        if zerovalue is None:
            zerovalue = threshold - 5  # TODO: set to a more meaningful value
        R[below] = zerovalue

        metadata.update(transform="dB", zerovalue=zerovalue, threshold=threshold)
        return R, metadata

    # Inverse: only dB data can be back-transformed.
    if metadata["transform"] != "dB":
        return R, metadata

    if threshold is None:
        threshold = metadata.get("threshold", -10.0)
    if zerovalue is None:
        zerovalue = 0.0

    R = 10.0 ** (R / 10.0)
    threshold = 10.0 ** (threshold / 10.0)
    R[R < threshold] = zerovalue

    metadata.update(transform=None, threshold=threshold, zerovalue=zerovalue)
    return R, metadata
def NQ_transform(R, metadata=None, inverse=False, **kwargs):
    """The normal quantile transformation as in Bogner et al (2012).
    Zero rain values are set to zero in norm space.
    Parameters
    ----------
    R: array-like
        Array of any shape to be transformed.
    metadata: dict, optional
        Metadata dictionary containing the transform, zerovalue and threshold
        attributes as described in the documentation of
        :py:mod:`pysteps.io.importers`.
    inverse: bool, optional
        If set to True, it performs the inverse transform. False by default.
    Other Parameters
    ----------------
    a: float, optional
        The offset fraction to be used for plotting positions;
        typically in (0,1).
        The default is 0., that is, it spaces the points evenly in the uniform
        distribution.
    Returns
    -------
    R: array-like
        Array of any shape containing the (back-)transformed units.
    metadata: dict
        The metadata with updated attributes.
    References
    ----------
    Bogner, K., Pappenberger, F., and Cloke, H. L.: Technical Note: The normal
    quantile transformation and its application in a flood forecasting system,
    Hydrol. Earth Syst. Sci., 16, 1085-1094,
    https://doi.org/10.5194/hess-16-1085-2012, 2012.
    """
    # defaults
    a = kwargs.get("a", 0.0)
    R = R.copy()
    shape0 = R.shape
    # work on a flat float copy; NaNs are excluded from the transform
    # and written back unchanged at the end
    R = R.ravel().astype(float)
    idxNan = np.isnan(R)
    R_ = R[~idxNan]
    if metadata is None:
        if inverse:
            metadata = {"transform": "NQT"}
        else:
            metadata = {"transform": None}
        metadata["zerovalue"] = np.min(R_)
    else:
        metadata = metadata.copy()
    if not inverse:
        # Plotting positions
        # https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot#Plotting_position
        n = R_.size
        Rpp = ((np.arange(n) + 1 - a) / (n + 1 - 2 * a)).reshape(R_.shape)
        # NQ transform: map the empirical quantiles onto the standard normal
        Rqn = scipy_stats.norm.ppf(Rpp)
        R__ = np.interp(R_, R_[np.argsort(R_)], Rqn)
        # set zero rain to 0 in norm space
        R__[R[~idxNan] == metadata["zerovalue"]] = 0
        # build inverse transform (stored in the metadata for the
        # inverse=True call; out-of-range values clamp to the data range)
        metadata["inqt"] = interp1d(
            Rqn, R_[np.argsort(R_)], bounds_error=False, fill_value=(R_.min(), R_.max())
        )
        metadata["transform"] = "NQT"
        metadata["zerovalue"] = 0
        metadata["threshold"] = R__[R__ > 0].min()
    else:
        # apply (and consume) the stored inverse interpolator
        f = metadata.pop("inqt")
        R__ = f(R_)
        metadata["transform"] = None
        metadata["zerovalue"] = R__.min()
        metadata["threshold"] = R__[R__ > R__.min()].min()
    R[~idxNan] = R__
    return R.reshape(shape0), metadata
def sqrt_transform(R, metadata=None, inverse=False, **kwargs):
    """Square-root transform.

    Parameters
    ----------
    R: array-like
        Array of any shape to be transformed.
    metadata: dict, optional
        Metadata dictionary containing the transform, zerovalue and threshold
        attributes as described in the documentation of
        :py:mod:`pysteps.io.importers`.
    inverse: bool, optional
        If True, square the data to undo a previous sqrt transform.

    Returns
    -------
    R: array-like
        Array of any shape containing the (back-)transformed units.
    metadata: dict
        The metadata with updated attributes.
    """
    R = R.copy()

    if metadata is not None:
        metadata = metadata.copy()
    else:
        # no metadata supplied: zerovalue/threshold are unknown
        metadata = {
            "transform": "sqrt" if inverse else None,
            "zerovalue": np.nan,
            "threshold": np.nan,
        }

    if inverse:
        # inverse sqrt transform
        R = R ** 2
        metadata["transform"] = None
        metadata["zerovalue"] = metadata["zerovalue"] ** 2
        metadata["threshold"] = metadata["threshold"] ** 2
    else:
        # sqrt transform
        R = np.sqrt(R)
        metadata["transform"] = "sqrt"
        metadata["zerovalue"] = np.sqrt(metadata["zerovalue"])
        metadata["threshold"] = np.sqrt(metadata["threshold"])

    return R, metadata
| 30.120419 | 89 | 0.578046 |
import numpy as np
import scipy.stats as scipy_stats
import warnings
from scipy.interpolate import interp1d
warnings.filterwarnings(
"ignore", category=RuntimeWarning
)
def boxcox_transform(
    R, metadata=None, Lambda=None, threshold=None, zerovalue=None, inverse=False
):
    """One-parameter Box-Cox transform of precipitation intensities.

    Lambda == 0.0 reduces to a log transform; otherwise the standard
    (R**Lambda - 1) / Lambda form is used. Values below `threshold` are
    replaced by `zerovalue`. With inverse=True the transform is undone.
    Returns the (back-)transformed array and an updated metadata dict.
    """
    # work on a copy so the caller's array and metadata are not mutated
    R = R.copy()
    if metadata is None:
        if inverse:
            metadata = {"transform": "BoxCox"}
        else:
            metadata = {"transform": None}
    else:
        metadata = metadata.copy()
    if not inverse:
        # already transformed: nothing to do
        if metadata["transform"] == "BoxCox":
            return R, metadata
        if Lambda is None:
            Lambda = metadata.get("BoxCox_lambda", 0.0)
        if threshold is None:
            threshold = metadata.get("threshold", 0.1)
        zeros = R < threshold
        if Lambda == 0.0:
            # Lambda == 0 is the log-transform limit of Box-Cox
            R[~zeros] = np.log(R[~zeros])
            threshold = np.log(threshold)
        else:
            R[~zeros] = (R[~zeros] ** Lambda - 1) / Lambda
            threshold = (threshold ** Lambda - 1) / Lambda
        if zerovalue is None:
            zerovalue = threshold - 1
        R[zeros] = zerovalue
        metadata["transform"] = "BoxCox"
        metadata["BoxCox_lambda"] = Lambda
        metadata["zerovalue"] = zerovalue
        metadata["threshold"] = threshold
    elif inverse:
        # only undo a BoxCox/log transform; anything else passes through
        if metadata["transform"] not in ["BoxCox", "log"]:
            return R, metadata
        if Lambda is None:
            # pop: the lambda no longer applies after back-transforming
            Lambda = metadata.pop("BoxCox_lambda", 0.0)
        if threshold is None:
            threshold = metadata.get("threshold", -10.0)
        if zerovalue is None:
            zerovalue = 0.0
        if Lambda == 0.0:
            R = np.exp(R)
            threshold = np.exp(threshold)
        else:
            R = np.exp(np.log(Lambda * R + 1) / Lambda)
            threshold = np.exp(np.log(Lambda * threshold + 1) / Lambda)
        R[R < threshold] = zerovalue
        metadata["transform"] = None
        metadata["zerovalue"] = zerovalue
        metadata["threshold"] = threshold
    return R, metadata
def dB_transform(R, metadata=None, threshold=None, zerovalue=None, inverse=False):
    """Transform precipitation intensities to/from dB units.

    Values below `threshold` are replaced by `zerovalue` (default:
    threshold - 5 dB forward, 0.0 inverse). Returns the transformed
    array and an updated metadata dict.
    """
    R = R.copy()
    # default/copy the metadata dict so the caller's dict is not mutated
    if metadata is None:
        if inverse:
            metadata = {"transform": "dB"}
        else:
            metadata = {"transform": None}
    else:
        metadata = metadata.copy()
    if not inverse:
        # already in dB: nothing to do
        if metadata["transform"] == "dB":
            return R, metadata
        if threshold is None:
            threshold = metadata.get("threshold", 0.1)
        zeros = R < threshold
        # convert wet pixels to dB
        R[~zeros] = 10.0 * np.log10(R[~zeros])
        threshold = 10.0 * np.log10(threshold)
        if zerovalue is None:
            zerovalue = threshold - 5
        R[zeros] = zerovalue
        metadata["transform"] = "dB"
        metadata["zerovalue"] = zerovalue
        metadata["threshold"] = threshold
        return R, metadata
    elif inverse:
        # only undo a dB transform; anything else passes through
        if metadata["transform"] != "dB":
            return R, metadata
        if threshold is None:
            threshold = metadata.get("threshold", -10.0)
        if zerovalue is None:
            zerovalue = 0.0
        R = 10.0 ** (R / 10.0)
        threshold = 10.0 ** (threshold / 10.0)
        R[R < threshold] = zerovalue
        metadata["transform"] = None
        metadata["threshold"] = threshold
        metadata["zerovalue"] = zerovalue
        return R, metadata
def NQ_transform(R, metadata=None, inverse=False, **kwargs):
    """Normal quantile transform (NQT) as in Bogner et al (2012).

    Zero rain values are mapped to zero in norm space. NaNs are excluded
    from the transform and written back unchanged.

    Parameters
    ----------
    R: array-like
        Array of any shape to be transformed.
    metadata: dict, optional
        Metadata dictionary with transform/zerovalue/threshold attributes.
        For the inverse transform it must carry the interpolator stored
        under "inqt" by a previous forward call.
    inverse: bool, optional
        If True, apply the inverse transform. False by default.
    a: float, optional (keyword)
        Offset fraction for the plotting positions, typically in (0, 1).

    Returns
    -------
    R: array-like
        Transformed array with the original shape.
    metadata: dict
        The metadata with updated attributes.
    """
    a = kwargs.get("a", 0.0)
    R = R.copy()
    shape0 = R.shape
    R = R.ravel().astype(float)
    idxNan = np.isnan(R)
    R_ = R[~idxNan]
    if metadata is None:
        if inverse:
            metadata = {"transform": "NQT"}
        else:
            metadata = {"transform": None}
        metadata["zerovalue"] = np.min(R_)
    else:
        metadata = metadata.copy()
    if not inverse:
        # plotting positions, see
        # https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot#Plotting_position
        # (this line was corrupted to a stray "e" in the source; restored
        # from the documented copy of this function)
        n = R_.size
        Rpp = ((np.arange(n) + 1 - a) / (n + 1 - 2 * a)).reshape(R_.shape)
        # map the empirical quantiles onto the standard normal
        Rqn = scipy_stats.norm.ppf(Rpp)
        R__ = np.interp(R_, R_[np.argsort(R_)], Rqn)
        # zero rain is pinned to 0 in norm space
        R__[R[~idxNan] == metadata["zerovalue"]] = 0
        # store the inverse mapping for the back-transform
        metadata["inqt"] = interp1d(
            Rqn, R_[np.argsort(R_)], bounds_error=False, fill_value=(R_.min(), R_.max())
        )
        metadata["transform"] = "NQT"
        metadata["zerovalue"] = 0
        metadata["threshold"] = R__[R__ > 0].min()
    else:
        # apply (and consume) the stored inverse interpolator
        f = metadata.pop("inqt")
        R__ = f(R_)
        metadata["transform"] = None
        metadata["zerovalue"] = R__.min()
        metadata["threshold"] = R__[R__ > R__.min()].min()
    R[~idxNan] = R__
    return R.reshape(shape0), metadata
def sqrt_transform(R, metadata=None, inverse=False, **kwargs):
    """Square-root transform (or its inverse, squaring, if inverse=True).

    Returns the (back-)transformed array and an updated metadata dict;
    the metadata zerovalue/threshold are transformed consistently.
    """
    R = R.copy()
    # default/copy the metadata dict so the caller's dict is not mutated
    if metadata is None:
        if inverse:
            metadata = {"transform": "sqrt"}
        else:
            metadata = {"transform": None}
        # unknown without caller-supplied metadata
        metadata["zerovalue"] = np.nan
        metadata["threshold"] = np.nan
    else:
        metadata = metadata.copy()
    if not inverse:
        # forward sqrt transform
        R = np.sqrt(R)
        metadata["transform"] = "sqrt"
        metadata["zerovalue"] = np.sqrt(metadata["zerovalue"])
        metadata["threshold"] = np.sqrt(metadata["threshold"])
    else:
        # inverse transform: square the data back
        R = R ** 2
        metadata["transform"] = None
        metadata["zerovalue"] = metadata["zerovalue"] ** 2
        metadata["threshold"] = metadata["threshold"] ** 2
    return R, metadata
| true | true |
1c389cdc023786c1791a1386642881a485285220 | 4,197 | py | Python | SegNet_Mobile/train.py | Ice833/Semantic-Segmentation | 23d23f6da3b34884c044a2253d65a1e4097adb2d | [
"MIT"
] | 1 | 2020-12-16T08:29:13.000Z | 2020-12-16T08:29:13.000Z | SegNet_Mobile/train.py | Ice833/Semantic-Segmentation | 23d23f6da3b34884c044a2253d65a1e4097adb2d | [
"MIT"
] | null | null | null | SegNet_Mobile/train.py | Ice833/Semantic-Segmentation | 23d23f6da3b34884c044a2253d65a1e4097adb2d | [
"MIT"
] | null | null | null | from nets.segnet import mobilenet_segnet
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from PIL import Image
import keras
from keras import backend as K
import numpy as np
NCLASSES = 2
HEIGHT = 416
WIDTH = 416
def generate_arrays_from_file(lines,batch_size):
    """Infinite generator yielding (image batch, one-hot label batch).

    Each line of `lines` is "<jpg name>;<png name>". Input images are
    resized to (WIDTH, HEIGHT) and scaled to [0, 1]; label PNGs are
    resized to half resolution and one-hot encoded over NCLASSES, then
    flattened to (H/2 * W/2, NCLASSES) to match the model output.
    """
    # total number of samples
    n = len(lines)
    i = 0
    while 1:
        X_train = []
        Y_train = []
        # collect one batch_size worth of data
        for _ in range(batch_size):
            if i==0:
                # reshuffle at the start of every pass over the data
                np.random.shuffle(lines)
            name = lines[i].split(';')[0]
            # read the input image from file
            img = Image.open(r".\dataset2\jpg" + '/' + name)
            img = img.resize((WIDTH,HEIGHT))
            img = np.array(img)
            img = img/255
            X_train.append(img)
            name = (lines[i].split(';')[1]).replace("\n", "")
            # read the label image from file
            img = Image.open(r".\dataset2\png" + '/' + name)
            img = img.resize((int(WIDTH/2),int(HEIGHT/2)))
            img = np.array(img)
            # one-hot encode the class index stored in the red channel
            seg_labels = np.zeros((int(HEIGHT/2),int(WIDTH/2),NCLASSES))
            for c in range(NCLASSES):
                seg_labels[: , : , c ] = (img[:,:,0] == c ).astype(int)
            seg_labels = np.reshape(seg_labels, (-1,NCLASSES))
            Y_train.append(seg_labels)
            # wrap around and restart after a full pass
            i = (i+1) % n
        yield (np.array(X_train),np.array(Y_train))
def loss(y_true, y_pred):
    """Categorical cross-entropy between the labels and the predictions."""
    return K.categorical_crossentropy(y_true, y_pred)
if __name__ == "__main__":
    log_dir = "logs/"
    # Build the MobileNet-SegNet model.
    model = mobilenet_segnet(n_classes=NCLASSES,input_height=HEIGHT, input_width=WIDTH)
    # model.summary()
    # Download ImageNet-pretrained MobileNet backbone weights and load them
    # by name, skipping layers whose shapes do not match.
    BASE_WEIGHT_PATH = ('https://github.com/fchollet/deep-learning-models/'
                        'releases/download/v0.6/')
    model_name = 'mobilenet_%s_%d_tf_no_top.h5' % ( '1_0' , 224 )
    weight_path = BASE_WEIGHT_PATH + model_name
    weights_path = keras.utils.get_file(model_name, weight_path )
    model.load_weights(weights_path,by_name=True,skip_mismatch=True)
    # Open the dataset index txt (one "<jpg name>;<png name>" per line);
    # it drives data loading for training.
    with open(r".\dataset2\train.txt","r") as f:
        lines = f.readlines()
    # Shuffle the lines: shuffled data trains better. The fixed seed makes
    # the train/validation split reproducible.
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    # 90% of the samples for training, 10% for validation.
    num_val = int(len(lines)*0.1)
    num_train = len(lines) - num_val
    # Checkpointing: keep only the best weights, checked every 3 epochs.
    checkpoint_period = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss',
        save_weights_only=True,
        save_best_only=True,
        period=3
    )
    # LR schedule: halve the learning rate after val_loss fails to improve
    # for 3 epochs, then keep training.
    reduce_lr = ReduceLROnPlateau(
        monitor='val_loss',
        factor=0.5,
        patience=3,
        verbose=1
    )
    # Early stopping: if val_loss stops improving the model is essentially
    # trained, so stop. NOTE(review): defined but not passed to
    # fit_generator's callbacks below.
    early_stopping = EarlyStopping(
        monitor='val_loss',
        min_delta=0,
        patience=10,
        verbose=1
    )
    # Cross-entropy loss.
    model.compile(loss = loss,
            optimizer = Adam(lr=1e-4),
            metrics = ['accuracy'])
    batch_size = 4
    print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
    # Start training.
    model.fit_generator(generate_arrays_from_file(lines[:num_train], batch_size),
            steps_per_epoch=max(1, num_train//batch_size),
            validation_data=generate_arrays_from_file(lines[num_train:], batch_size),
            validation_steps=max(1, num_val//batch_size),
            epochs=50,
            initial_epoch=0,
            callbacks=[checkpoint_period, reduce_lr])
    model.save_weights(log_dir+'last1.h5')
| 35.268908 | 112 | 0.533715 | from nets.segnet import mobilenet_segnet
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from PIL import Image
import keras
from keras import backend as K
import numpy as np
NCLASSES = 2
HEIGHT = 416
WIDTH = 416
def generate_arrays_from_file(lines,batch_size):
    """Infinite generator yielding (image batch, one-hot label batch).

    Each line of `lines` is "<jpg name>;<png name>". Input images are
    resized to (WIDTH, HEIGHT) and scaled to [0, 1]; label PNGs are
    resized to half resolution and one-hot encoded over NCLASSES.
    """
    n = len(lines)
    i = 0
    while 1:
        X_train = []
        Y_train = []
        for _ in range(batch_size):
            if i==0:
                # reshuffle at the start of every pass over the data
                np.random.shuffle(lines)
            name = lines[i].split(';')[0]
            img = Image.open(r".\dataset2\jpg" + '/' + name)
            img = img.resize((WIDTH,HEIGHT))
            img = np.array(img)
            img = img/255
            X_train.append(img)
            name = (lines[i].split(';')[1]).replace("\n", "")
            img = Image.open(r".\dataset2\png" + '/' + name)
            img = img.resize((int(WIDTH/2),int(HEIGHT/2)))
            img = np.array(img)
            # one-hot encode the class index stored in the red channel
            seg_labels = np.zeros((int(HEIGHT/2),int(WIDTH/2),NCLASSES))
            for c in range(NCLASSES):
                seg_labels[: , : , c ] = (img[:,:,0] == c ).astype(int)
            seg_labels = np.reshape(seg_labels, (-1,NCLASSES))
            Y_train.append(seg_labels)
            # wrap around and restart after a full pass
            i = (i+1) % n
        yield (np.array(X_train),np.array(Y_train))
def loss(y_true, y_pred):
    """Categorical cross-entropy between the labels and the predictions."""
    return K.categorical_crossentropy(y_true, y_pred)
if __name__ == "__main__":
    log_dir = "logs/"
    # build the MobileNet-SegNet model
    model = mobilenet_segnet(n_classes=NCLASSES,input_height=HEIGHT, input_width=WIDTH)
    # download and load ImageNet-pretrained MobileNet backbone weights
    BASE_WEIGHT_PATH = ('https://github.com/fchollet/deep-learning-models/'
                        'releases/download/v0.6/')
    model_name = 'mobilenet_%s_%d_tf_no_top.h5' % ( '1_0' , 224 )
    weight_path = BASE_WEIGHT_PATH + model_name
    weights_path = keras.utils.get_file(model_name, weight_path )
    model.load_weights(weights_path,by_name=True,skip_mismatch=True)
    # read the dataset index file ("<jpg name>;<png name>" per line)
    with open(r".\dataset2\train.txt","r") as f:
        lines = f.readlines()
    # reproducible shuffle of the sample list
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    # 90% of the samples for training, 10% for validation
    num_val = int(len(lines)*0.1)
    num_train = len(lines) - num_val
    # keep only the best weights, checked every 3 epochs
    checkpoint_period = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss',
        save_weights_only=True,
        save_best_only=True,
        period=3
    )
    # halve the learning rate after 3 epochs without val_loss improvement
    reduce_lr = ReduceLROnPlateau(
        monitor='val_loss',
        factor=0.5,
        patience=3,
        verbose=1
    )
    # NOTE(review): defined but not passed to fit_generator below
    early_stopping = EarlyStopping(
        monitor='val_loss',
        min_delta=0,
        patience=10,
        verbose=1
    )
    model.compile(loss = loss,
            optimizer = Adam(lr=1e-4),
            metrics = ['accuracy'])
    batch_size = 4
    print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
    model.fit_generator(generate_arrays_from_file(lines[:num_train], batch_size),
            steps_per_epoch=max(1, num_train//batch_size),
            validation_data=generate_arrays_from_file(lines[num_train:], batch_size),
            validation_steps=max(1, num_val//batch_size),
            epochs=50,
            initial_epoch=0,
            callbacks=[checkpoint_period, reduce_lr])
    model.save_weights(log_dir+'last1.h5')
| true | true |
1c389df546c6534fdb305322dbcd914463587192 | 772 | py | Python | Module01/String/StringOp.py | fenglihanxiao/Python | 872baf3a3a5ee42740161152605ca2b1ddf4cd30 | [
"MIT"
] | null | null | null | Module01/String/StringOp.py | fenglihanxiao/Python | 872baf3a3a5ee42740161152605ca2b1ddf4cd30 | [
"MIT"
] | null | null | null | Module01/String/StringOp.py | fenglihanxiao/Python | 872baf3a3a5ee42740161152605ca2b1ddf4cd30 | [
"MIT"
] | null | null | null | """
1. String basic operations
2. Reference StringAPI.png
"""
str1 = "i have a family where have three amy people."
print(len(str1))
print(str1[5])
print(max(str1))
print(str1.index("amy"))
print("have" in str1)
print("feng" not in str1)
print(str1.islower())
"""
1. String operation isXXX method
2. Reference StringAPI.png
"""
str2 = "aaa"
print(str2.islower())
"""
1. startswith and endswith
"""
str3 = "Feng"
print(str3.startswith("Fe"))
files = []
filename = "test.jpg"
filename2 = "test.png"
filename3 = "test.py"
files.append(filename)
files.append(filename2)
files.append(filename3)
for f in files:
if f.endswith(".jpg") or f.endswith(".png"):
print("Image files:%s" % f)
else:
print("The other type of file:%s" % f) | 19.3 | 53 | 0.648964 |
str1 = "i have a family where have three amy people."
print(len(str1))
print(str1[5])
print(max(str1))
print(str1.index("amy"))
print("have" in str1)
print("feng" not in str1)
print(str1.islower())
str2 = "aaa"
print(str2.islower())
str3 = "Feng"
print(str3.startswith("Fe"))
files = []
filename = "test.jpg"
filename2 = "test.png"
filename3 = "test.py"
files.append(filename)
files.append(filename2)
files.append(filename3)
for f in files:
if f.endswith(".jpg") or f.endswith(".png"):
print("Image files:%s" % f)
else:
print("The other type of file:%s" % f) | true | true |
1c389dfcf00846c922740280931a38638ee8ddc2 | 67,237 | py | Python | stm32ral.py | adamgreig/stm32ral | d316aaad753fd427b1d9fe641ca9611f5f20053c | [
"Apache-2.0",
"MIT"
] | 24 | 2019-02-20T19:01:43.000Z | 2021-09-30T13:45:01.000Z | stm32ral.py | adamgreig/stm32ral | d316aaad753fd427b1d9fe641ca9611f5f20053c | [
"Apache-2.0",
"MIT"
] | 10 | 2019-02-27T22:41:42.000Z | 2021-02-27T11:38:20.000Z | stm32ral.py | adamgreig/stm32ral | d316aaad753fd427b1d9fe641ca9611f5f20053c | [
"Apache-2.0",
"MIT"
] | 4 | 2019-04-20T08:36:46.000Z | 2020-03-23T12:56:39.000Z | #!/usr/bin/env python3
"""
stm32ral.py
Copyright 2018 Adam Greig
Licensed under MIT and Apache 2.0, see LICENSE_MIT and LICENSE_APACHE.
"""
import os
import copy
import argparse
import itertools
import subprocess
import multiprocessing
import xml.etree.ElementTree as ET
from fnmatch import fnmatch
CRATE_LIB_PREAMBLE = """\
// Copyright 2018 Adam Greig
// See LICENSE-APACHE and LICENSE-MIT for license details.
//! This project provides a register access layer (RAL) for all
//! STM32 microcontrollers.
//!
//! When built, you must specify a device feature, such as `stm32f405`.
//! This will cause all modules in that device's module to be re-exported
//! from the top level, so that for example `stm32ral::gpio` will resolve to
//! `stm32ral::stm32f4::stm32f405::gpio`.
//!
//! In the generated documentation, all devices are visible inside their family
//! modules, but when built for a specific device, only that devices' constants
//! will be available.
//!
//! See the
//! [README](https://github.com/adamgreig/stm32ral/blob/master/README.md)
//! for example usage.
#![no_std]
#[cfg(feature="rt")]
extern crate cortex_m_rt;
mod register;
#[cfg(feature="rt")]
pub use cortex_m_rt::interrupt;
pub use crate::register::{RORegister, UnsafeRORegister};
pub use crate::register::{WORegister, UnsafeWORegister};
pub use crate::register::{RWRegister, UnsafeRWRegister};
"""
CRATE_CARGO_TOML_PREAMBLE = """\
# Generated by stm32ral.py. Do not edit manually.
[package]
name = "stm32ral"
authors = ["Adam Greig <adam@adamgreig.com>"]
description = "Register access layer for all STM32 microcontrollers"
repository = "https://github.com/adamgreig/stm32ral"
documentation = "https://docs.rs/stm32ral"
readme = "README.md"
keywords = ["stm32", "embedded", "no_std"]
categories = ["embedded", "no-std"]
license = "MIT/Apache-2.0"
edition = "2018"
exclude = ["/stm32-rs"]
# Change version in stm32ral.py, not in Cargo.toml!
version = "0.7.0"
[package.metadata.docs.rs]
features = ["doc"]
no-default-features = true
targets = []
[dependencies]
# Change dependency versions in stm32ral.py, not here!
external_cortex_m = { package = "cortex-m", version = "0.7.3" }
cortex-m-rt = { version = ">=0.6.15,<0.8", optional = true }
[features]
default = ["rt"]
rt = ["cortex-m-rt/device"]
inline-asm = ["external_cortex_m/inline-asm"]
rtfm = ["rtic"]
rtic = []
nosync = []
doc = []
"""
BUILD_RS_TEMPLATE = """\
use std::env;
use std::fs;
use std::path::PathBuf;
fn main() {{
if env::var_os("CARGO_FEATURE_RT").is_some() {{
let out = &PathBuf::from(env::var_os("OUT_DIR").unwrap());
println!("cargo:rustc-link-search={{}}", out.display());
let device_file = {device_clauses};
fs::copy(device_file, out.join("device.x")).unwrap();
println!("cargo:rerun-if-changed={{}}", device_file);
}}
println!("cargo:rerun-if-changed=build.rs");
}}
"""
UNSAFE_REGISTERS = [
# DMA peripheral and memory address registers
"S?PAR", "S?M?AR", "CPAR?", "CMAR?",
# DMA2D address registers
"FGMAR", "BGMAR", "FGCMAR", "BGCMAR", "OMAR",
# LTDC frame buffer address register
"L?CFBAR",
# USB OTG DMA address register
"DIEPDMA*", "DOEPDMA*", "HCDMA*",
# Ethernet DMA descriptor list address register
"DMARDLAR", "DMATDLAR",
# Cache operations
"ICIALLU", "?C?MVA?", "DC?SW", "DCCIMVAC", "DCCISW", "BPIALL",
]
class Node:
    """
    A node in the overall graph.

    Base class for all SVD-derived nodes; carries no behaviour of its own.
    """
    pass
class EnumeratedValue(Node):
    """
    Represents a possible named value for a field.
    Has a name, description, and value.
    Belongs to one or more parent Fields.
    """
    def __init__(self, name, desc, value, register_size):
        self.name = name
        self.desc = desc
        self.value = value
        self.register_size = register_size
        # Rust identifiers cannot start with a digit: prefix an underscore.
        if self.name[0] in "0123456789":
            self.name = "_" + self.name
            print("Name started with a number:", self.name)
    def to_dict(self):
        return {"name": self.name, "desc": self.desc, "value": self.value}
    def to_rust(self, field_width):
        # Emit a documented Rust `pub const`, zero-padded to the field width.
        return f"""
/// 0b{self.value:0{field_width}b}: {escape_desc(self.desc)}
pub const {self.name}: u{self.register_size} = 0b{self.value:0{field_width}b};"""
    @classmethod
    def from_svd(cls, svd, node, register_size):
        # get_string/get_int are helpers defined elsewhere in this file.
        name = get_string(node, 'name')
        desc = get_string(node, 'description')
        value = get_int(node, 'value')
        return cls(name, desc, value, register_size)
    def __eq__(self, other):
        return (
            self.name == other.name and
            self.value == other.value and
            self.desc == other.desc)
    def __lt__(self, other):
        # ordering by value only; used for sorting values within an enum
        return self.value < other.value
class EnumeratedValues(Node):
    """
    Represents possible named values for a field, emitted as a concrete Enum.
    Contains many child EnumeratedValues.

    The name is one of "R", "W", or "RW", matching the SVD usage.
    """
    def __init__(self, name):
        self.name = name
        self.values = []
    def to_dict(self):
        return {"name": self.name,
                "values": [v.to_dict() for v in self.values]}
    def to_rust(self, field_width):
        values = "\n".join(v.to_rust(field_width) for v in self.values)
        if self.name == "R":
            desc = "Read-only values"
        elif self.name == "W":
            desc = "Write-only values"
        else:
            desc = "Read-write values"
        # mark empty enums so the generated docs explain the empty module
        if not values:
            desc += " (empty)"
        return f"""\
/// {desc}
pub mod {self.name} {{
{values}
}}"""
    @classmethod
    def from_svd(cls, svd, node, register_size):
        # map the SVD `usage` element onto the module name
        usage = get_string(node, 'usage')
        if usage == "read":
            name = "R"
        elif usage == "write":
            name = "W"
        else:
            name = "RW"
        evs = cls(name)
        for ev in node.findall('enumeratedValue'):
            evs.values.append(EnumeratedValue.from_svd(svd, ev, register_size))
        return evs
    @classmethod
    def empty(cls, name):
        """Make an EnumeratedValues with no values."""
        return cls(name)
    def __eq__(self, other):
        # equal iff same name and same value set (order-insensitive)
        return (
            self.name == other.name and
            len(self.values) == len(other.values) and
            all(v1 == v2 for v1, v2
                in zip(sorted(self.values), sorted(other.values))))
class EnumeratedValuesLink(Node):
    """
    Represents an EnumeratedValues enum which is included with 'use'.

    Delegates name/values to the linked enum so it can stand in for an
    EnumeratedValues in comparisons and output generation.
    """
    def __init__(self, field, evs):
        # field: the Field owning the canonical enum; evs: that enum
        self.field = field
        self.evs = evs
    def to_dict(self):
        return {"field": self.field.name, "evs": self.evs.name}
    def to_rust(self, field_width):
        # NOTE(review): the leading `::super::` path looks unusual for a
        # 2018-edition crate -- confirm the generated Rust compiles.
        return f"pub use ::super::{self.field.name}::{self.evs.name};"
    def __eq__(self, other):
        # compare as the underlying enum
        return self.evs.__eq__(other)
    @property
    def name(self):
        return self.evs.name
    @property
    def values(self):
        return self.evs.values
class Field(Node):
    """
    Represents a field in a register.
    Has a name, description, width, offset, access, and three child
    EnumeratedValues: R, W, and RW.
    Belongs to a parent Register.
    May contain one or more child EnumeratedValues.
    register_size is the size of the register containing this field
    """
    def __init__(self, name, desc, register_size, width, offset, access, r, w, rw):
        self.name = name
        self.desc = desc
        self.width = width
        self.offset = offset
        self.access = access
        self.r = r
        self.w = w
        self.rw = rw
        self.register_size = register_size
        # Rust identifiers cannot start with a digit: prefix an underscore.
        if self.name[0] in "0123456789":
            self.name = "_" + self.name
            print("Name started with a number:", self.name)
    def to_dict(self):
        return {"name": self.name, "desc": self.desc, "width": self.width,
                "offset": self.offset, "access": self.access,
                "r": self.r.to_dict(), "w": self.w.to_dict(),
                "rw": self.rw.to_dict()}
    def to_rust(self):
        """Emit a Rust module with offset/mask consts and value enums."""
        # pick a readable literal base: "1" for single bits, binary for
        # narrow masks, hex otherwise
        mask = 2**self.width - 1
        if self.width == 1:
            mask = "1"
        elif self.width < 6:
            mask = f"0b{mask:b}"
        else:
            mask = f"0x{mask:x}"
        bits = f"bit{'s' if self.width>1 else ''}"
        ty = f"u{self.register_size}"
        return f"""
/// {escape_desc(self.desc)}
pub mod {self.name} {{
    /// Offset ({self.offset} bits)
    pub const offset: {ty} = {self.offset};
    /// Mask ({self.width} {bits}: {mask} << {self.offset})
    pub const mask: {ty} = {mask} << offset;
{self.r.to_rust(self.width)}
{self.w.to_rust(self.width)}
{self.rw.to_rust(self.width)}
}}"""
    @classmethod
    def from_svd(cls, svd, node, ctx):
        ctx = ctx.inherit(node)
        name = get_string(node, 'name')
        desc = get_string(node, 'description')
        width = get_int(node, 'bitWidth')
        offset = get_int(node, 'bitOffset')
        access = ctx.access
        # Round up register_size to a size that's representable as a Rust
        # unsigned integer. We probably will never see a register that's
        # not a multiple of 8, so this might be overkill.
        register_size = (ctx.size + 7) & (~7)
        if register_size != ctx.size:
            print(f"Field {name} will be represented using u{register_size}s, "
                  f"although the register size is {ctx.size} bits")
        r = EnumeratedValues.empty("R")
        w = EnumeratedValues.empty("W")
        rw = EnumeratedValues.empty("RW")
        for evs in node.findall('enumeratedValues'):
            # resolve SVD derivedFrom references to the original definition
            if 'derivedFrom' in evs.attrib:
                df = evs.attrib['derivedFrom']
                evs = svd.find(f".//enumeratedValues[name='{df}']")
                if evs is None:
                    raise ValueError(f"Can't find derivedFrom {df}")
            evs = EnumeratedValues.from_svd(svd, evs, register_size)
            evsname = evs.name
            if evsname == "R":
                r = evs
            elif evsname == "W":
                w = evs
            else:
                rw = evs
        field = cls(name, desc, register_size, width, offset, access, r, w, rw)
        return field
    def __eq__(self, other):
        # description intentionally excluded from equality
        return (
            self.name == other.name and
            self.width == other.width and
            self.offset == other.offset and
            self.access == other.access and
            self.r == other.r and self.w == other.w and self.rw == other.rw)
    def __lt__(self, other):
        # sort by bit offset first, then name, for stable output order
        return (self.offset, self.name) < (other.offset, other.name)
class FieldLink(Node):
    """
    A Field which outputs a `use` statement instead of a module.

    Wraps a parent Field and forwards its attributes, so it can stand in
    for a Field while emitting only a Rust re-export.
    """
    def __init__(self, parent, path):
        self.parent = parent
        self.path = path
        self.r = parent.r
        self.w = parent.w
        self.rw = parent.rw
    def to_dict(self):
        return {"parent": self.parent.name, "path": self.path}
    def to_rust(self):
        # re-export the parent's module from its original path
        return f"pub use {self.path}::{self.parent.name};"
    def __lt__(self, other):
        return self.parent.__lt__(other)
    def __eq__(self, other):
        return self.parent.__eq__(other)
    @property
    def name(self):
        return self.parent.name
    @property
    def desc(self):
        return self.parent.desc
    @property
    def width(self):
        return self.parent.width
    @property
    def offset(self):
        return self.parent.offset
    @property
    def access(self):
        return self.parent.access
class RegisterCtx:
    """
    The inheritance context for register properties.

    Equivalent to an SVD `registerPropertiesGroup`: size, access, reset
    value and reset mask, each of which may be inherited from an
    enclosing scope or overridden by a child node.
    """
    def __init__(self, size, access, reset_value, reset_mask):
        self.size = size
        self.access = access
        self.reset_value = reset_value
        self.reset_mask = reset_mask
    @classmethod
    def empty(cls):
        """Make a context with no properties set."""
        return cls(None, None, None, None)
    def copy(self):
        """Return an independent copy of this context."""
        return RegisterCtx(self.size, self.access,
                           self.reset_value, self.reset_mask)
    def update_from_node(self, node):
        """
        Copies any specified properties from the given node into self,
        leaving unspecified properties unchanged. Returns self for
        easier chaining.
        """
        updates = (
            ("size", get_int(node, 'size')),
            ("access", get_string(node, 'access')),
            ("reset_value", get_int(node, 'resetValue')),
            ("reset_mask", get_int(node, 'resetMask')),
        )
        for attr, value in updates:
            # only overwrite properties the node actually specifies
            if value is not None:
                setattr(self, attr, value)
        return self
    def inherit(self, node):
        """Return a copy of self which has been updated using `node`."""
        return self.copy().update_from_node(node)
class Register(Node):
    """
    Represents a register in a peripheral.
    Has a name, description, offset, size, access, reset value and mask.
    Belongs to a parent Peripheral.
    May contain one or more child Fields.
    """
    def __init__(self, name, desc, offset, size, access, reset_value,
                 reset_mask):
        self.name = name
        self.desc = desc
        self.offset = offset
        self.size = size
        self.access = access
        self.reset_value = reset_value
        self.reset_mask = reset_mask
        self.fields = []
    def to_dict(self):
        return {"name": self.name, "desc": self.desc, "offset": self.offset,
                "size": self.size, "access": self.access,
                "reset_value": self.reset_value, "reset_mask": self.reset_mask,
                "fields": [x.to_dict() for x in self.fields]}
    def to_rust_mod(self):
        """
        Returns a Rust public module containing a public module for each
        of this register's fields.
        """
        fields = "\n".join(f.to_rust() for f in self.fields)
        return f"""
/// {escape_desc(self.desc)}
pub mod {self.name} {{
{fields}
}}"""
    def to_regtype(self):
        """
        Return the type of register (RORegister, UnsafeWORegister, etc)
        used for this Register.
        """
        regtype = {"read-only": "RORegister", "write-only": "WORegister",
                   "read-write": "RWRegister"}[self.access]
        # registers matching UNSAFE_REGISTERS patterns (DMA addresses,
        # cache ops, ...) get the Unsafe variant
        for unsafe in UNSAFE_REGISTERS:
            if fnmatch(self.name, unsafe):
                regtype = "Unsafe" + regtype
                break
        return regtype
    def to_rust_struct_entry(self):
        """Returns the RegisterBlock entry for this register."""
        regtype = self.to_regtype()
        return f"""
/// {escape_desc(self.desc)}
pub {self.name}: {regtype}<u{self.size}>,
"""
    @classmethod
    def from_svd(cls, svd, node, ctx):
        ctx = ctx.inherit(node)
        name = get_string(node, 'name')
        desc = get_string(node, 'description')
        offset = get_int(node, 'addressOffset')
        register = cls(name, desc, offset, ctx.size, ctx.access,
                       ctx.reset_value, ctx.reset_mask)
        fields = node.find('fields')
        if fields is not None:
            for field in fields.findall('field'):
                register.fields.append(Field.from_svd(svd, field, ctx))
        if register.access is None:
            # This happens if access is defined per-field, typically because
            # there is one or two read-write among many read-only registers.
            field_accesses = [f.access for f in register.fields]
            if all(access == "read-only" for access in field_accesses):
                register.access = "read-only"
            elif all(access == "write-only" for access in field_accesses):
                register.access = "write-only"
            else:
                register.access = "read-write"
        return register
    def __eq__(self, other):
        # description and reset values intentionally excluded from equality
        return (
            self.name == other.name and
            self.offset == other.offset and
            self.size == other.size and
            self.access == other.access and
            sorted(self.fields) == sorted(other.fields)
        )
    def __lt__(self, other):
        # sort by address offset first, then name, for stable output order
        return (self.offset, self.name) < (other.offset, other.name)
    def refactor_common_field_values(self):
        """
        Go through all fields in this register and where two fields have the
        same set of enumerated values, replace the latter's with a link to the
        former's.
        """
        replace = []
        to_replace = set()
        # combinations() materialises the enumerate iterator, so each
        # ordered pair of distinct fields is visited exactly once
        fields = enumerate(self.fields)
        for (idx1, f1), (idx2, f2) in itertools.combinations(fields, 2):
            if f1 is f2 or idx1 in to_replace or idx2 in to_replace:
                continue
            # only non-empty enums are worth de-duplicating
            if f1.r == f2.r and f1.r.values:
                replace.append((idx1, idx2, 'r'))
                to_replace.add(idx2)
            if f1.w == f2.w and f1.w.values:
                replace.append((idx1, idx2, 'w'))
                to_replace.add(idx2)
            if f1.rw == f2.rw and f1.rw.values:
                replace.append((idx1, idx2, 'rw'))
                to_replace.add(idx2)
        # apply the replacements after scanning so indices stay valid
        for idx1, idx2, name in replace:
            f1 = self.fields[idx1]
            evs1 = getattr(f1, name)
            f2 = EnumeratedValuesLink(f1, evs1)
            setattr(self.fields[idx2], name, f2)
    def consume(self, other, parent):
        """
        Adds any fields from other to self, and adjusts self's name to the
        common prefix of the two names, if such a prefix is at least
        2 letters long.
        """
        my_field_names = set(f.name for f in self.fields)
        for field in other.fields:
            if field.name not in my_field_names:
                self.fields.append(field)
        # merged register documents both originals (as Rust /// doc lines)
        self.desc = "\n/// ".join([
            f"{self.name} and {other.name}",
            f"{self.name}: {escape_desc(self.desc)}",
            f"{other.name}: {escape_desc(other.desc)}",
        ])
        self.size = max(self.size, other.size)
        self.access = "read-write"
        # common_name is a helper defined elsewhere in this file
        newname = common_name(self.name, other.name, parent.name)
        if newname != self.name[:len(newname)]:
            print(f"Warning [{parent.name}]: {self.name}+{other.name} "
                  f"-> {newname}: suspected name compaction failure")
        if newname != self.name:
            # only rename if no sibling register already uses the new name
            if newname not in [r.name for r in parent.registers]:
                self.name = newname
            else:
                print(f"Warning [{parent.name}]: {self.name} + {other.name} "
                      f"-> {newname}: name already exists, using {self.name}")
class PeripheralInstance(Node):
    """
    Represents a specific peripheral instance in a device.
    For example, GPIOA is a PeripheralInstance, while GPIO is a
    PeripheralPrototype which would contain GPIOA. For many
    peripherals there is a single PeripheralPrototype containing
    a single PeripheralInstance.
    Has a name and base address.
    Belongs to a parent PeripheralPrototype.
    """
    def __init__(self, name, addr, reset_values):
        # name: instance name as given in the SVD, e.g. "GPIOA".
        # addr: base address of this instance's register block.
        # reset_values: dict mapping register offset -> reset value.
        self.name = name
        self.addr = addr
        self.reset_values = reset_values
    def to_dict(self):
        # Serialise this instance to a plain dict (used for dumps/debugging).
        return {"name": self.name, "addr": self.addr,
                "reset_values": self.reset_values}
    def to_rust(self, registers):
        # Generate the Rust module for this instance: an INSTANCE constant,
        # per-register reset values, take()/release()/steal() ownership
        # functions, and a raw pointer constant.
        #
        # `registers`: the parent prototype's Register list; used to map
        # the offset-keyed reset_values onto register names.
        registers = {r.offset: r.name for r in registers}
        resets = ", ".join(
            f"{registers[k]}: 0x{v:08X}" for k, v in self.reset_values.items())
        # NOTE: the generated text is run through rustfmt by the caller, so
        # its indentation here is not significant.
        return f"""
/// Access functions for the {self.name} peripheral instance
pub mod {self.name} {{
use super::ResetValues;
#[cfg(not(feature="nosync"))]
use super::Instance;
#[cfg(not(feature="nosync"))]
const INSTANCE: Instance = Instance {{
addr: 0x{self.addr:08x},
_marker: ::core::marker::PhantomData,
}};
/// Reset values for each field in {self.name}
pub const reset: ResetValues = ResetValues {{
{resets}
}};
#[cfg(not(feature="nosync"))]
#[allow(renamed_and_removed_lints)]
#[allow(private_no_mangle_statics)]
#[no_mangle]
static mut {self.name}_TAKEN: bool = false;
/// Safe access to {self.name}
///
/// This function returns `Some(Instance)` if this instance is not
/// currently taken, and `None` if it is. This ensures that if you
/// do get `Some(Instance)`, you are ensured unique access to
/// the peripheral and there cannot be data races (unless other
/// code uses `unsafe`, of course). You can then pass the
/// `Instance` around to other functions as required. When you're
/// done with it, you can call `release(instance)` to return it.
///
/// `Instance` itself dereferences to a `RegisterBlock`, which
/// provides access to the peripheral's registers.
#[cfg(not(feature="nosync"))]
#[inline]
pub fn take() -> Option<Instance> {{
external_cortex_m::interrupt::free(|_| unsafe {{
if {self.name}_TAKEN {{
None
}} else {{
{self.name}_TAKEN = true;
Some(INSTANCE)
}}
}})
}}
/// Release exclusive access to {self.name}
///
/// This function allows you to return an `Instance` so that it
/// is available to `take()` again. This function will panic if
/// you return a different `Instance` or if this instance is not
/// already taken.
#[cfg(not(feature="nosync"))]
#[inline]
pub fn release(inst: Instance) {{
external_cortex_m::interrupt::free(|_| unsafe {{
if {self.name}_TAKEN && inst.addr == INSTANCE.addr {{
{self.name}_TAKEN = false;
}} else {{
panic!("Released a peripheral which was not taken");
}}
}});
}}
/// Unsafely steal {self.name}
///
/// This function is similar to take() but forcibly takes the
/// Instance, marking it as taken irregardless of its previous
/// state.
#[cfg(not(feature="nosync"))]
#[inline]
pub unsafe fn steal() -> Instance {{
{self.name}_TAKEN = true;
INSTANCE
}}
}}
/// Raw pointer to {self.name}
///
/// Dereferencing this is unsafe because you are not ensured unique
/// access to the peripheral, so you may encounter data races with
/// other users of this peripheral. It is up to you to ensure you
/// will not cause data races.
///
/// This constant is provided for ease of use in unsafe code: you can
/// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`.
pub const {self.name}: *const RegisterBlock =
0x{self.addr:08x} as *const _;"""
    def __lt__(self, other):
        # Instances sort by name, for stable generated output.
        return self.name < other.name
    def __eq__(self, other):
        # Instances compare equal when name, address and reset values all
        # match. NOTE: defining __eq__ without __hash__ makes instances
        # unhashable (Python sets __hash__ to None).
        return (self.name == other.name and
                self.addr == other.addr and
                self.reset_values == other.reset_values)
class PeripheralPrototype(Node):
    """
    Represents a generic peripheral with registers.
    Has a name and description.
    Belongs to a parent Device.
    Contains child PeripheralInstances and Registers.
    Also contains a list of device names which contain this peripheral,
    used to ensure shared peripherals are only compiled when the crate
    is built for a device which uses them.
    """
    def __init__(self, name, desc):
        # Peripheral names become Rust module names, so lowercase them.
        self.name = name.lower()
        self.desc = desc
        self.registers = []
        self.instances = []
        self.parent_device_names = []
    def to_dict(self):
        """Serialise this peripheral and its children to a plain dict."""
        return {"name": self.name, "desc": self.desc,
                "registers": [x.to_dict() for x in self.registers],
                "instances": [x.to_dict() for x in self.instances]}
    def to_rust_register_block(self):
        """
        Creates a RegisterBlock for this peripheral.

        Registers are emitted in offset order; gaps between registers are
        filled with `_reservedN` padding arrays so the struct layout matches
        the hardware memory map. Raises RuntimeError if registers overlap
        (aliased registers must have been merged beforehand).
        """
        lines = []
        address = 0
        reservedctr = 1
        for register in sorted(self.registers):
            if register.offset < address:
                raise RuntimeError("Unexpected register aliasing")
            if register.offset != address:
                # Pad the gap, preferring the widest element types.
                gaps = []
                u32s = (register.offset - address) // 4
                if u32s != 0:
                    gaps.append(f"[u32; {u32s}]")
                    address += u32s * 4
                u16s = (register.offset - address) // 2
                if u16s != 0:
                    gaps.append(f"[u16; {u16s}]")
                    address += u16s * 2
                u8s = register.offset - address
                if u8s != 0:
                    gaps.append(f"[u8; {u8s}]")
                    address += u8s
                for gaptype in gaps:
                    lines.append(f"_reserved{reservedctr}: {gaptype},")
                    reservedctr += 1
            lines.append(register.to_rust_struct_entry())
            address += register.size // 8
        lines = "\n".join(lines)
        return f"""
#[repr(C)]
pub struct RegisterBlock {{
{lines}
}}"""
    def to_rust_reset_values(self):
        """Creates a ResetValues struct for this peripheral."""
        lines = []
        for register in sorted(self.registers):
            lines.append(f"pub {register.name}: u{register.size},")
        lines = "\n".join(lines)
        return f"""
pub struct ResetValues {{
{lines}
}}"""
    def to_rust_instance(self):
        """Creates an Instance struct for this peripheral."""
        return """
#[cfg(not(feature="nosync"))]
pub struct Instance {
pub(crate) addr: u32,
pub(crate) _marker: PhantomData<*const RegisterBlock>,
}
#[cfg(not(feature="nosync"))]
impl ::core::ops::Deref for Instance {
type Target = RegisterBlock;
#[inline(always)]
fn deref(&self) -> &RegisterBlock {
unsafe { &*(self.addr as *const _) }
}
}
#[cfg(feature="rtic")]
unsafe impl Send for Instance {}
"""
    def to_rust_file(self, path):
        """
        Creates {peripheral}.rs in path, and writes all register modules,
        field modules, the register block, and any instances to that file.
        Finally runs rustfmt over the new file.
        """
        # Sort the register access types so output is deterministic across
        # runs (bare set iteration order depends on the hash seed).
        regtypes = ", ".join(sorted({r.to_regtype() for r in self.registers}))
        if self.desc is None:
            # Debug aid: dump the peripheral when the SVD had no description
            # (escape_desc() below turns the None into an empty string).
            print(self.to_dict())
        desc = "\n//! ".join(escape_desc(self.desc).split("\n"))
        if len(self.parent_device_names) > 1:
            # List every device that shares this peripheral in the docs.
            desc += "\n//!\n"
            desc += "//! Used by: " + ', '.join(
                sorted(set(self.parent_device_names)))
        preamble = "\n".join([
            "#![allow(non_snake_case, non_upper_case_globals)]",
            "#![allow(non_camel_case_types)]",
            f"//! {desc}",
            "",
            "#[cfg(not(feature=\"nosync\"))]",
            "use core::marker::PhantomData;",
            f"use crate::{{{regtypes}}};",
            "",
        ])
        modules = "\n".join(r.to_rust_mod() for r in self.registers)
        instances = "\n".join(i.to_rust(self.registers)
                              for i in sorted(self.instances))
        fname = os.path.join(path, f"{self.name}.rs")
        with open(fname, "w") as f:
            f.write(preamble)
            f.write(modules)
            f.write(self.to_rust_register_block())
            f.write(self.to_rust_reset_values())
            f.write(self.to_rust_instance())
            f.write(instances)
        rustfmt(fname)
    def to_parent_entry(self):
        """Returns the `pub mod` line for the parent device's mod.rs."""
        return f"pub mod {self.name};\n"
    def to_struct_entry(self):
        """Returns Peripherals-struct field lines, one per instance."""
        lines = []
        for instance in self.instances:
            lines.append(f"pub {instance.name}: {self.name}::Instance,")
        return "\n".join(lines)
    def to_struct_steal(self):
        """Returns Peripherals::steal() initialiser lines, one per instance."""
        lines = []
        for instance in self.instances:
            lines.append(
                f"{instance.name}: "
                f"{self.name}::{instance.name}::steal(),")
        return "\n".join(lines) + "\n"
    @classmethod
    def from_svd(cls, svd, node, register_ctx):
        """
        Build a PeripheralPrototype (with one PeripheralInstance) from an
        SVD <peripheral> element, resolving `derivedFrom` references and
        expanding dim'd registers and clusters.

        Raises ValueError if a derivedFrom target is missing or the
        peripheral has no registers.
        """
        name = get_string(node, 'name')
        addr = get_int(node, 'baseAddress')
        desc = get_string(node, 'description')
        registers = node.find('registers')
        if 'derivedFrom' in node.attrib:
            # Pull description/registers/defaults from the base peripheral;
            # the derived node still supplies its own name and base address.
            df = node.attrib['derivedFrom']
            df_node = svd.find(f".//peripheral[name='{df}']")
            if df_node is None:
                # BUGFIX: this message previously lacked the f-prefix, so
                # the literal "{df}" was printed instead of the name.
                raise ValueError(f"Can't find derivedFrom[{df}]")
            desc = get_string(df_node, 'description', default=desc)
            addr = get_int(node, 'baseAddress', addr)
            registers = df_node.find('registers')
            register_ctx = register_ctx.inherit(df_node)
        register_ctx = register_ctx.inherit(node)
        peripheral = cls(name, desc)
        if registers is None:
            raise ValueError(f"No registers found for peripheral {name}")
        ctx = register_ctx
        for register in registers.findall('register'):
            # If register has a 'dim' field, expand to multiple registers
            for dimr in expand_dim(register):
                peripheral.registers.append(Register.from_svd(svd, dimr, ctx))
        for cluster in registers.findall('cluster'):
            # Clusters may themselves be dim'd; expand both levels.
            for dimc in expand_dim(cluster):
                for clusr in expand_cluster(dimc):
                    reg = Register.from_svd(svd, clusr, ctx)
                    peripheral.registers.append(reg)
        resets = {r.offset: r.reset_value for r in peripheral.registers}
        peripheral.instances.append(PeripheralInstance(name, addr, resets))
        return peripheral
    def consume(self, other, parent):
        """
        Adds any PeripheralInstances from other to self, and adjusts self's
        name to the common prefix of the two names, if such a prefix is
        at least 3 letters long.
        """
        self.instances += other.instances
        newname = common_name(self.name, other.name, parent.name)
        if newname != self.name:
            if newname not in [p.name for p in parent.peripherals]:
                self.name = newname
            else:
                print(f"Warning [{parent.name}]: {self.name} + {other.name} "
                      f"-> {newname}: name already exists, using {self.name}")
    def refactor_common_register_fields(self):
        """
        Go through all registers in this peripheral and where two registers
        have the same set of fields, replace the latter's with links to the
        former's.
        """
        replace = []
        to_replace = set()
        registers = enumerate(self.registers)
        for (idx1, r1), (idx2, r2) in itertools.combinations(registers, 2):
            if r1 is r2 or idx1 in to_replace or idx2 in to_replace:
                continue
            if r1.fields == r2.fields and r1.fields:
                replace.append((idx1, idx2))
                to_replace.add(idx2)
        for idx1, idx2 in replace:
            r1 = self.registers[idx1]
            r2 = self.registers[idx2]
            path = f"super::{r1.name}"
            r2.fields = [FieldLink(f, path) for f in r1.fields]
    def refactor_aliased_registers(self):
        """
        Go through all registers in this peripheral and where two registers
        have the same offset (i.e., are aliased), merge the fields, replace
        the name with the common prefix.
        """
        to_delete = set()
        registers = enumerate(self.registers)
        for (idx1, r1), (idx2, r2) in itertools.combinations(registers, 2):
            if r1 is r2 or idx1 in to_delete or idx2 in to_delete:
                continue
            if r1.offset == r2.offset:
                r1.consume(r2, parent=self)
                to_delete.add(idx2)
        # Delete from the end so earlier indices stay valid.
        for idx in sorted(to_delete, reverse=True):
            del self.registers[idx]
    def __lt__(self, other):
        # Peripherals sort by name, for stable generated output.
        return self.name < other.name
class PeripheralPrototypeLink(Node):
    """
    Represents use of an externally defined RegisterBlock and registers,
    with local instances.
    """
    def __init__(self, name, prototype, path):
        """
        `path`: the relative path to the prototype module,
        so that `use {path}::RegisterBlock;` works from the
        context of this module,
        e.g., `super::tim1`, or `super::super::stm32f401::gpio`.
        """
        self.name = name
        # The shared PeripheralPrototype whose registers this link re-uses;
        # `registers`/`desc` below delegate to it.
        self.prototype = prototype
        self.path = path
        self.instances = []
        self.parent_device_names = []
    def to_dict(self):
        # Serialise to a plain dict; the prototype is referenced by name
        # only, to avoid duplicating its full tree.
        return {"prototype": self.prototype.name, "path": self.path,
                "instances": [x.to_dict() for x in self.instances]}
    def to_rust_file(self, path):
        """
        Creates {peripheral}.rs in the path, writes `use` statements
        for all register modules and the register block, and writes any
        instances to that file.
        Finally runs rustfmt over the new file.
        """
        # NOTE(review): unlike PeripheralPrototype.to_rust_file, this does
        # not None-check or escape_desc() the description — presumably a
        # linked prototype always has one; confirm if new SVDs break this.
        desc = "\n//! ".join(self.prototype.desc.split("\n"))
        if len(self.parent_device_names) > 1:
            # Document every device that shares this peripheral.
            desc += "\n//!\n"
            desc += "//! Used by: " + ', '.join(
                sorted(set(self.parent_device_names)))
        preamble = "\n".join([
            "#![allow(non_snake_case, non_upper_case_globals)]",
            "#![allow(non_camel_case_types)]",
            f"//! {desc}",
            "",
            f"pub use crate::{self.path}::{{RegisterBlock, ResetValues}};",
            "#[cfg(not(feature = \"nosync\"))]",
            f"pub use crate::{self.path}::{{Instance}};",
            "",
        ])
        # Re-export each register module from the prototype's module.
        registers = ", ".join(m.name for m in self.prototype.registers)
        registers = f"pub use crate::{self.path}::{{{registers}}};\n"
        # `self.registers` is a property delegating to the prototype.
        instances = "\n".join(i.to_rust(self.registers)
                              for i in sorted(self.instances))
        fname = os.path.join(path, f"{self.name}.rs")
        with open(fname, "w") as f:
            f.write(preamble)
            f.write(registers)
            f.write("\n")
            f.write(instances)
        rustfmt(fname)
    def to_parent_entry(self):
        # `pub mod` line for the parent device's mod.rs.
        return f"pub mod {self.name};\n"
    def to_struct_entry(self, usename=None):
        # Peripherals-struct field lines; `usename` overrides the module
        # name used in the field type (defaults to this link's name).
        if usename is None:
            usename = self.name
        lines = []
        for instance in self.instances:
            lines.append(f"pub {instance.name}: {usename}::Instance,")
        return "\n".join(lines)
    def to_struct_steal(self, usename=None):
        # Peripherals::steal() initialiser lines; see to_struct_entry for
        # the meaning of `usename`.
        if usename is None:
            usename = self.name
        lines = []
        for instance in self.instances:
            lines.append(
                f"{instance.name}: "
                f"{usename}::{instance.name}::steal(),")
        return "\n".join(lines)
    @classmethod
    def from_peripherals(cls, p1, p2, path):
        # Turn p2 into a link to prototype p1, keeping p2's name and
        # instances; `path` is the Rust path to p1's module.
        plink = cls(p2.name, p1, path)
        plink.instances = p2.instances
        return plink
    @property
    def registers(self):
        # Delegate to the linked prototype's registers.
        return self.prototype.registers
    @property
    def desc(self):
        # Delegate to the linked prototype's description.
        return self.prototype.desc
    def refactor_common_register_fields(self):
        # No-op: the prototype owns the registers and is refactored there.
        pass
    def refactor_common_instances(self):
        # No-op: nothing to refactor on a link.
        pass
    def refactor_aliased_registers(self):
        # No-op: the prototype owns the registers and is refactored there.
        pass
    def __lt__(self, other):
        # Links sort by name, for stable generated output.
        return self.name < other.name
class PeripheralSharedInstanceLink(Node):
    """
    A device-level alias for a peripheral whose instances were hoisted to
    the family-level `instances` module.

    `name` is the name used inside the device module, `usename` is the
    (possibly versioned) name of the shared module, and `prototype` is the
    shared peripheral object that registers/desc delegate to.
    """
    def __init__(self, name, usename, prototype):
        self.name = name
        self.usename = usename
        self.prototype = prototype
    def to_parent_entry(self):
        """`pub use` line for the parent device's mod.rs."""
        if self.usename != self.name:
            # Shared module has a versioned name; re-export under the
            # device-local name.
            return (f"pub use super::instances::{self.usename} "
                    f"as {self.name};\n")
        return f"pub use super::instances::{self.name};\n"
    def to_struct_entry(self):
        """Delegate Peripherals-struct fields to the shared prototype."""
        return self.prototype.to_struct_entry(self.name)
    def to_struct_steal(self):
        """Delegate Peripherals::steal() lines to the shared prototype."""
        return self.prototype.to_struct_steal(self.name)
    def to_rust_file(self, path):
        # No-op: the shared instances module writes the actual file.
        pass
    @property
    def registers(self):
        # Delegate to the shared prototype.
        return self.prototype.registers
    @property
    def desc(self):
        # Delegate to the shared prototype.
        return self.prototype.desc
    def refactor_common_register_fields(self):
        # No-op: refactoring happens on the prototype.
        pass
    def refactor_common_instances(self):
        # No-op: refactoring happens on the prototype.
        pass
    def refactor_aliased_registers(self):
        # No-op: refactoring happens on the prototype.
        pass
    def __lt__(self, other):
        # Sort by name, for stable generated output.
        return self.name < other.name
class CPU(Node):
    """
    Represents the CPU in a device.
    Has a name and nvicPrioBits.
    Belongs to a parent Device.
    """

    # Cortex-M core name -> ARM architecture name.
    _ARCHITECTURES = {
        "CM0": "ARMv6-M",
        "CM0+": "ARMv6-M",
        "CM3": "ARMv7-M",
        "CM4": "ARMv7E-M",
        "CM7": "ARMv7E-M",
        "CM33": "ARMv8-M",
    }

    def __init__(self, name, nvic_prio_bits):
        self.name = name
        self.nvic_prio_bits = nvic_prio_bits

    def get_architecture(self):
        """Return the ARM architecture name for this core.

        Returns None for core names not in the table (same as the
        original if/elif chain falling through).
        """
        return self._ARCHITECTURES.get(self.name)

    def to_dict(self):
        """Serialise to a plain dict."""
        return {"name": self.name, "nvic_prio_bits": self.nvic_prio_bits}

    @classmethod
    def from_svd(cls, svd, node):
        """Load a CPU node from the CPU node of a parsed XML file."""
        name = get_string(node, 'name')
        return cls(name, node.find('nvicPrioBits').text)
class Interrupt(Node):
    """
    Represents an interrupt in a device.
    Has a name, description, and value (interrupt number).
    Belongs to a parent Device.
    """

    def __init__(self, name, desc, value):
        self.name = name
        self.desc = desc
        self.value = value

    def to_dict(self):
        """Serialise to a plain dict."""
        return {"name": self.name, "desc": self.desc, "value": self.value}

    @classmethod
    def from_svd(cls, svd, node):
        """Build an Interrupt from an <interrupt> SVD element."""
        return cls(get_string(node, 'name'),
                   get_string(node, 'description'),
                   get_int(node, 'value'))

    def __lt__(self, other):
        # Interrupts sort by vector number, not by name.
        return self.value < other.value
class Device(Node):
    """
    Represents a device corresponding to a single input SVD file.
    Has a name.
    Contains a child CPU, PeripheralPrototypes, and Interrupts.
    """
    def __init__(self, name, cpu):
        # Device names become Rust module names: lowercase, no dashes.
        self.name = name.lower().replace("-", "_")
        self.cpu = cpu
        self.peripherals = []
        self.interrupts = []
        # "special" devices skip interrupt-file, NVIC_PRIO_BITS and
        # device.x generation in to_files().
        self.special = False
    def to_dict(self):
        # Serialise the whole device tree to a plain dict.
        return {"name": self.name, "cpu": self.cpu.to_dict(),
                "peripherals": [x.to_dict() for x in self.peripherals],
                "interrupts": [x.to_dict() for x in self.interrupts]}
    def to_interrupt_file(self, familypath):
        # Writes interrupts.rs: extern handler declarations, the
        # __INTERRUPTS vector table, and the Interrupt enum.
        devicepath = os.path.join(familypath, self.name)
        iname = os.path.join(devicepath, "interrupts.rs")
        with open(iname, "w") as f:
            f.write('#[cfg(feature="rt")]\nextern "C" {\n')
            for interrupt in self.interrupts:
                f.write(f' fn {interrupt.name}();\n')
            f.write('}\n\n')
            vectors = []
            offset = 0
            # Pad gaps in the vector table with reserved entries so each
            # handler lands at its interrupt number (interrupts are sorted
            # by value in from_svd).
            for interrupt in self.interrupts:
                while interrupt.value != offset:
                    vectors.append("Vector { _reserved: 0 },")
                    offset += 1
                vectors.append(f"Vector {{ _handler: {interrupt.name} }},")
                offset += 1
            nvectors = len(vectors)
            vectors = "\n".join(vectors)
            # Generated Rust is run through rustfmt below, so the template
            # indentation is not significant.
            f.write(f"""\
#[doc(hidden)]
pub union Vector {{
_handler: unsafe extern "C" fn(),
_reserved: u32,
}}
#[cfg(feature="rt")]
#[doc(hidden)]
#[link_section=".vector_table.interrupts"]
#[no_mangle]
pub static __INTERRUPTS: [Vector; {nvectors}] = [
{vectors}
];
/// Available interrupts for this device
#[repr(u16)]
#[derive(Copy,Clone,Debug,PartialEq,Eq)]
#[allow(non_camel_case_types)]
pub enum Interrupt {{""")
            for interrupt in self.interrupts:
                f.write(f"/// {interrupt.value}: ")
                f.write(f"{escape_desc(interrupt.desc)}\n")
                f.write(f"{interrupt.name} = {interrupt.value},\n")
            f.write("}\n")
            f.write("""\
unsafe impl external_cortex_m::interrupt::InterruptNumber for Interrupt {
#[inline(always)]
fn number(self) -> u16 {
self as u16
}
}\n""")
        rustfmt(iname)
    def to_files(self, familypath):
        # Writes this device's directory: one .rs per peripheral,
        # interrupts.rs (unless special), mod.rs, and device.x.
        devicepath = os.path.join(familypath, self.name)
        os.makedirs(devicepath, exist_ok=True)
        for peripheral in self.peripherals:
            peripheral.to_rust_file(devicepath)
        # Warn about peripherals sharing a module name; the later file
        # write would silently clobber the earlier one.
        pnames = [p.name for p in self.peripherals]
        dupnames = set(name for name in pnames if pnames.count(name) > 1)
        if dupnames:
            print(f"Warning [{self.name}]: duplicate peripherals: ", end='')
            print(dupnames)
        if not self.special:
            self.to_interrupt_file(familypath)
        mname = os.path.join(devicepath, "mod.rs")
        with open(mname, "w") as f:
            f.write(f"//! stm32ral module for {self.name}\n\n")
            prio_bits = self.cpu.nvic_prio_bits
            if not self.special:
                f.write("/// Number of priority bits implemented by the NVIC")
                f.write(f"\npub const NVIC_PRIO_BITS: u8 = {prio_bits};\n\n")
                f.write("/// Interrupt-related magic for this device\n")
                f.write("pub mod interrupts;\n")
                f.write("pub use self::interrupts::Interrupt;\n")
                f.write("pub use self::interrupts::Interrupt as interrupt;\n")
                f.write("\n\n")
            for peripheral in self.peripherals:
                f.write(peripheral.to_parent_entry())
            f.write("\n\n")
            # Peripherals struct: only for RTIC, and only when register
            # synchronisation (Instance ownership) is enabled.
            f.write("#[cfg(all(feature=\"rtic\", not(feature=\"nosync\")))]")
            f.write("\n#[allow(non_snake_case)]\n")
            f.write("pub struct Peripherals {\n")
            for peripheral in self.peripherals:
                f.write(" " + peripheral.to_struct_entry())
            f.write("}\n\n")
            f.write("#[cfg(all(feature=\"rtic\", feature=\"nosync\"))]\n")
            f.write("#[allow(non_snake_case)]\n")
            f.write("pub struct Peripherals {}\n\n")
            f.write("#[cfg(all(feature=\"rtic\", not(feature=\"nosync\")))]")
            f.write("\nimpl Peripherals {\n")
            f.write(" pub unsafe fn steal() -> Self {\n")
            f.write(" Peripherals {\n")
            for peripheral in self.peripherals:
                f.write(" " + peripheral.to_struct_steal())
            f.write(" }\n }\n}\n\n")
            f.write("#[cfg(all(feature=\"rtic\", feature=\"nosync\"))]\n")
            f.write("impl Peripherals {\n pub fn steal() -> Self {\n")
            f.write(" Peripherals {}\n }\n}")
        rustfmt(mname)
        if not self.special:
            # device.x gives every interrupt a weak default handler.
            dname = os.path.join(devicepath, "device.x")
            with open(dname, "w") as f:
                for interrupt in self.interrupts:
                    f.write(f"PROVIDE({interrupt.name} = DefaultHandler);\n")
    @classmethod
    def from_svd(cls, svd, device_name):
        """Load a Device node and children from a parsed SVD XML file."""
        name = device_name
        try:
            cpu = CPU.from_svd(svd, svd.find('cpu'))
        except AttributeError as e:
            # svd.find('cpu') returned None; report which SVD is missing
            # its <cpu> element before re-raising.
            print(device_name)
            raise e
        device = cls(name, cpu)
        # Register defaults (size/access/reset) inherited from the
        # device-level SVD element.
        register_ctx = RegisterCtx.empty()
        register_ctx = register_ctx.inherit(svd)
        interrupt_nums = set()
        for interrupt in svd.findall('.//interrupt'):
            interrupt = Interrupt.from_svd(svd, interrupt)
            if interrupt.value in interrupt_nums:
                # Many SVDs have duplicated interrupts. Skip them.
                continue
            device.interrupts.append(interrupt)
            interrupt_nums.add(interrupt.value)
        device.interrupts.sort()
        for peripheral in svd.findall('.//peripheral'):
            device.peripherals.append(
                PeripheralPrototype.from_svd(svd, peripheral, register_ctx))
        for peripheral in device.peripherals:
            peripheral.parent_device_names.append(device.name)
        return device
    @classmethod
    def from_svdfile(cls, svdfile):
        # Device name is the SVD filename without its extension.
        device_name = os.path.basename(svdfile).split('.')[0]
        svd = ET.parse(svdfile)
        return cls.from_svd(svd, device_name)
    def refactor_peripheral_instances(self):
        """
        Go through all peripherals and where two have the same RegisterBlock,
        combine them into a single PeripheralPrototype with multiple
        PeripheralInstances.
        """
        to_delete = set()
        to_link = set()
        links = []
        periphs = enumerate(self.peripherals)
        for (idx1, p1), (idx2, p2) in itertools.combinations(periphs, 2):
            if p1 is p2 or idx1 in to_delete or idx2 in to_delete:
                continue
            elif idx1 in to_link or idx2 in to_link:
                continue
            elif p1.registers == p2.registers:
                if p1.name.startswith("tim"):
                    # Similar timers we have to special case, because they
                    # just do not group up well at all.
                    links.append((idx1, idx2))
                    to_link.add(idx2)
                else:
                    # Other peripherals we just move instances together.
                    p1.consume(p2, parent=self)
                    to_delete.add(idx2)
        for idx1, idx2 in links:
            # Replace the duplicate with a link to the kept prototype.
            p1 = self.peripherals[idx1]
            p2 = self.peripherals[idx2]
            path = f"super::{p1.name}"
            plink = PeripheralPrototypeLink.from_peripherals(p1, p2, path)
            self.peripherals[idx2] = plink
        # Delete from the end so earlier indices stay valid.
        for idx in sorted(to_delete, reverse=True):
            del self.peripherals[idx]
class Family(Node):
    """
    Represents a group of devices in a common family.
    Peripheral prototypes (i.e. the register block and registers and fields)
    which are used by more than one device in this family live in `peripherals`
    and instances (i.e. specific memory addresses for GPIOA, GPIOB, ...)
    which are used by more than one device live in `instances`.
    """
    def __init__(self, name):
        self.name = name
        self.devices = []
        self.peripherals = []
        self.instances = []
    def to_dict(self):
        # Serialise the whole family tree to a plain dict.
        return {"name": self.name,
                "devices": [d.to_dict() for d in self.devices],
                "peripherals": [p.to_dict() for p in self.peripherals],
                "instances": [i.to_dict()
                              for i in self.instances]}
    def to_files(self, path, pool):
        # Writes the family directory (mod.rs, peripherals/, instances/)
        # and submits the per-device/per-peripheral file generation to
        # `pool` (a multiprocessing-style pool). Returns the list of async
        # results; the caller is responsible for waiting on them.
        familypath = os.path.join(path, self.name)
        os.makedirs(familypath, exist_ok=True)
        periphpath = os.path.join(familypath, "peripherals")
        instpath = os.path.join(familypath, "instances")
        os.makedirs(periphpath, exist_ok=True)
        os.makedirs(instpath, exist_ok=True)
        pool_results = []
        with open(os.path.join(familypath, "mod.rs"), "w") as f:
            uname = self.name.upper()
            f.write(f"//! Parent module for all {uname} devices.\n\n")
            f.write("/// Peripherals shared by multiple devices\n")
            f.write('pub mod peripherals;\n\n')
            f.write("/// Peripheral instances shared by multiple devices\n")
            f.write("pub(crate) mod instances;\n\n")
            for device in self.devices:
                dname = device.name
                result = pool.apply_async(device.to_files, (familypath,))
                pool_results.append(result)
                # Each device module is compiled only for its own feature
                # (or the documentation build).
                f.write(f'#[cfg(any(feature="{dname}", feature="doc"))]\n')
                f.write(f'pub mod {dname};\n\n')
        with open(os.path.join(periphpath, "mod.rs"), "w") as f:
            for peripheral in self.peripherals:
                r = pool.apply_async(peripheral.to_rust_file, (periphpath,))
                pool_results.append(r)
                # Shared peripherals compile when any using device is
                # selected.
                features = ", ".join(
                    f'feature="{d}"' for d in peripheral.parent_device_names)
                f.write(f'#[cfg(any(feature="doc", {features}))]\n')
                f.write(f'pub mod {peripheral.name};\n\n')
        with open(os.path.join(instpath, "mod.rs"), "w") as f:
            for instance in self.instances:
                r = pool.apply_async(instance.to_rust_file, (instpath,))
                pool_results.append(r)
                features = ", ".join(
                    f'feature="{d}"' for d in instance.parent_device_names)
                f.write(f'#[cfg(any(feature="doc", {features}))]\n')
                f.write(f'pub mod {instance.name};\n\n')
        return pool_results
    def _enumerate_peripherals(self):
        # Flatten all peripherals in the family into
        # (device_index, peripheral_index, peripheral) triples.
        peripherals = []
        for didx, device in enumerate(self.devices):
            for pidx, peripheral in enumerate(device.peripherals):
                peripherals.append((didx, pidx, peripheral))
        return peripherals
    def _match_peripherals(self):
        """Gather all pairs of matching peripherals in this family"""
        # Returns {primary_index: [duplicate_indices...]} where an index is
        # a (device_index, peripheral_index) pair and peripherals match when
        # their register lists compare equal.
        to_link = set()
        links = dict()
        peripherals = self._enumerate_peripherals()
        for pt1, pt2 in itertools.combinations(peripherals, 2):
            didx1, pidx1, p1 = pt1
            didx2, pidx2, p2 = pt2
            idx1 = (didx1, pidx1)
            idx2 = (didx2, pidx2)
            if p1 is p2 or idx1 in to_link or idx2 in to_link:
                continue
            elif p1.registers == p2.registers:
                to_link.add(idx2)
                if idx1 not in links:
                    links[idx1] = []
                links[idx1].append(idx2)
        return links
    def refactor_common_peripherals(self):
        """
        Find peripherals shared between devices which are identical and
        refactor them into the family-level shared peripherals.
        """
        # Find all pairs of matching peripherals in the family
        links = self._match_peripherals()
        # Determine which peripherals need versioned names
        # (any with multiple peripherals that share the same name).
        pnames = set()
        dupnames = set()
        for idx in links:
            didx, pidx = idx
            p = self.devices[didx].peripherals[pidx]
            if p.name in pnames:
                dupnames.add(p.name)
            pnames.add(p.name)
        # Now create new crate-level peripherals and replace the old ones
        # with links to the new ones
        versions = {}
        for idx in links:
            # Get the primary member of the link group
            didx, pidx = idx
            device = self.devices[didx]
            p = device.peripherals[pidx]
            # Modify the name to gpio_v1, gpio_v2, etc
            name = p.name
            if name in dupnames:
                if name not in versions:
                    versions[name] = 0
                versions[name] += 1
                name = f'{name}_v{versions[name]}'
            # Make a new PeripheralPrototype for the family, with no instances
            familyp = PeripheralPrototype(name, p.desc)
            familyp.registers = p.registers
            familyp.parent_device_names.append(device.name)
            self.peripherals.append(familyp)
            # Make a link for the primary member
            path = f"{self.name}::peripherals::{name}"
            linkp = PeripheralPrototypeLink(p.name, familyp, path)
            linkp.instances = p.instances
            self.devices[didx].peripherals[pidx] = linkp
            # Make a link for each other member
            for childidx in links[idx]:
                cdidx, cpidx = childidx
                childd = self.devices[cdidx]
                childp = childd.peripherals[cpidx]
                familyp.parent_device_names.append(childd.name)
                linkp = PeripheralPrototypeLink(childp.name, familyp, path)
                linkp.instances = childp.instances
                childd.peripherals[cpidx] = linkp
        self.refactor_common_instances(links)
    def refactor_common_instances(self, links):
        # Second pass over the groups from refactor_common_peripherals:
        # where linked peripherals also share identical instance lists,
        # hoist those instances into the family-level `instances` module
        # and replace the device entries with PeripheralSharedInstanceLinks.
        to_group = set()
        groups = dict()
        for primary, children in links.items():
            members = [primary] + list(children)
            for l1, l2 in itertools.combinations(members, 2):
                didx1, pidx1 = l1
                didx2, pidx2 = l2
                p1 = self.devices[didx1].peripherals[pidx1]
                p2 = self.devices[didx2].peripherals[pidx2]
                if p1 is p2 or l1 in to_group or l2 in to_group:
                    continue
                elif p1.instances == p2.instances:
                    to_group.add(l2)
                    if l1 not in groups:
                        groups[l1] = []
                    groups[l1].append(l2)
        # Find shared-instance names used by more than one group; those
        # need disambiguating suffixes.
        pnames = set()
        dupnames = set()
        for (didx, pidx) in groups:
            p = self.devices[didx].peripherals[pidx]
            if p.name in pnames:
                dupnames.add(p.name)
            pnames.add(p.name)
        for idx in groups:
            didx, pidx = idx
            d = self.devices[didx]
            p = d.peripherals[pidx]
            name = p.name
            if name in dupnames:
                # Suffix with the member device names to disambiguate.
                # NOTE(review): [5:] presumably strips a "stm32" prefix
                # from device names — confirm for non-stm32 device names.
                name += "_" + d.name[5:]
                for cidx in groups[idx]:
                    cdidx, _ = cidx
                    cd = self.devices[cdidx]
                    name += "_" + cd.name[5:]
            linkp = PeripheralSharedInstanceLink(p.name, name, p)
            self.devices[didx].peripherals[pidx] = linkp
            # The existing link object becomes the family-level shared
            # instance module, under the (possibly suffixed) name.
            groupp = p
            groupp.name = name
            groupp.parent_device_names.append(d.name)
            self.instances.append(groupp)
            for cidx in groups[idx]:
                cdidx, cpidx = cidx
                cd = self.devices[cdidx]
                groupp.parent_device_names.append(cd.name)
                cd.peripherals[cpidx] = linkp
class Crate:
    """
    Represents the overall crate of devices and shared peripherals.
    Contains one or more child Families and shared Peripherals.
    """
    def __init__(self):
        self.families = []
        self.peripherals = []

    def to_dict(self):
        """Serialise the whole crate tree to a plain dict."""
        return {"families": [x.to_dict() for x in self.families],
                "peripherals": [x.to_dict() for x in self.peripherals]}

    def write_build_script(self, path):
        """
        Generates build.rs which copies the relevant device.x into the build
        path for the selected device.
        """
        devices = []
        for family in self.families:
            for device in family.devices:
                # Special devices have no device.x (no interrupts).
                if not device.special:
                    devices.append((family.name, device.name))
        clauses = " else ".join("""\
if env::var_os("CARGO_FEATURE_{}").is_some() {{
"src/{}/{}/device.x"
}}""".format(d.upper(), f, d) for (f, d) in sorted(devices))
        clauses += " else { panic!(\"No device features selected\"); }"
        fname = os.path.join(path, "build.rs")
        with open(fname, "w") as f:
            f.write(BUILD_RS_TEMPLATE.format(device_clauses=clauses))
        rustfmt(fname)

    def to_files(self, path, pool):
        """
        Writes src/lib.rs, Cargo.toml, src/mod.rs, build.rs, writes out all
        child peripherals, and triggers all child families to write their own
        files out.

        Per-device and per-peripheral work is submitted to `pool`; the
        returned list of async results must be waited on by the caller.
        """
        srcpath = os.path.join(path, 'src')
        if not os.path.isdir(srcpath):
            raise ValueError(f"{srcpath} does not exist")
        periphpath = os.path.join(srcpath, "peripherals")
        os.makedirs(periphpath, exist_ok=True)
        pool_results = []
        # BUGFIX: these three files were previously opened without ever
        # being closed; use a single `with` so they are flushed and closed
        # even if generation fails part-way through.
        with open(os.path.join(srcpath, "lib.rs"), "w") as lib_f, \
             open(os.path.join(path, "Cargo.toml"), "w") as cargo_f, \
             open(os.path.join(periphpath, "mod.rs"), "w") as periph_f:
            lib_f.write(CRATE_LIB_PREAMBLE)
            cargo_f.write(CRATE_CARGO_TOML_PREAMBLE)
            self.write_build_script(path)
            for family in sorted(self.families, key=lambda x: x.name):
                fname = family.name
                pool_results += family.to_files(srcpath, pool)
                # The family module compiles when any of its devices (or
                # the documentation build) is selected.
                features = [f'feature="{d.name}"' for d in family.devices]
                lib_f.write(
                    f'#[cfg(any(feature="doc", {", ".join(features)}))]\n')
                lib_f.write(f'pub mod {fname};\n\n')
                for device in family.devices:
                    dname = device.name
                    # Each device feature pulls in its architecture feature
                    # (armv6m/armv7m/...); special devices pull in nothing.
                    arch = device.cpu.get_architecture().lower()
                    arch = arch.replace("-", "")
                    if device.special:
                        cargo_f.write(f'{dname} = []\n')
                    else:
                        cargo_f.write(f'{dname} = ["{arch}"]\n')
                    lib_f.write(f'#[cfg(feature="{dname}")]\n')
                    lib_f.write(f'pub use {fname}::{dname}::*;\n\n')
            if self.peripherals:
                lib_f.write(
                    "//! Peripherals shared between multiple families\n")
                lib_f.write("pub mod peripherals;\n\n")
            for peripheral in self.peripherals:
                result = pool.apply_async(
                    peripheral.to_rust_file, (periphpath,))
                pool_results.append(result)
                features = ", ".join(
                    f'feature="{d}"' for d in peripheral.parent_device_names)
                periph_f.write(f'#[cfg(any(feature="doc", {features}))]\n')
                periph_f.write(f'pub mod {peripheral.name};\n\n')
        return pool_results
def get_int(node, tag, default=None):
    """Parses and returns an integer from the specified child tag in node.

    Handles SVD boolean literals ("true"/"false") and "0x"/"0b" prefixed
    values; everything else is treated as base-10. Returns `default`
    unchanged when the tag is absent.
    """
    text = get_string(node, tag, default=default)
    if text == default:
        return text
    text = text.lower().strip()
    if text == "true":
        return 1
    if text == "false":
        return 0
    prefix = text[:2]
    if prefix == "0x":
        return int(text[2:], 16)
    if prefix == "0b":
        return int(text[2:], 2)
    # Annoyingly sometimes constants are base-10 with leading zeros,
    # so int(text, 0) for autodetection does not work.
    return int(text, 10)
def get_string(node, tag, default=None):
    """Finds and returns a string from the specified child tag in node.

    Returns `default` unchanged when the tag is absent; otherwise all runs
    of whitespace in the tag's text are collapsed to single spaces.
    """
    raw = node.findtext(tag, default=default)
    if raw != default:
        raw = " ".join(raw.split())
    return raw
def expand_dim(node):
    """
    Returns an expanded list of nodes per the dimElementGroup, or just
    a list containing node if no dimElementGroup specified.

    Each expanded node gets "%s" in its name replaced by its index, its
    addressOffset bumped by position * dimIncrement, and a `dim_index`
    attribute recording the index for later cluster expansion.
    """
    dim = get_int(node, 'dim')
    if dim is None:
        return [node]
    inc = get_int(node, 'dimIncrement')
    idxs = get_string(node, 'dimIndex')
    if idxs is None:
        # No explicit indices: number 0..dim-1.
        idxs = list(range(dim))
    elif "," in idxs:
        idxs = idxs.split(",")
    elif "-" in idxs:
        lo, hi = idxs.split("-")
        idxs = list(range(int(lo), int(hi) + 1))
    else:
        raise ValueError("Unknown dimIndex: '{}'".format(idxs))
    template_name = get_string(node, 'name')
    expanded = []
    for position, idx in enumerate(idxs):
        clone = copy.deepcopy(node)
        clone.find('name').text = template_name.replace("%s", str(idx))
        addr = get_int(clone, 'addressOffset') + position * inc
        clone.find('addressOffset').text = "0x{:08x}".format(addr)
        clone.attrib['dim_index'] = idx
        expanded.append(clone)
    return expanded
def expand_cluster(node):
    """
    Flattens a <cluster> element into its member <register> elements.

    The cluster must already carry a `dim_index` attribute (set by
    expand_dim); each member register gets the cluster's address offset
    folded into its own and the cluster index appended to its name.
    """
    idx = node.attrib.get('dim_index')
    if idx is None:
        raise ValueError("Can't process a cluster without dim_index set")
    base = get_int(node, 'addressOffset')
    expanded = []
    for register in node.findall('register'):
        clone = copy.deepcopy(register)
        total = base + get_int(register, 'addressOffset')
        clone.find('addressOffset').text = "0x{:08x}".format(total)
        clone.find('name').text = get_string(register, 'name') + str(idx)
        expanded.append(clone)
    return expanded
def escape_desc(desc):
    """Escape `desc` suitable for a doc comment.

    None becomes the empty string; square brackets are backslash-escaped.
    """
    if desc is None:
        return ""
    return desc.translate(str.maketrans({"[": "\\[", "]": "\\]"}))
def rustfmt(fname):
    """Runs rustfmt over the given filename.

    Requires `rustfmt` on PATH. The exit status is not checked, so a
    formatting failure leaves the file unformatted rather than aborting
    generation.
    """
    subprocess.run(["rustfmt", fname])
def common_name(a, b, ctx=""):
    """
    Derive the best shared name for two merged entities `a` and `b`.

    Strategy, in order:
    1. Hard-coded special cases (SPI + I2S?EXT, USART + UART, the ADC
       common blocks, and the delay blocks), checked with the arguments
       in both orders.
    2. If the names differ in exactly one character position (or one is
       a prefix of the other), drop that character: GPIOA + GPIOB ->
       GPIO, TIM1 + TIM2 -> TIM. `a` may already be merged, so
       GPIO + GPIOB -> GPIO also works.
    3. Otherwise fall back to the common prefix, minus any trailing
       underscore: CCMR1_Input + CCMR1_Output -> CCMR1.

    If every strategy fails, a warning tagged with `ctx` is printed and
    `a` is returned unchanged.

    >>> common_name("gpioa", "gpiob")
    'gpio'
    >>> common_name("gpio", "gpiob")
    'gpio'
    >>> common_name("ccmr1_input", "ccmr1_output")
    'ccmr1'
    >>> common_name("i2s2ext", "i2s3ext")
    'i2sext'
    >>> common_name("spi2", "i2s2ext")
    'spi'
    >>> common_name("usart3", "uart4")
    'usart'
    >>> common_name("adc1_2", "adc3_4")
    'adc_common'
    >>> common_name("adc3_common", "adc12_common")
    'adc_common'
    >>> common_name("delay_block_quadspi", "delay_block_sdmmc1")
    'dlyb'
    """
    # Indices, up to the shorter name's length, where the names disagree.
    diffs = [i for i in range(min(len(a), len(b))) if a[i] != b[i]]
    # Special cases, tried with the arguments in both orders.
    for u, v in ((a, b), (b, a)):
        if u.startswith("i2s") and u.endswith("ext") and v.startswith("spi"):
            return "spi"
        if u.startswith("usart") and v.startswith("uart"):
            return "usart"
        if u == "adc1_2" and v == "adc3_4":
            return "adc_common"
        if u == "adc12_common" and v == "adc3_common":
            return "adc_common"
        if u.startswith("delay_block_") and v.startswith("delay_block_"):
            return "dlyb"
        if u == "dlyb" and v.startswith("delay_block_"):
            return "dlyb"
    if not diffs:
        # Names are identical, or one is a strict prefix of the other.
        if a == b:
            print(f"Warning [{ctx}]: {a} and {b} are identical")
            return a
        if b.startswith(a):
            return a
        if a.startswith(b):
            return b
    elif len(diffs) == 1:
        # A single differing character: drop it from both and compare.
        p = diffs[0]
        an = a[:p] + a[p+1:]
        bn = b[:p] + b[p+1:]
        if an == bn:
            return an
        print(f"Warning [{ctx}]: {a}->{an} and {b}->{bn} failed")
        return a
    else:
        p = diffs[0]
        # Dropping the first differing character of `b` may already align
        # the names, as for I2SEXT + I2S3EXT.
        if a == b[:p] + b[p+1:]:
            return a
        # Fall back to the common prefix, stripping a trailing underscore.
        ap = a[:p]
        bp = b[:p]
        if ap.endswith("_"):
            ap = ap[:-1]
            bp = bp[:-1]
        if ap and ap == bp:
            return ap
        print(f"Warning [{ctx}]: {a}->{ap} and {b}->{bp} failed")
        return a
def parse_args():
    """Parse the command line: the crate root path followed by one or
    more SVD files to process."""
    cli = argparse.ArgumentParser()
    cli.add_argument("cratepath", help="Path to crate root")
    cli.add_argument("svdfiles", nargs="+", help="SVD files to parse")
    return cli.parse_args()
def main():
    """Entry point: parse SVD files in parallel, collate devices into
    families, run the cross-device refactors, and write out the crate.
    """
    args = parse_args()
    crate = Crate()
    print("Parsing input files...")
    with multiprocessing.Pool() as p:
        devices = p.map(Device.from_svdfile, args.svdfiles)
    print("Collating families...")
    # The ARMv*-M SVDs are special-cased into a shared cortex_m family.
    cortex_family = Family("cortex_m")
    crate.families.append(cortex_family)
    for device in devices:
        # Special case the ARMv*-M SVDs
        if device.name.startswith("armv"):
            device.special = True
            cortex_family.devices.append(device)
        else:
            # The family name is the first 7 characters, e.g. "stm32f4".
            device_family = device.name[:7].lower()
            if device_family not in [f.name for f in crate.families]:
                crate.families.append(Family(device_family))
            family = [f for f in crate.families if f.name == device_family][0]
            if device.name in [d.name for d in family.devices]:
                print(f"Warning: {device.name} already exists in {family},"
                      " skipping.")
                continue
            family.devices.append(device)
    print("Running refactors...")
    for device in devices:
        device.refactor_peripheral_instances()
        for peripheral in device.peripherals:
            peripheral.refactor_aliased_registers()
            peripheral.refactor_common_register_fields()
            for register in peripheral.registers:
                register.refactor_common_field_values()
    for family in crate.families:
        family.refactor_common_peripherals()
    print("Outputting crate...")
    with multiprocessing.Pool() as pool:
        pool_results = list(crate.to_files(args.cratepath, pool))
        # Wait on the async results *inside* the pool context: the Pool
        # context manager calls terminate() on exit, so calling .get()
        # afterwards (as the previous code did) could hang or discard
        # tasks that had not yet completed.
        for result in pool_results:
            result.get()
if __name__ == "__main__":
    # Run only when executed as a script, not when imported as a module.
    main()
| 35.5 | 93 | 0.560435 |
import os
import copy
import argparse
import itertools
import subprocess
import multiprocessing
import xml.etree.ElementTree as ET
from fnmatch import fnmatch
CRATE_LIB_PREAMBLE = """\
// Copyright 2018 Adam Greig
// See LICENSE-APACHE and LICENSE-MIT for license details.
//! This project provides a register access layer (RAL) for all
//! STM32 microcontrollers.
//!
//! When built, you must specify a device feature, such as `stm32f405`.
//! This will cause all modules in that device's module to be re-exported
//! from the top level, so that for example `stm32ral::gpio` will resolve to
//! `stm32ral::stm32f4::stm32f405::gpio`.
//!
//! In the generated documentation, all devices are visible inside their family
//! modules, but when built for a specific device, only that devices' constants
//! will be available.
//!
//! See the
//! [README](https://github.com/adamgreig/stm32ral/blob/master/README.md)
//! for example usage.
#![no_std]
#[cfg(feature="rt")]
extern crate cortex_m_rt;
mod register;
#[cfg(feature="rt")]
pub use cortex_m_rt::interrupt;
pub use crate::register::{RORegister, UnsafeRORegister};
pub use crate::register::{WORegister, UnsafeWORegister};
pub use crate::register::{RWRegister, UnsafeRWRegister};
"""
CRATE_CARGO_TOML_PREAMBLE = """\
# Generated by stm32ral.py. Do not edit manually.
[package]
name = "stm32ral"
authors = ["Adam Greig <adam@adamgreig.com>"]
description = "Register access layer for all STM32 microcontrollers"
repository = "https://github.com/adamgreig/stm32ral"
documentation = "https://docs.rs/stm32ral"
readme = "README.md"
keywords = ["stm32", "embedded", "no_std"]
categories = ["embedded", "no-std"]
license = "MIT/Apache-2.0"
edition = "2018"
exclude = ["/stm32-rs"]
# Change version in stm32ral.py, not in Cargo.toml!
version = "0.7.0"
[package.metadata.docs.rs]
features = ["doc"]
no-default-features = true
targets = []
[dependencies]
# Change dependency versions in stm32ral.py, not here!
external_cortex_m = { package = "cortex-m", version = "0.7.3" }
cortex-m-rt = { version = ">=0.6.15,<0.8", optional = true }
[features]
default = ["rt"]
rt = ["cortex-m-rt/device"]
inline-asm = ["external_cortex_m/inline-asm"]
rtfm = ["rtic"]
rtic = []
nosync = []
doc = []
"""
BUILD_RS_TEMPLATE = """\
use std::env;
use std::fs;
use std::path::PathBuf;
fn main() {{
if env::var_os("CARGO_FEATURE_RT").is_some() {{
let out = &PathBuf::from(env::var_os("OUT_DIR").unwrap());
println!("cargo:rustc-link-search={{}}", out.display());
let device_file = {device_clauses};
fs::copy(device_file, out.join("device.x")).unwrap();
println!("cargo:rerun-if-changed={{}}", device_file);
}}
println!("cargo:rerun-if-changed=build.rs");
}}
"""
UNSAFE_REGISTERS = [
"S?PAR", "S?M?AR", "CPAR?", "CMAR?",
"FGMAR", "BGMAR", "FGCMAR", "BGCMAR", "OMAR",
"L?CFBAR",
"DIEPDMA*", "DOEPDMA*", "HCDMA*",
"DMARDLAR", "DMATDLAR",
"ICIALLU", "?C?MVA?", "DC?SW", "DCCIMVAC", "DCCISW", "BPIALL",
]
class Node:
    """Empty base class shared by every parsed-SVD tree node type."""
    pass
class EnumeratedValue(Node):
    """One named value of a register field, from an SVD <enumeratedValue>."""
    def __init__(self, name, desc, value, register_size):
        self.name = name
        self.desc = desc
        self.value = value
        # Width in bits of the register; sets the Rust constant's type.
        self.register_size = register_size
        # Rust identifiers cannot start with a digit, so prefix with '_'.
        if self.name[0] in "0123456789":
            self.name = "_" + self.name
            print("Name started with a number:", self.name)
    def to_dict(self):
        """Serialise to a plain dict."""
        return {"name": self.name, "desc": self.desc, "value": self.value}
    def to_rust(self, field_width):
        """Render as a Rust `pub const` with a binary-literal doc comment,
        zero-padded to `field_width` bits."""
        return f"""
/// 0b{self.value:0{field_width}b}: {escape_desc(self.desc)}
pub const {self.name}: u{self.register_size} = 0b{self.value:0{field_width}b};"""
    @classmethod
    def from_svd(cls, svd, node, register_size):
        """Construct from an SVD <enumeratedValue> element."""
        name = get_string(node, 'name')
        desc = get_string(node, 'description')
        value = get_int(node, 'value')
        return cls(name, desc, value, register_size)
    def __eq__(self, other):
        # NOTE: __hash__ is not defined alongside __eq__, so instances
        # are unhashable (the generator only sorts/compares them).
        return (
            self.name == other.name and
            self.value == other.value and
            self.desc == other.desc)
    def __lt__(self, other):
        # Sort by numeric value.
        return self.value < other.value
class EnumeratedValues(Node):
    """A set of EnumeratedValue, named "R", "W", or "RW" by SVD usage."""
    def __init__(self, name):
        self.name = name
        self.values = []
    def to_dict(self):
        """Serialise to a plain dict."""
        return {"name": self.name,
                "values": [v.to_dict() for v in self.values]}
    def to_rust(self, field_width):
        """Render as a Rust module ("R"/"W"/"RW") of value constants."""
        values = "\n".join(v.to_rust(field_width) for v in self.values)
        if self.name == "R":
            desc = "Read-only values"
        elif self.name == "W":
            desc = "Write-only values"
        else:
            desc = "Read-write values"
        if not values:
            desc += " (empty)"
        return f"""\
/// {desc}
pub mod {self.name} {{
{values}
}}"""
    @classmethod
    def from_svd(cls, svd, node, register_size):
        """Construct from an SVD <enumeratedValues> element; the SVD
        `usage` tag picks the module name (default "RW")."""
        usage = get_string(node, 'usage')
        if usage == "read":
            name = "R"
        elif usage == "write":
            name = "W"
        else:
            name = "RW"
        evs = cls(name)
        for ev in node.findall('enumeratedValue'):
            evs.values.append(EnumeratedValue.from_svd(svd, ev, register_size))
        return evs
    @classmethod
    def empty(cls, name):
        """An EnumeratedValues with the given name and no values."""
        return cls(name)
    def __eq__(self, other):
        # Equal when names match and the value sets match irrespective
        # of declaration order.
        return (
            self.name == other.name and
            len(self.values) == len(other.values) and
            all(v1 == v2 for v1, v2
                in zip(sorted(self.values), sorted(other.values))))
class EnumeratedValuesLink(Node):
    """Reference to an identical EnumeratedValues defined on another field;
    emitted as a Rust `pub use` instead of duplicating the constants."""
    def __init__(self, field, evs):
        self.field = field
        self.evs = evs
    def to_dict(self):
        """Serialise to a plain dict of the referenced names."""
        return {"field": self.field.name, "evs": self.evs.name}
    def to_rust(self, field_width):
        # field_width is unused; links re-export rather than render values.
        return f"pub use ::super::{self.field.name}::{self.evs.name};"
    def __eq__(self, other):
        # Delegates to the linked EnumeratedValues; works because links
        # expose matching `name`/`values` properties below.
        return self.evs.__eq__(other)
    @property
    def name(self):
        return self.evs.name
    @property
    def values(self):
        return self.evs.values
class Field(Node):
def __init__(self, name, desc, register_size, width, offset, access, r, w, rw):
self.name = name
self.desc = desc
self.width = width
self.offset = offset
self.access = access
self.r = r
self.w = w
self.rw = rw
self.register_size = register_size
if self.name[0] in "0123456789":
self.name = "_" + self.name
print("Name started with a number:", self.name)
def to_dict(self):
return {"name": self.name, "desc": self.desc, "width": self.width,
"offset": self.offset, "access": self.access,
"r": self.r.to_dict(), "w": self.w.to_dict(),
"rw": self.rw.to_dict()}
def to_rust(self):
mask = 2**self.width - 1
if self.width == 1:
mask = "1"
elif self.width < 6:
mask = f"0b{mask:b}"
else:
mask = f"0x{mask:x}"
bits = f"bit{'s' if self.width>1 else ''}"
ty = f"u{self.register_size}"
return f"""
/// {escape_desc(self.desc)}
pub mod {self.name} {{
/// Offset ({self.offset} bits)
pub const offset: {ty} = {self.offset};
/// Mask ({self.width} {bits}: {mask} << {self.offset})
pub const mask: {ty} = {mask} << offset;
{self.r.to_rust(self.width)}
{self.w.to_rust(self.width)}
{self.rw.to_rust(self.width)}
}}"""
@classmethod
def from_svd(cls, svd, node, ctx):
ctx = ctx.inherit(node)
name = get_string(node, 'name')
desc = get_string(node, 'description')
width = get_int(node, 'bitWidth')
offset = get_int(node, 'bitOffset')
access = ctx.access
# unsigned integer. We probably will never see a register that's
register_size = (ctx.size + 7) & (~7)
if register_size != ctx.size:
print(f"Field {name} will be represented using u{register_size}s, "
f"although the register size is {ctx.size} bits")
r = EnumeratedValues.empty("R")
w = EnumeratedValues.empty("W")
rw = EnumeratedValues.empty("RW")
for evs in node.findall('enumeratedValues'):
if 'derivedFrom' in evs.attrib:
df = evs.attrib['derivedFrom']
evs = svd.find(f".//enumeratedValues[name='{df}']")
if evs is None:
raise ValueError(f"Can't find derivedFrom {df}")
evs = EnumeratedValues.from_svd(svd, evs, register_size)
evsname = evs.name
if evsname == "R":
r = evs
elif evsname == "W":
w = evs
else:
rw = evs
field = cls(name, desc, register_size, width, offset, access, r, w, rw)
return field
def __eq__(self, other):
return (
self.name == other.name and
self.width == other.width and
self.offset == other.offset and
self.access == other.access and
self.r == other.r and self.w == other.w and self.rw == other.rw)
def __lt__(self, other):
return (self.offset, self.name) < (other.offset, other.name)
class FieldLink(Node):
    """Reference to an identical Field on another register; emitted as a
    Rust `pub use` and otherwise delegating to the parent Field."""
    def __init__(self, parent, path):
        self.parent = parent
        # Rust module path to the register owning the real field.
        self.path = path
        self.r = parent.r
        self.w = parent.w
        self.rw = parent.rw
    def to_dict(self):
        """Serialise to a plain dict."""
        return {"parent": self.parent.name, "path": self.path}
    def to_rust(self):
        """Render as a Rust re-export of the parent field's module."""
        return f"pub use {self.path}::{self.parent.name};"
    def __lt__(self, other):
        return self.parent.__lt__(other)
    def __eq__(self, other):
        return self.parent.__eq__(other)
    @property
    def name(self):
        return self.parent.name
    @property
    def desc(self):
        return self.parent.desc
    @property
    def width(self):
        return self.parent.width
    @property
    def offset(self):
        return self.parent.offset
    @property
    def access(self):
        return self.parent.access
class RegisterCtx:
    """Inheritable register properties (size/access/reset value/reset mask)
    accumulated while descending the SVD tree; child elements override
    only the properties they specify."""
    def __init__(self, size, access, reset_value, reset_mask):
        self.size = size
        self.access = access
        self.reset_value = reset_value
        self.reset_mask = reset_mask
    @classmethod
    def empty(cls):
        """A context with no properties set."""
        return cls(None, None, None, None)
    def copy(self):
        """Shallow copy, so a child's overrides don't leak to siblings."""
        return RegisterCtx(self.size, self.access, self.reset_value,
                           self.reset_mask)
    def update_from_node(self, node):
        """Overwrite, in place, any property that `node` specifies;
        returns self for chaining."""
        size = get_int(node, 'size')
        access = get_string(node, 'access')
        reset_value = get_int(node, 'resetValue')
        reset_mask = get_int(node, 'resetMask')
        if size is not None:
            self.size = size
        if access is not None:
            self.access = access
        if reset_value is not None:
            self.reset_value = reset_value
        if reset_mask is not None:
            self.reset_mask = reset_mask
        return self
    def inherit(self, node):
        """Return a new context: this one overridden by `node`'s values."""
        return self.copy().update_from_node(node)
class Register(Node):
def __init__(self, name, desc, offset, size, access, reset_value,
reset_mask):
self.name = name
self.desc = desc
self.offset = offset
self.size = size
self.access = access
self.reset_value = reset_value
self.reset_mask = reset_mask
self.fields = []
def to_dict(self):
return {"name": self.name, "desc": self.desc, "offset": self.offset,
"size": self.size, "access": self.access,
"reset_value": self.reset_value, "reset_mask": self.reset_mask,
"fields": [x.to_dict() for x in self.fields]}
def to_rust_mod(self):
fields = "\n".join(f.to_rust() for f in self.fields)
return f"""
/// {escape_desc(self.desc)}
pub mod {self.name} {{
{fields}
}}"""
def to_regtype(self):
regtype = {"read-only": "RORegister", "write-only": "WORegister",
"read-write": "RWRegister"}[self.access]
for unsafe in UNSAFE_REGISTERS:
if fnmatch(self.name, unsafe):
regtype = "Unsafe" + regtype
break
return regtype
def to_rust_struct_entry(self):
regtype = self.to_regtype()
return f"""
/// {escape_desc(self.desc)}
pub {self.name}: {regtype}<u{self.size}>,
"""
@classmethod
def from_svd(cls, svd, node, ctx):
ctx = ctx.inherit(node)
name = get_string(node, 'name')
desc = get_string(node, 'description')
offset = get_int(node, 'addressOffset')
register = cls(name, desc, offset, ctx.size, ctx.access,
ctx.reset_value, ctx.reset_mask)
fields = node.find('fields')
if fields is not None:
for field in fields.findall('field'):
register.fields.append(Field.from_svd(svd, field, ctx))
if register.access is None:
# This happens if access is defined per-field, typically because
# there is one or two read-write among many read-only registers.
field_accesses = [f.access for f in register.fields]
if all(access == "read-only" for access in field_accesses):
register.access = "read-only"
elif all(access == "write-only" for access in field_accesses):
register.access = "write-only"
else:
register.access = "read-write"
return register
def __eq__(self, other):
return (
self.name == other.name and
self.offset == other.offset and
self.size == other.size and
self.access == other.access and
sorted(self.fields) == sorted(other.fields)
)
def __lt__(self, other):
return (self.offset, self.name) < (other.offset, other.name)
def refactor_common_field_values(self):
replace = []
to_replace = set()
fields = enumerate(self.fields)
for (idx1, f1), (idx2, f2) in itertools.combinations(fields, 2):
if f1 is f2 or idx1 in to_replace or idx2 in to_replace:
continue
if f1.r == f2.r and f1.r.values:
replace.append((idx1, idx2, 'r'))
to_replace.add(idx2)
if f1.w == f2.w and f1.w.values:
replace.append((idx1, idx2, 'w'))
to_replace.add(idx2)
if f1.rw == f2.rw and f1.rw.values:
replace.append((idx1, idx2, 'rw'))
to_replace.add(idx2)
for idx1, idx2, name in replace:
f1 = self.fields[idx1]
evs1 = getattr(f1, name)
f2 = EnumeratedValuesLink(f1, evs1)
setattr(self.fields[idx2], name, f2)
def consume(self, other, parent):
my_field_names = set(f.name for f in self.fields)
for field in other.fields:
if field.name not in my_field_names:
self.fields.append(field)
self.desc = "\n/// ".join([
f"{self.name} and {other.name}",
f"{self.name}: {escape_desc(self.desc)}",
f"{other.name}: {escape_desc(other.desc)}",
])
self.size = max(self.size, other.size)
self.access = "read-write"
newname = common_name(self.name, other.name, parent.name)
if newname != self.name[:len(newname)]:
print(f"Warning [{parent.name}]: {self.name}+{other.name} "
f"-> {newname}: suspected name compaction failure")
if newname != self.name:
if newname not in [r.name for r in parent.registers]:
self.name = newname
else:
print(f"Warning [{parent.name}]: {self.name} + {other.name} "
f"-> {newname}: name already exists, using {self.name}")
class PeripheralInstance(Node):
def __init__(self, name, addr, reset_values):
self.name = name
self.addr = addr
self.reset_values = reset_values
def to_dict(self):
return {"name": self.name, "addr": self.addr,
"reset_values": self.reset_values}
def to_rust(self, registers):
registers = {r.offset: r.name for r in registers}
resets = ", ".join(
f"{registers[k]}: 0x{v:08X}" for k, v in self.reset_values.items())
return f"""
/// Access functions for the {self.name} peripheral instance
pub mod {self.name} {{
use super::ResetValues;
#[cfg(not(feature="nosync"))]
use super::Instance;
#[cfg(not(feature="nosync"))]
const INSTANCE: Instance = Instance {{
addr: 0x{self.addr:08x},
_marker: ::core::marker::PhantomData,
}};
/// Reset values for each field in {self.name}
pub const reset: ResetValues = ResetValues {{
{resets}
}};
#[cfg(not(feature="nosync"))]
#[allow(renamed_and_removed_lints)]
#[allow(private_no_mangle_statics)]
#[no_mangle]
static mut {self.name}_TAKEN: bool = false;
/// Safe access to {self.name}
///
/// This function returns `Some(Instance)` if this instance is not
/// currently taken, and `None` if it is. This ensures that if you
/// do get `Some(Instance)`, you are ensured unique access to
/// the peripheral and there cannot be data races (unless other
/// code uses `unsafe`, of course). You can then pass the
/// `Instance` around to other functions as required. When you're
/// done with it, you can call `release(instance)` to return it.
///
/// `Instance` itself dereferences to a `RegisterBlock`, which
/// provides access to the peripheral's registers.
#[cfg(not(feature="nosync"))]
#[inline]
pub fn take() -> Option<Instance> {{
external_cortex_m::interrupt::free(|_| unsafe {{
if {self.name}_TAKEN {{
None
}} else {{
{self.name}_TAKEN = true;
Some(INSTANCE)
}}
}})
}}
/// Release exclusive access to {self.name}
///
/// This function allows you to return an `Instance` so that it
/// is available to `take()` again. This function will panic if
/// you return a different `Instance` or if this instance is not
/// already taken.
#[cfg(not(feature="nosync"))]
#[inline]
pub fn release(inst: Instance) {{
external_cortex_m::interrupt::free(|_| unsafe {{
if {self.name}_TAKEN && inst.addr == INSTANCE.addr {{
{self.name}_TAKEN = false;
}} else {{
panic!("Released a peripheral which was not taken");
}}
}});
}}
/// Unsafely steal {self.name}
///
/// This function is similar to take() but forcibly takes the
/// Instance, marking it as taken irregardless of its previous
/// state.
#[cfg(not(feature="nosync"))]
#[inline]
pub unsafe fn steal() -> Instance {{
{self.name}_TAKEN = true;
INSTANCE
}}
}}
/// Raw pointer to {self.name}
///
/// Dereferencing this is unsafe because you are not ensured unique
/// access to the peripheral, so you may encounter data races with
/// other users of this peripheral. It is up to you to ensure you
/// will not cause data races.
///
/// This constant is provided for ease of use in unsafe code: you can
/// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`.
pub const {self.name}: *const RegisterBlock =
0x{self.addr:08x} as *const _;"""
def __lt__(self, other):
return self.name < other.name
def __eq__(self, other):
return (self.name == other.name and
self.addr == other.addr and
self.reset_values == other.reset_values)
class PeripheralPrototype(Node):
def __init__(self, name, desc):
self.name = name.lower()
self.desc = desc
self.registers = []
self.instances = []
self.parent_device_names = []
def to_dict(self):
return {"name": self.name, "desc": self.desc,
"registers": [x.to_dict() for x in self.registers],
"instances": [x.to_dict() for x in self.instances]}
def to_rust_register_block(self):
lines = []
address = 0
reservedctr = 1
for register in sorted(self.registers):
if register.offset < address:
raise RuntimeError("Unexpected register aliasing")
if register.offset != address:
gaps = []
u32s = (register.offset - address) // 4
if u32s != 0:
gaps.append(f"[u32; {u32s}]")
address += u32s * 4
u16s = (register.offset - address) // 2
if u16s != 0:
gaps.append(f"[u16; {u16s}]")
address += u16s * 2
u8s = register.offset - address
if u8s != 0:
gaps.append(f"[u8; {u8s}]")
address += u8s
for gaptype in gaps:
lines.append(f"_reserved{reservedctr}: {gaptype},")
reservedctr += 1
lines.append(register.to_rust_struct_entry())
address += register.size // 8
lines = "\n".join(lines)
return f"""
#[repr(C)]
pub struct RegisterBlock {{
{lines}
}}"""
def to_rust_reset_values(self):
lines = []
for register in sorted(self.registers):
lines.append(f"pub {register.name}: u{register.size},")
lines = "\n".join(lines)
return f"""
pub struct ResetValues {{
{lines}
}}"""
def to_rust_instance(self):
return """
#[cfg(not(feature="nosync"))]
pub struct Instance {
pub(crate) addr: u32,
pub(crate) _marker: PhantomData<*const RegisterBlock>,
}
#[cfg(not(feature="nosync"))]
impl ::core::ops::Deref for Instance {
type Target = RegisterBlock;
#[inline(always)]
fn deref(&self) -> &RegisterBlock {
unsafe { &*(self.addr as *const _) }
}
}
#[cfg(feature="rtic")]
unsafe impl Send for Instance {}
"""
def to_rust_file(self, path):
regtypes = set(r.to_regtype() for r in self.registers)
regtypes = ", ".join(regtypes)
if self.desc is None:
print(self.to_dict())
desc = "\n//! ".join(escape_desc(self.desc).split("\n"))
if len(self.parent_device_names) > 1:
desc += "\n//!\n"
desc += "//! Used by: " + ', '.join(
sorted(set(self.parent_device_names)))
preamble = "\n".join([
"#![allow(non_snake_case, non_upper_case_globals)]",
"#![allow(non_camel_case_types)]",
f"//! {desc}",
"",
"#[cfg(not(feature=\"nosync\"))]",
"use core::marker::PhantomData;",
f"use crate::{{{regtypes}}};",
"",
])
modules = "\n".join(r.to_rust_mod() for r in self.registers)
instances = "\n".join(i.to_rust(self.registers)
for i in sorted(self.instances))
fname = os.path.join(path, f"{self.name}.rs")
with open(fname, "w") as f:
f.write(preamble)
f.write(modules)
f.write(self.to_rust_register_block())
f.write(self.to_rust_reset_values())
f.write(self.to_rust_instance())
f.write(instances)
rustfmt(fname)
def to_parent_entry(self):
return f"pub mod {self.name};\n"
def to_struct_entry(self):
lines = []
for instance in self.instances:
lines.append(f"pub {instance.name}: {self.name}::Instance,")
return "\n".join(lines)
def to_struct_steal(self):
lines = []
for instance in self.instances:
lines.append(
f"{instance.name}: "
f"{self.name}::{instance.name}::steal(),")
return "\n".join(lines) + "\n"
@classmethod
def from_svd(cls, svd, node, register_ctx):
name = get_string(node, 'name')
addr = get_int(node, 'baseAddress')
desc = get_string(node, 'description')
registers = node.find('registers')
if 'derivedFrom' in node.attrib:
df = node.attrib['derivedFrom']
df_node = svd.find(f".//peripheral[name='{df}']")
if df_node is None:
raise ValueError("Can't find derivedFrom[{df}]")
desc = get_string(df_node, 'description', default=desc)
addr = get_int(node, 'baseAddress', addr)
registers = df_node.find('registers')
register_ctx = register_ctx.inherit(df_node)
register_ctx = register_ctx.inherit(node)
peripheral = cls(name, desc)
if registers is None:
raise ValueError(f"No registers found for peripheral {name}")
ctx = register_ctx
for register in registers.findall('register'):
for dimr in expand_dim(register):
peripheral.registers.append(Register.from_svd(svd, dimr, ctx))
for cluster in registers.findall('cluster'):
for dimc in expand_dim(cluster):
for clusr in expand_cluster(dimc):
reg = Register.from_svd(svd, clusr, ctx)
peripheral.registers.append(reg)
resets = {r.offset: r.reset_value for r in peripheral.registers}
peripheral.instances.append(PeripheralInstance(name, addr, resets))
return peripheral
def consume(self, other, parent):
self.instances += other.instances
newname = common_name(self.name, other.name, parent.name)
if newname != self.name:
if newname not in [p.name for p in parent.peripherals]:
self.name = newname
else:
print(f"Warning [{parent.name}]: {self.name} + {other.name} "
f"-> {newname}: name already exists, using {self.name}")
def refactor_common_register_fields(self):
replace = []
to_replace = set()
registers = enumerate(self.registers)
for (idx1, r1), (idx2, r2) in itertools.combinations(registers, 2):
if r1 is r2 or idx1 in to_replace or idx2 in to_replace:
continue
if r1.fields == r2.fields and r1.fields:
replace.append((idx1, idx2))
to_replace.add(idx2)
for idx1, idx2 in replace:
r1 = self.registers[idx1]
r2 = self.registers[idx2]
path = f"super::{r1.name}"
r2.fields = [FieldLink(f, path) for f in r1.fields]
def refactor_aliased_registers(self):
to_delete = set()
registers = enumerate(self.registers)
for (idx1, r1), (idx2, r2) in itertools.combinations(registers, 2):
if r1 is r2 or idx1 in to_delete or idx2 in to_delete:
continue
if r1.offset == r2.offset:
r1.consume(r2, parent=self)
to_delete.add(idx2)
for idx in sorted(to_delete, reverse=True):
del self.registers[idx]
def __lt__(self, other):
return self.name < other.name
class PeripheralPrototypeLink(Node):
def __init__(self, name, prototype, path):
self.name = name
self.prototype = prototype
self.path = path
self.instances = []
self.parent_device_names = []
def to_dict(self):
return {"prototype": self.prototype.name, "path": self.path,
"instances": [x.to_dict() for x in self.instances]}
def to_rust_file(self, path):
desc = "\n//! ".join(self.prototype.desc.split("\n"))
if len(self.parent_device_names) > 1:
desc += "\n//!\n"
desc += "//! Used by: " + ', '.join(
sorted(set(self.parent_device_names)))
preamble = "\n".join([
"#![allow(non_snake_case, non_upper_case_globals)]",
"#![allow(non_camel_case_types)]",
f"//! {desc}",
"",
f"pub use crate::{self.path}::{{RegisterBlock, ResetValues}};",
"#[cfg(not(feature = \"nosync\"))]",
f"pub use crate::{self.path}::{{Instance}};",
"",
])
registers = ", ".join(m.name for m in self.prototype.registers)
registers = f"pub use crate::{self.path}::{{{registers}}};\n"
instances = "\n".join(i.to_rust(self.registers)
for i in sorted(self.instances))
fname = os.path.join(path, f"{self.name}.rs")
with open(fname, "w") as f:
f.write(preamble)
f.write(registers)
f.write("\n")
f.write(instances)
rustfmt(fname)
def to_parent_entry(self):
return f"pub mod {self.name};\n"
def to_struct_entry(self, usename=None):
if usename is None:
usename = self.name
lines = []
for instance in self.instances:
lines.append(f"pub {instance.name}: {usename}::Instance,")
return "\n".join(lines)
def to_struct_steal(self, usename=None):
if usename is None:
usename = self.name
lines = []
for instance in self.instances:
lines.append(
f"{instance.name}: "
f"{usename}::{instance.name}::steal(),")
return "\n".join(lines)
@classmethod
def from_peripherals(cls, p1, p2, path):
plink = cls(p2.name, p1, path)
plink.instances = p2.instances
return plink
@property
def registers(self):
return self.prototype.registers
@property
def desc(self):
return self.prototype.desc
def refactor_common_register_fields(self):
pass
def refactor_common_instances(self):
pass
def refactor_aliased_registers(self):
pass
def __lt__(self, other):
return self.name < other.name
class PeripheralSharedInstanceLink(Node):
    """A device's alias for a peripheral instance shared at family level;
    emits `pub use` re-exports and delegates everything to `prototype`."""
    def __init__(self, name, usename, prototype):
        self.name = name
        # Name the shared instances module is published under.
        self.usename = usename
        self.prototype = prototype
    def to_parent_entry(self):
        """Module re-export line, renaming when the names differ."""
        if self.usename == self.name:
            return f"pub use super::instances::{self.name};\n"
        else:
            return (f"pub use super::instances::{self.usename} "
                    f"as {self.name};\n")
    def to_struct_entry(self):
        """Peripherals-struct field lines, delegated to the prototype."""
        return self.prototype.to_struct_entry(self.name)
    def to_struct_steal(self):
        """Peripherals::steal() initialiser lines, delegated likewise."""
        return self.prototype.to_struct_steal(self.name)
    def to_rust_file(self, path):
        # No file of its own: the shared instances module is written once
        # elsewhere.
        pass
    @property
    def registers(self):
        return self.prototype.registers
    @property
    def desc(self):
        return self.prototype.desc
    def refactor_common_register_fields(self):
        # Refactors run on the prototype, not on links.
        pass
    def refactor_common_instances(self):
        pass
    def refactor_aliased_registers(self):
        pass
    def __lt__(self, other):
        return self.name < other.name
class CPU(Node):
    """The device's Cortex-M core, as read from the SVD <cpu> element."""
    def __init__(self, name, nvic_prio_bits):
        self.name = name
        # Kept as the raw SVD text, not converted to int.
        self.nvic_prio_bits = nvic_prio_bits
    def get_architecture(self):
        """Map the SVD CPU name to its ARM architecture name.

        Returns None for core names not in the table.
        """
        return {
            "CM0": "ARMv6-M",
            "CM0+": "ARMv6-M",
            "CM3": "ARMv7-M",
            "CM4": "ARMv7E-M",
            "CM7": "ARMv7E-M",
            "CM33": "ARMv8-M",
        }.get(self.name)
    def to_dict(self):
        """Serialise to a plain dict."""
        return {"name": self.name, "nvic_prio_bits": self.nvic_prio_bits}
    @classmethod
    def from_svd(cls, svd, node):
        """Construct a CPU from an SVD <cpu> element."""
        return cls(get_string(node, 'name'), node.find('nvicPrioBits').text)
class Interrupt(Node):
    """A single device interrupt: name, description, and vector number."""
    def __init__(self, name, desc, value):
        self.name = name
        self.desc = desc
        self.value = value
    def to_dict(self):
        """Serialise to a plain dict."""
        return {"name": self.name, "desc": self.desc, "value": self.value}
    @classmethod
    def from_svd(cls, svd, node):
        """Construct an Interrupt from an SVD <interrupt> element."""
        return cls(get_string(node, 'name'),
                   get_string(node, 'description'),
                   get_int(node, 'value'))
    def __lt__(self, other):
        # Interrupts sort by vector number.
        return self.value < other.value
class Device(Node):
    """A single microcontroller: its CPU, peripherals, and interrupt table."""

    def __init__(self, name, cpu):
        # Normalise the device name into a valid Rust module identifier.
        self.name = name.lower().replace("-", "_")
        self.cpu = cpu
        self.peripherals = []
        self.interrupts = []
        # Set for the ARMv*-M "devices", which are emitted without an
        # interrupts.rs / device.x of their own (see to_files below).
        self.special = False

    def to_dict(self):
        return {"name": self.name, "cpu": self.cpu.to_dict(),
                "peripherals": [x.to_dict() for x in self.peripherals],
                "interrupts": [x.to_dict() for x in self.interrupts]}

    def to_interrupt_file(self, familypath):
        """Write interrupts.rs: extern handlers, the vector table and enum."""
        devicepath = os.path.join(familypath, self.name)
        iname = os.path.join(devicepath, "interrupts.rs")
        with open(iname, "w") as f:
            f.write('#[cfg(feature="rt")]\nextern "C" {\n')
            for interrupt in self.interrupts:
                f.write(f' fn {interrupt.name}();\n')
            f.write('}\n\n')
            # Build the vector table, padding gaps between used interrupt
            # numbers with reserved entries so each handler lands at its
            # correct offset.
            vectors = []
            offset = 0
            for interrupt in self.interrupts:
                while interrupt.value != offset:
                    vectors.append("Vector { _reserved: 0 },")
                    offset += 1
                vectors.append(f"Vector {{ _handler: {interrupt.name} }},")
                offset += 1
            nvectors = len(vectors)
            vectors = "\n".join(vectors)
            f.write(f"""\
#[doc(hidden)]
pub union Vector {{
_handler: unsafe extern "C" fn(),
_reserved: u32,
}}
#[cfg(feature="rt")]
#[doc(hidden)]
#[link_section=".vector_table.interrupts"]
#[no_mangle]
pub static __INTERRUPTS: [Vector; {nvectors}] = [
{vectors}
];
/// Available interrupts for this device
#[repr(u16)]
#[derive(Copy,Clone,Debug,PartialEq,Eq)]
#[allow(non_camel_case_types)]
pub enum Interrupt {{""")
            for interrupt in self.interrupts:
                f.write(f"/// {interrupt.value}: ")
                f.write(f"{escape_desc(interrupt.desc)}\n")
                f.write(f"{interrupt.name} = {interrupt.value},\n")
            f.write("}\n")
            f.write("""\
unsafe impl external_cortex_m::interrupt::InterruptNumber for Interrupt {
#[inline(always)]
fn number(self) -> u16 {
self as u16
}
}\n""")
        # Generated code is unindented; rustfmt normalises it in place.
        rustfmt(iname)

    def to_files(self, familypath):
        """Write this device's module directory: peripherals, mod.rs, device.x."""
        devicepath = os.path.join(familypath, self.name)
        os.makedirs(devicepath, exist_ok=True)
        for peripheral in self.peripherals:
            peripheral.to_rust_file(devicepath)
        pnames = [p.name for p in self.peripherals]
        dupnames = set(name for name in pnames if pnames.count(name) > 1)
        if dupnames:
            print(f"Warning [{self.name}]: duplicate peripherals: ", end='')
            print(dupnames)
        if not self.special:
            self.to_interrupt_file(familypath)
        mname = os.path.join(devicepath, "mod.rs")
        with open(mname, "w") as f:
            f.write(f"//! stm32ral module for {self.name}\n\n")
            prio_bits = self.cpu.nvic_prio_bits
            if not self.special:
                f.write("/// Number of priority bits implemented by the NVIC")
                f.write(f"\npub const NVIC_PRIO_BITS: u8 = {prio_bits};\n\n")
                f.write("/// Interrupt-related magic for this device\n")
                f.write("pub mod interrupts;\n")
                f.write("pub use self::interrupts::Interrupt;\n")
                f.write("pub use self::interrupts::Interrupt as interrupt;\n")
                f.write("\n\n")
            for peripheral in self.peripherals:
                f.write(peripheral.to_parent_entry())
            f.write("\n\n")
            # Peripherals struct for RTIC users (real fields only when the
            # "nosync" feature is off).
            f.write("#[cfg(all(feature=\"rtic\", not(feature=\"nosync\")))]")
            f.write("\n#[allow(non_snake_case)]\n")
            f.write("pub struct Peripherals {\n")
            for peripheral in self.peripherals:
                f.write(" " + peripheral.to_struct_entry())
            f.write("}\n\n")
            f.write("#[cfg(all(feature=\"rtic\", feature=\"nosync\"))]\n")
            f.write("#[allow(non_snake_case)]\n")
            f.write("pub struct Peripherals {}\n\n")
            f.write("#[cfg(all(feature=\"rtic\", not(feature=\"nosync\")))]")
            f.write("\nimpl Peripherals {\n")
            f.write(" pub unsafe fn steal() -> Self {\n")
            f.write(" Peripherals {\n")
            for peripheral in self.peripherals:
                f.write(" " + peripheral.to_struct_steal())
            f.write(" }\n }\n}\n\n")
            f.write("#[cfg(all(feature=\"rtic\", feature=\"nosync\"))]\n")
            f.write("impl Peripherals {\n pub fn steal() -> Self {\n")
            f.write(" Peripherals {}\n }\n}")
        rustfmt(mname)
        if not self.special:
            # device.x provides weak default handlers for the linker.
            dname = os.path.join(devicepath, "device.x")
            with open(dname, "w") as f:
                for interrupt in self.interrupts:
                    f.write(f"PROVIDE({interrupt.name} = DefaultHandler);\n")

    @classmethod
    def from_svd(cls, svd, device_name):
        """Build a Device from a parsed SVD ElementTree."""
        name = device_name
        try:
            cpu = CPU.from_svd(svd, svd.find('cpu'))
        except AttributeError as e:
            # Re-raise with context: which SVD file lacked CPU information.
            print(device_name)
            raise e
        device = cls(name, cpu)
        register_ctx = RegisterCtx.empty()
        register_ctx = register_ctx.inherit(svd)
        interrupt_nums = set()
        for interrupt in svd.findall('.//interrupt'):
            interrupt = Interrupt.from_svd(svd, interrupt)
            # Skip duplicate interrupt numbers.
            if interrupt.value in interrupt_nums:
                continue
            device.interrupts.append(interrupt)
            interrupt_nums.add(interrupt.value)
        device.interrupts.sort()
        for peripheral in svd.findall('.//peripheral'):
            device.peripherals.append(
                PeripheralPrototype.from_svd(svd, peripheral, register_ctx))
        for peripheral in device.peripherals:
            peripheral.parent_device_names.append(device.name)
        return device

    @classmethod
    def from_svdfile(cls, svdfile):
        """Build a Device from an SVD file path; the name is the file stem."""
        device_name = os.path.basename(svdfile).split('.')[0]
        svd = ET.parse(svdfile)
        return cls.from_svd(svd, device_name)

    def refactor_peripheral_instances(self):
        """Deduplicate peripherals within this device.

        Peripherals with identical register layouts are merged; timers are
        instead kept as separate modules linked to the first one.
        """
        to_delete = set()
        to_link = set()
        links = []
        periphs = enumerate(self.peripherals)
        for (idx1, p1), (idx2, p2) in itertools.combinations(periphs, 2):
            if p1 is p2 or idx1 in to_delete or idx2 in to_delete:
                continue
            elif idx1 in to_link or idx2 in to_link:
                continue
            elif p1.registers == p2.registers:
                if p1.name.startswith("tim"):
                    links.append((idx1, idx2))
                    to_link.add(idx2)
                else:
                    p1.consume(p2, parent=self)
                    to_delete.add(idx2)
        for idx1, idx2 in links:
            p1 = self.peripherals[idx1]
            p2 = self.peripherals[idx2]
            path = f"super::{p1.name}"
            plink = PeripheralPrototypeLink.from_peripherals(p1, p2, path)
            self.peripherals[idx2] = plink
        # Delete highest indices first so earlier indices remain valid.
        for idx in sorted(to_delete, reverse=True):
            del self.peripherals[idx]
class Family(Node):
    """A family of devices (e.g. one STM32 series) plus shared peripherals."""

    def __init__(self, name):
        self.name = name
        self.devices = []
        # Peripheral prototypes hoisted to family level because they are
        # identical across several devices.
        self.peripherals = []
        # Peripheral instances shared by multiple devices.
        self.instances = []

    def to_dict(self):
        return {"name": self.name,
                "devices": [d.to_dict() for d in self.devices],
                "peripherals": [p.to_dict() for p in self.peripherals],
                "instances": [i.to_dict()
                              for i in self.instances]}

    def to_files(self, path, pool):
        """Write the family module tree, farming heavy jobs out to POOL.

        Returns the list of AsyncResult handles so the caller can join them.
        """
        familypath = os.path.join(path, self.name)
        os.makedirs(familypath, exist_ok=True)
        periphpath = os.path.join(familypath, "peripherals")
        instpath = os.path.join(familypath, "instances")
        os.makedirs(periphpath, exist_ok=True)
        os.makedirs(instpath, exist_ok=True)
        pool_results = []
        with open(os.path.join(familypath, "mod.rs"), "w") as f:
            uname = self.name.upper()
            f.write(f"//! Parent module for all {uname} devices.\n\n")
            f.write("/// Peripherals shared by multiple devices\n")
            f.write('pub mod peripherals;\n\n')
            f.write("/// Peripheral instances shared by multiple devices\n")
            f.write("pub(crate) mod instances;\n\n")
            for device in self.devices:
                dname = device.name
                result = pool.apply_async(device.to_files, (familypath,))
                pool_results.append(result)
                f.write(f'#[cfg(any(feature="{dname}", feature="doc"))]\n')
                f.write(f'pub mod {dname};\n\n')
        with open(os.path.join(periphpath, "mod.rs"), "w") as f:
            for peripheral in self.peripherals:
                r = pool.apply_async(peripheral.to_rust_file, (periphpath,))
                pool_results.append(r)
                features = ", ".join(
                    f'feature="{d}"' for d in peripheral.parent_device_names)
                f.write(f'#[cfg(any(feature="doc", {features}))]\n')
                f.write(f'pub mod {peripheral.name};\n\n')
        with open(os.path.join(instpath, "mod.rs"), "w") as f:
            for instance in self.instances:
                r = pool.apply_async(instance.to_rust_file, (instpath,))
                pool_results.append(r)
                features = ", ".join(
                    f'feature="{d}"' for d in instance.parent_device_names)
                f.write(f'#[cfg(any(feature="doc", {features}))]\n')
                f.write(f'pub mod {instance.name};\n\n')
        return pool_results

    def _enumerate_peripherals(self):
        # Flatten to (device_index, peripheral_index, peripheral) triples.
        peripherals = []
        for didx, device in enumerate(self.devices):
            for pidx, peripheral in enumerate(device.peripherals):
                peripherals.append((didx, pidx, peripheral))
        return peripherals

    def _match_peripherals(self):
        # Map the first occurrence of each register layout to the list of
        # later (device_index, peripheral_index) pairs with the same layout.
        to_link = set()
        links = dict()
        peripherals = self._enumerate_peripherals()
        for pt1, pt2 in itertools.combinations(peripherals, 2):
            didx1, pidx1, p1 = pt1
            didx2, pidx2, p2 = pt2
            idx1 = (didx1, pidx1)
            idx2 = (didx2, pidx2)
            if p1 is p2 or idx1 in to_link or idx2 in to_link:
                continue
            elif p1.registers == p2.registers:
                to_link.add(idx2)
                if idx1 not in links:
                    links[idx1] = []
                links[idx1].append(idx2)
        return links

    def refactor_common_peripherals(self):
        """Hoist register layouts shared by several devices to family level."""
        links = self._match_peripherals()
        # First pass: names used by more than one distinct layout must be
        # disambiguated with a _vN suffix below.
        pnames = set()
        dupnames = set()
        for idx in links:
            didx, pidx = idx
            p = self.devices[didx].peripherals[pidx]
            if p.name in pnames:
                dupnames.add(p.name)
            pnames.add(p.name)
        versions = {}
        for idx in links:
            didx, pidx = idx
            device = self.devices[didx]
            p = device.peripherals[pidx]
            name = p.name
            if name in dupnames:
                if name not in versions:
                    versions[name] = 0
                versions[name] += 1
                name = f'{name}_v{versions[name]}'
            familyp = PeripheralPrototype(name, p.desc)
            familyp.registers = p.registers
            familyp.parent_device_names.append(device.name)
            self.peripherals.append(familyp)
            path = f"{self.name}::peripherals::{name}"
            # Replace each matching device peripheral with a link to the
            # hoisted family-level prototype.
            linkp = PeripheralPrototypeLink(p.name, familyp, path)
            linkp.instances = p.instances
            self.devices[didx].peripherals[pidx] = linkp
            for childidx in links[idx]:
                cdidx, cpidx = childidx
                childd = self.devices[cdidx]
                childp = childd.peripherals[cpidx]
                familyp.parent_device_names.append(childd.name)
                linkp = PeripheralPrototypeLink(childp.name, familyp, path)
                linkp.instances = childp.instances
                childd.peripherals[cpidx] = linkp
        self.refactor_common_instances(links)

    def refactor_common_instances(self, links):
        """Additionally share identical instance sets among linked peripherals."""
        to_group = set()
        groups = dict()
        for primary, children in links.items():
            members = [primary] + list(children)
            for l1, l2 in itertools.combinations(members, 2):
                didx1, pidx1 = l1
                didx2, pidx2 = l2
                p1 = self.devices[didx1].peripherals[pidx1]
                p2 = self.devices[didx2].peripherals[pidx2]
                if p1 is p2 or l1 in to_group or l2 in to_group:
                    continue
                elif p1.instances == p2.instances:
                    to_group.add(l2)
                    if l1 not in groups:
                        groups[l1] = []
                    groups[l1].append(l2)
        pnames = set()
        dupnames = set()
        for (didx, pidx) in groups:
            p = self.devices[didx].peripherals[pidx]
            if p.name in pnames:
                dupnames.add(p.name)
            pnames.add(p.name)
        for idx in groups:
            didx, pidx = idx
            d = self.devices[didx]
            p = d.peripherals[pidx]
            name = p.name
            if name in dupnames:
                # Disambiguate using the device-name suffixes of all members.
                name += "_" + d.name[5:]
                for cidx in groups[idx]:
                    cdidx, _ = cidx
                    cd = self.devices[cdidx]
                    name += "_" + cd.name[5:]
            linkp = PeripheralSharedInstanceLink(p.name, name, p)
            self.devices[didx].peripherals[pidx] = linkp
            groupp = p
            groupp.name = name
            groupp.parent_device_names.append(d.name)
            self.instances.append(groupp)
            for cidx in groups[idx]:
                cdidx, cpidx = cidx
                cd = self.devices[cdidx]
                groupp.parent_device_names.append(cd.name)
                cd.peripherals[cpidx] = linkp
class Crate:
    """Top level of the output: all families plus cross-family peripherals."""

    def __init__(self):
        self.families = []
        self.peripherals = []

    def to_dict(self):
        return {"families": [x.to_dict() for x in self.families],
                "peripherals": [x.to_dict() for x in self.peripherals]}

    def write_build_script(self, path):
        """Generate build.rs, selecting the device.x of the enabled feature."""
        devices = []
        for family in self.families:
            for device in family.devices:
                if not device.special:
                    devices.append((family.name, device.name))
        clauses = " else ".join("""\
if env::var_os("CARGO_FEATURE_{}").is_some() {{
"src/{}/{}/device.x"
}}""".format(d.upper(), f, d) for (f, d) in sorted(devices))
        clauses += " else { panic!(\"No device features selected\"); }"
        fname = os.path.join(path, "build.rs")
        with open(fname, "w") as f:
            f.write(BUILD_RS_TEMPLATE.format(device_clauses=clauses))
        rustfmt(fname)

    def to_files(self, path, pool):
        """Write lib.rs, Cargo.toml, build.rs and all family modules.

        Heavy per-module work is submitted to POOL; the AsyncResult handles
        are returned so the caller can join them and surface any errors.
        Raises ValueError if PATH does not contain a src/ directory.
        """
        srcpath = os.path.join(path, 'src')
        if not os.path.isdir(srcpath):
            raise ValueError(f"{srcpath} does not exist")
        periphpath = os.path.join(srcpath, "peripherals")
        os.makedirs(periphpath, exist_ok=True)
        lib_f = open(os.path.join(srcpath, "lib.rs"), "w")
        cargo_f = open(os.path.join(path, "Cargo.toml"), "w")
        periph_f = open(os.path.join(periphpath, "mod.rs"), "w")
        pool_results = []
        # Fix: these three handles were previously never closed; close them
        # in a finally block so output is flushed even on error.
        try:
            lib_f.write(CRATE_LIB_PREAMBLE)
            cargo_f.write(CRATE_CARGO_TOML_PREAMBLE)
            self.write_build_script(path)
            for family in sorted(self.families, key=lambda x: x.name):
                fname = family.name
                pool_results += family.to_files(srcpath, pool)
                features = [f'feature="{d.name}"' for d in family.devices]
                lib_f.write(
                    f'#[cfg(any(feature="doc", {", ".join(features)}))]\n')
                lib_f.write(f'pub mod {fname};\n\n')
                for device in family.devices:
                    dname = device.name
                    arch = device.cpu.get_architecture().lower()
                    arch = arch.replace("-", "")
                    # Special (ARMv*-M) devices depend on no arch feature.
                    if device.special:
                        cargo_f.write(f'{dname} = []\n')
                    else:
                        cargo_f.write(f'{dname} = ["{arch}"]\n')
                    lib_f.write(f'#[cfg(feature="{dname}")]\n')
                    lib_f.write(f'pub use {fname}::{dname}::*;\n\n')
            if self.peripherals:
                lib_f.write("//! Peripherals shared between multiple families\n")
                lib_f.write("pub mod peripherals;\n\n")
            for peripheral in self.peripherals:
                result = pool.apply_async(peripheral.to_rust_file, (periphpath,))
                pool_results.append(result)
                features = ", ".join(
                    f'feature="{d}"' for d in peripheral.parent_device_names)
                periph_f.write(f'#[cfg(any(feature="doc", {features}))]\n')
                periph_f.write(f'pub mod {peripheral.name};\n\n')
        finally:
            lib_f.close()
            cargo_f.close()
            periph_f.close()
        return pool_results
def get_int(node, tag, default=None):
    """Return the child TAG of NODE parsed as an integer.

    Understands SVD conventions: true/false booleans and 0x/0b prefixed
    literals, falling back to decimal. Returns DEFAULT when TAG is absent.
    """
    text = get_string(node, tag, default=default)
    if text == default:
        return text
    text = text.lower().strip()
    booleans = {"true": 1, "false": 0}
    if text in booleans:
        return booleans[text]
    base = {"0x": 16, "0b": 2}.get(text[:2], 10)
    if base != 10:
        return int(text[2:], base)
    return int(text, 10)
def get_string(node, tag, default=None):
    """Return the text of NODE's child TAG with whitespace collapsed.

    Returns DEFAULT unchanged when the tag is missing.
    """
    raw = node.findtext(tag, default=default)
    if raw == default:
        return raw
    # Collapse internal runs of whitespace (SVD descriptions wrap freely).
    return " ".join(raw.split())
def expand_dim(node):
    """Expand an SVD element with dim/dimIncrement/dimIndex into copies.

    Returns [node] when there is no `dim`; otherwise one deep copy per
    index, with the name's %s placeholder substituted and addressOffset
    advanced by dimIncrement for each successive copy.
    """
    dim = get_int(node, 'dim')
    if dim is None:
        return [node]
    inc = get_int(node, 'dimIncrement')
    idxs = get_string(node, 'dimIndex')
    if idxs is None:
        # No explicit index list: use 0..dim-1.
        idxs = list(range(dim))
    else:
        # dimIndex is either a comma list ("A,B,C") or a range ("3-6").
        if "," in idxs:
            idxs = idxs.split(",")
        elif "-" in idxs:
            li, ri = idxs.split("-")
            idxs = list(range(int(li), int(ri)+1))
        else:
            raise ValueError("Unknown dimIndex: '{}'".format(idxs))
    nodes = []
    for cnt, idx in enumerate(idxs):
        name = get_string(node, 'name').replace("%s", str(idx))
        dim_node = copy.deepcopy(node)
        dim_node.find('name').text = name
        addr = get_int(dim_node, 'addressOffset') + cnt * inc
        dim_node.find('addressOffset').text = "0x{:08x}".format(addr)
        # Record which index produced this copy; expand_cluster relies on it.
        dim_node.attrib['dim_index'] = idx
        nodes.append(dim_node)
    return nodes
def expand_cluster(node):
    """Flatten a dim-expanded <cluster> into standalone <register> nodes.

    Each child register gets the cluster's index appended to its name and
    the cluster's addressOffset added to its own. NODE must already have
    been through expand_dim (raises ValueError otherwise).
    """
    if node.attrib.get('dim_index') is None:
        raise ValueError("Can't process a cluster without dim_index set")
    cluster_idx = node.attrib['dim_index']
    cluster_addr = get_int(node, 'addressOffset')
    nodes = []
    for register in node.findall('register'):
        addr = cluster_addr + get_int(register, 'addressOffset')
        name = get_string(register, 'name') + str(cluster_idx)
        clusr = copy.deepcopy(register)
        clusr.find('addressOffset').text = "0x{:08x}".format(addr)
        clusr.find('name').text = name
        nodes.append(clusr)
    return nodes
def escape_desc(desc):
    """Return DESC with square brackets escaped for Rust doc comments.

    None is mapped to the empty string.
    """
    if desc is None:
        return ""
    # Brackets would otherwise be parsed as intra-doc links by rustdoc.
    table = str.maketrans({"[": r"\[", "]": r"\]"})
    return desc.translate(table)
def rustfmt(fname):
    # Format the generated file in place; the exit status is deliberately
    # not checked, so a missing or failing rustfmt leaves the file as-is
    # (subprocess.run without check=True does not raise on non-zero exit).
    subprocess.run(["rustfmt", fname])
def common_name(a, b, ctx=""):
    """Derive a single name shared by peripherals A and B when merging them.

    Falls back to A (after printing a warning) when no sensible common name
    can be found. CTX only labels the warning messages.
    """
    # Hard-wired pairings that the generic logic below cannot derive.
    for x, y in ((a, b), (b, a)):
        if x.startswith("i2s") and x.endswith("ext") and y.startswith("spi"):
            return "spi"
        if x.startswith("usart") and y.startswith("uart"):
            return "usart"
        if x == "adc1_2" and y == "adc3_4":
            return "adc_common"
        if x == "adc12_common" and y == "adc3_common":
            return "adc_common"
        if x.startswith("delay_block_") and y.startswith("delay_block_"):
            return "dlyb"
        if x == "dlyb" and y.startswith("delay_block_"):
            return "dlyb"

    # Positions where the names differ, up to the end of the shorter name.
    diffpos = [i for i, (ca, cb) in enumerate(zip(a, b)) if ca != cb]

    if not diffpos:
        # Names are identical, or one is a prefix of the other.
        if a == b:
            print(f"Warning [{ctx}]: {a} and {b} are identical")
            return a
        if b.startswith(a):
            return a
        if a.startswith(b):
            return b
        return None  # unreachable: no diff implies a prefix relation

    if len(diffpos) == 1:
        # A single differing character: try deleting it from both names.
        p = diffpos[0]
        an = a[:p] + a[p + 1:]
        bn = b[:p] + b[p + 1:]
        if an == bn:
            return an
        print(f"Warning [{ctx}]: {a}->{an} and {b}->{bn} failed")
        return a

    p = diffpos[0]
    # Deleting the first differing character may align the names outright,
    # e.g. I2SEXT vs I2S3EXT.
    if a == b[:p] + b[p + 1:]:
        return a
    # Otherwise fall back to the common prefix, minus a trailing underscore.
    ap = a[:p]
    bp = b[:p]
    if ap.endswith("_"):
        ap = ap[:-1]
        bp = bp[:-1]
    if len(ap) > 0 and ap == bp:
        return ap
    print(f"Warning [{ctx}]: {a}->{ap} and {b}->{bp} failed")
    return a
def parse_args():
    """Parse the command line: a crate root followed by one or more SVDs."""
    ap = argparse.ArgumentParser()
    ap.add_argument("cratepath", help="Path to crate root")
    ap.add_argument("svdfiles", nargs="+", help="SVD files to parse")
    return ap.parse_args()
def main():
    """Parse all SVD files, refactor shared structure, and emit the crate."""
    args = parse_args()
    crate = Crate()
    print("Parsing input files...")
    with multiprocessing.Pool() as p:
        devices = p.map(Device.from_svdfile, args.svdfiles)
    print("Collating families...")
    cortex_family = Family("cortex_m")
    crate.families.append(cortex_family)
    for device in devices:
        # Special case the ARMv*-M SVDs
        if device.name.startswith("armv"):
            device.special = True
            cortex_family.devices.append(device)
        else:
            # The family is named by the first 7 characters of the device.
            device_family = device.name[:7].lower()
            if device_family not in [f.name for f in crate.families]:
                crate.families.append(Family(device_family))
            family = [f for f in crate.families if f.name == device_family][0]
            if device.name in [d.name for d in family.devices]:
                print(f"Warning: {device.name} already exists in {family},"
                      " skipping.")
                continue
            family.devices.append(device)
    print("Running refactors...")
    for device in devices:
        device.refactor_peripheral_instances()
        for peripheral in device.peripherals:
            peripheral.refactor_aliased_registers()
            peripheral.refactor_common_register_fields()
            for register in peripheral.registers:
                register.refactor_common_field_values()
    for family in crate.families:
        family.refactor_common_peripherals()
    print("Outputting crate...")
    pool_results = []
    with multiprocessing.Pool() as pool:
        pool_results += crate.to_files(args.cratepath, pool)
        # Join every async job; .get() re-raises any worker exception.
        for result in pool_results:
            result.get()


if __name__ == "__main__":
    main()
| true | true |
1c389e6e09a465c0e3c1d8b9b5019040cd95e042 | 2,539 | py | Python | setup.py | cryptbytestech/pypassvault | fd9ab73b5bd9bfcb7f3941fa3b82dc3000b40d68 | [
"MIT"
] | 1 | 2018-09-22T22:13:01.000Z | 2018-09-22T22:13:01.000Z | setup.py | cryptbytestech/pypassvault | fd9ab73b5bd9bfcb7f3941fa3b82dc3000b40d68 | [
"MIT"
] | null | null | null | setup.py | cryptbytestech/pypassvault | fd9ab73b5bd9bfcb7f3941fa3b82dc3000b40d68 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Setup for pypassvault project
from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
with open('LICENSE.txt') as f:
license = f.read()
setup(
name='pypassvault',
version='0.1.4',
description='A simple commandline password vault written in python.',
long_description=readme,
long_description_content_type='text/markdown',
author='cryptbytestech',
author_email='cryptbytestech@gmail.com',
url='https://github.com/cryptbytestech/pypassvault',
#license=license,
license="MIT",
#packages=find_packages(exclude=('tests', 'docs'))
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
#'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
keywords='password vault program cryptography',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'invoke>=0.19.0',
'passlib>=1.7.1',
'appdirs>=1.4.3',
'cryptography>=1.8.1',
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
# 'dev': [],
# 'test': [],
},
entry_points = {
'console_scripts': ['passvault=pypassvault.passvault:main'],
},
)
| 31.345679 | 79 | 0.639622 |
from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
with open('LICENSE.txt') as f:
license = f.read()
setup(
name='pypassvault',
version='0.1.4',
description='A simple commandline password vault written in python.',
long_description=readme,
long_description_content_type='text/markdown',
author='cryptbytestech',
author_email='cryptbytestech@gmail.com',
url='https://github.com/cryptbytestech/pypassvault',
license="MIT",
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
keywords='password vault program cryptography',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'invoke>=0.19.0',
'passlib>=1.7.1',
'appdirs>=1.4.3',
'cryptography>=1.8.1',
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
# 'dev': [],
# 'test': [],
},
entry_points = {
'console_scripts': ['passvault=pypassvault.passvault:main'],
},
)
| true | true |
1c38a0219266bb2a7ae22ff441e9711b2c97abde | 6,087 | py | Python | homework/hw03/construct_check.py | QinJiaHao1994/cs61a-sp20-solutions | 71a481bef5ce73a4c6ff746455ccc51dc65453ea | [
"MIT"
] | 8 | 2020-07-28T11:10:49.000Z | 2021-05-29T15:27:17.000Z | homework/hw02/construct_check.py | QinJiaHao1994/cs61a-sp20-solutions | 71a481bef5ce73a4c6ff746455ccc51dc65453ea | [
"MIT"
] | null | null | null | homework/hw02/construct_check.py | QinJiaHao1994/cs61a-sp20-solutions | 71a481bef5ce73a4c6ff746455ccc51dc65453ea | [
"MIT"
] | 1 | 2020-10-23T08:15:08.000Z | 2020-10-23T08:15:08.000Z | from ast import parse, NodeVisitor, Name
# Maps AST node class names to the surface syntax shown in diagnostics.
# 'Recursion' is a pseudo-node handled specially by ExclusionChecker.
_NAMES = {
    'Add': '+',
    'And': 'and',
    'Assert': 'assert',
    'Assign': '=',
    'AugAssign': 'op=',
    'BitAnd': '&',
    'BitOr': '|',
    'BitXor': '^',
    'Break': 'break',
    'Recursion': 'recursive call',
    'ClassDef': 'class',
    'Continue': 'continue',
    'Del': 'del',
    'Delete': 'delete',
    'Dict': '{...}',
    'DictComp': '{...}',
    'Div': '/',
    'Ellipsis': '...',
    'Eq': '==',
    'ExceptHandler': 'except',
    'ExtSlice': '[::]',
    'FloorDiv': '//',
    'For': 'for',
    'FunctionDef': 'def',
    'GeneratorExp': '(... for ...)',
    'Global': 'global',
    'Gt': '>',
    'GtE': '>=',
    'If': 'if',
    'IfExp': '...if...else...',
    'Import': 'import',
    'ImportFrom': 'from ... import ...',
    'In': 'in',
    'Index': '...[...]',
    'Invert': '~',
    'Is': 'is',
    'IsNot': 'is not ',
    'LShift': '<<',
    'Lambda': 'lambda',
    'List': '[...]',
    'ListComp': '[...for...]',
    'Lt': '<',
    'LtE': '<=',
    'Mod': '%',
    'Mult': '*',
    'Nonlocal': 'nonlocal',
    'Not': 'not',
    'NotEq': '!=',
    'NotIn': 'not in',
    'Or': 'or',
    'Pass': 'pass',
    'Pow': '**',
    'RShift': '>>',
    'Raise': 'raise',
    'Return': 'return',
    'Set': '{ ... } (set)',
    'SetComp': '{ ... for ... } (set)',
    'Slice': '[ : ]',
    'Starred': '',
    'Sub': '-',
    'Subscript': '[]',
    'Try': 'try',
    'Tuple': '(... , ... )',
    'UAdd': '+',
    'USub': '-',
    'While': 'while',
    'With': 'with',
    'Yield': 'yield',
    'YieldFrom': 'yield from',
}
def check(source_file, checked_funcs, disallow, source=None):
    """Checks that AST nodes whose type names are present in DISALLOW
    (an object supporting 'in') are not present in the function(s) named
    CHECKED_FUNCS in SOURCE.  By default, SOURCE is the contents of the
    file SOURCE_FILE.  CHECKED_FUNCS is either a string (indicating a single
    name) or an object of some other type that supports 'in'. CHECKED_FUNCS
    may contain __main__ to indicate an entire module.  Prints reports of
    each prohibited node and returns True iff none are found.
    See ast.__dir__() for AST type names.  The special node name 'Recursion'
    checks for overtly recursive calls (i.e., calls of the form NAME(...) where
    NAME is an enclosing def."""
    # Bug fix: SOURCE must be passed by keyword. ExclusionChecker.check's
    # third positional parameter is `disallow`, so passing SOURCE
    # positionally silently replaced the disallow set with the source text
    # and ignored the caller-supplied source.
    return ExclusionChecker(disallow).check(source_file, checked_funcs,
                                            source=source)
class ExclusionChecker(NodeVisitor):
    """An AST visitor that checks that certain constructs are excluded from
    parts of a program.  ExclusionChecker(EXC) checks that AST node types
    whose names are in the sequence or set EXC are not present.  Its check
    method visits nodes in a given function of a source file checking that the
    indicated node types are not used."""

    def __init__(self, disallow=()):
        """DISALLOW is the initial default list of disallowed
        node-type names."""
        self._disallow = set(disallow)
        self._checking = False      # True while inside a checked region
        self._errs = 0              # prohibited constructs found so far

    def generic_visit(self, node):
        # Report any disallowed node type encountered in a checked region.
        if self._checking and type(node).__name__ in self._disallow:
            self._report(node)
        super().generic_visit(node)

    def visit_Module(self, node):
        # "__main__" in the checked names means: check the whole module.
        if "__main__" in self._checked_funcs:
            self._checking = True
            self._checked_name = self._source_file
        super().generic_visit(node)

    def visit_Call(self, node):
        # 'Recursion' is a pseudo-node: flag NAME(...) where NAME is any
        # enclosing def currently on the nesting stack.
        if 'Recursion' in self._disallow and \
           type(node.func) is Name and \
           node.func.id in self._func_nest:
            self._report(node, "should not be recursive")
        self.generic_visit(node)

    def visit_FunctionDef(self, node):
        self._func_nest.append(node.name)
        if self._checking:
            self.generic_visit(node)
        elif node.name in self._checked_funcs:
            # Entering a checked function: enable checking for its body only.
            self._checked_name = "Function " + node.name
            checking0 = self._checking
            self._checking = True
            super().generic_visit(node)
            self._checking = checking0
        self._func_nest.pop()

    def _report(self, node, msg=None):
        node_name = _NAMES.get(type(node).__name__, type(node).__name__)
        if msg is None:
            msg = "should not contain '{}'".format(node_name)
        print("{} {}".format(self._checked_name, msg))
        self._errs += 1

    def errors(self):
        """Returns the number of number of prohibited constructs found in
        the last call to check."""
        return self._errs

    def check(self, source_file, checked_funcs, disallow=None, source=None):
        """Checks that AST nodes whose type names are present in DISALLOW
        (an object supporting the contains test) are not present in
        the function(s) named CHECKED_FUNCS in SOURCE.  By default, SOURCE
        is the contents of the file SOURCE_FILE.  DISALLOW defaults to the
        argument given to the constructor (and resets that value if it is
        present).  CHECKED_FUNCS is either a string (indicating a single
        name) or an object of some other type that supports 'in'.
        CHECKED_FUNCS may contain __main__ to indicate an entire module.
        Prints reports of each prohibited node and returns True iff none
        are found.
        See ast.__dir__() for AST type names.  The special node name
        'Recursion' checks for overtly recursive calls (i.e., calls of the
        form NAME(...) where NAME is an enclosing def."""
        self._checking = False
        self._source_file = source_file
        self._func_nest = []
        if type(checked_funcs) is str:
            self._checked_funcs = {checked_funcs}
        else:
            self._checked_funcs = set(checked_funcs)
        if disallow is not None:
            self._disallow = set(disallow)
        if source is None:
            # Bug fix: the file was previously opened a second time inside
            # the with block and that handle leaked; read via `inp`.
            with open(source_file) as inp:
                p = parse(inp.read(), source_file)
        else:
            p = parse(source, source_file)
        self._errs = 0
        self.visit(p)
        return self._errs == 0
_NAMES = {
'Add': '+',
'And': 'and',
'Assert': 'assert',
'Assign': '=',
'AugAssign': 'op=',
'BitAnd': '&',
'BitOr': '|',
'BitXor': '^',
'Break': 'break',
'Recursion': 'recursive call',
'ClassDef': 'class',
'Continue': 'continue',
'Del': 'del',
'Delete': 'delete',
'Dict': '{...}',
'DictComp': '{...}',
'Div': '/',
'Ellipsis': '...',
'Eq': '==',
'ExceptHandler': 'except',
'ExtSlice': '[::]',
'FloorDiv': '//',
'For': 'for',
'FunctionDef': 'def',
'GeneratorExp': '(... for ...)',
'Global': 'global',
'Gt': '>',
'GtE': '>=',
'If': 'if',
'IfExp': '...if...else...',
'Import': 'import',
'ImportFrom': 'from ... import ...',
'In': 'in',
'Index': '...[...]',
'Invert': '~',
'Is': 'is',
'IsNot': 'is not ',
'LShift': '<<',
'Lambda': 'lambda',
'List': '[...]',
'ListComp': '[...for...]',
'Lt': '<',
'LtE': '<=',
'Mod': '%',
'Mult': '*',
'Nonlocal': 'nonlocal',
'Not': 'not',
'NotEq': '!=',
'NotIn': 'not in',
'Or': 'or',
'Pass': 'pass',
'Pow': '**',
'RShift': '>>',
'Raise': 'raise',
'Return': 'return',
'Set': '{ ... } (set)',
'SetComp': '{ ... for ... } (set)',
'Slice': '[ : ]',
'Starred': '',
'Sub': '-',
'Subscript': '[]',
'Try': 'try',
'Tuple': '(... , ... )',
'UAdd': '+',
'USub': '-',
'While': 'while',
'With': 'with',
'Yield': 'yield',
'YieldFrom': 'yield from',
}
def check(source_file, checked_funcs, disallow, source=None):
return ExclusionChecker(disallow).check(source_file, checked_funcs, source)
class ExclusionChecker(NodeVisitor):
def __init__(self, disallow=()):
self._disallow = set(disallow)
self._checking = False
self._errs = 0
def generic_visit(self, node):
if self._checking and type(node).__name__ in self._disallow:
self._report(node)
super().generic_visit(node)
def visit_Module(self, node):
if "__main__" in self._checked_funcs:
self._checking = True
self._checked_name = self._source_file
super().generic_visit(node)
def visit_Call(self, node):
if 'Recursion' in self._disallow and \
type(node.func) is Name and \
node.func.id in self._func_nest:
self._report(node, "should not be recursive")
self.generic_visit(node)
def visit_FunctionDef(self, node):
self._func_nest.append(node.name)
if self._checking:
self.generic_visit(node)
elif node.name in self._checked_funcs:
self._checked_name = "Function " + node.name
checking0 = self._checking
self._checking = True
super().generic_visit(node)
self._checking = checking0
self._func_nest.pop()
def _report(self, node, msg=None):
node_name = _NAMES.get(type(node).__name__, type(node).__name__)
if msg is None:
msg = "should not contain '{}'".format(node_name)
print("{} {}".format(self._checked_name, msg))
self._errs += 1
def errors(self):
return self._errs
def check(self, source_file, checked_funcs, disallow=None, source=None):
self._checking = False
self._source_file = source_file
self._func_nest = []
if type(checked_funcs) is str:
self._checked_funcs = {checked_funcs}
else:
self._checked_funcs = set(checked_funcs)
if disallow is not None:
self._disallow = set(disallow)
if source is None:
with open(source_file) as inp:
p = parse(open(source_file).read(), source_file)
else:
p = parse(source, source_file)
self._errs = 0
self.visit(p)
return self._errs == 0 | true | true |
1c38a07f1d438c871dd93a7df76d133bbf3990f8 | 72 | py | Python | plugins/jira/komand_jira/actions/assign_issue/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/jira/komand_jira/actions/assign_issue/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/jira/komand_jira/actions/assign_issue/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import AssignIssue
| 24 | 39 | 0.777778 |
from .action import AssignIssue
| true | true |
1c38a1c55c3ead54b40a989169a6a4eabd456b52 | 806 | py | Python | nnuncert/app/uci/_naming.py | pjoachims/nnuncert | 45dede54fdb714926926d719be2c9b9b542b2601 | [
"MIT"
] | 2 | 2021-12-30T06:25:43.000Z | 2022-01-25T00:41:22.000Z | nnuncert/app/uci/_naming.py | pjoachims/nnuncert | 45dede54fdb714926926d719be2c9b9b542b2601 | [
"MIT"
] | 1 | 2022-01-25T00:35:28.000Z | 2022-03-28T15:23:16.000Z | nnuncert/app/uci/_naming.py | pjoachims/nnuncert | 45dede54fdb714926926d719be2c9b9b542b2601 | [
"MIT"
] | null | null | null | def type2name(model) -> str:
"""Get name of model."""
if isinstance(model, DNNCModel):
modelstr = "DNNC"
if hasattr(model, "dnnc"):
suffix = {"ridge": "-R",
"horseshoe": "-HS",}[model.dnnc.dnnc_type]
modelstr = modelstr + suffix
return modelstr
elif isinstance(model, GPModel):
return "GP-ReLU"
elif isinstance(model, MCDropout):
return "MC dropout"
elif isinstance(model, PNN):
return "PNN"
elif isinstance(model, PNNEnsemble):
return "PNN-E"
elif isinstance(model, NLM):
return "NLM"
elif isinstance(model, NLMEnsemble):
return "NLM-E"
elif isinstance(model, PBPModel):
return "PBP"
else:
raise ValueError("Type not recognized.")
| 31 | 64 | 0.573201 | def type2name(model) -> str:
if isinstance(model, DNNCModel):
modelstr = "DNNC"
if hasattr(model, "dnnc"):
suffix = {"ridge": "-R",
"horseshoe": "-HS",}[model.dnnc.dnnc_type]
modelstr = modelstr + suffix
return modelstr
elif isinstance(model, GPModel):
return "GP-ReLU"
elif isinstance(model, MCDropout):
return "MC dropout"
elif isinstance(model, PNN):
return "PNN"
elif isinstance(model, PNNEnsemble):
return "PNN-E"
elif isinstance(model, NLM):
return "NLM"
elif isinstance(model, NLMEnsemble):
return "NLM-E"
elif isinstance(model, PBPModel):
return "PBP"
else:
raise ValueError("Type not recognized.")
| true | true |
1c38a25e8b17d5f873d08cc1d1a8173064984d5e | 456 | py | Python | provisioner/migrations/0003_subscription_in_process.py | uw-it-aca/msca-provisioner | 6edc5468da21bad0172787ca8fdab1d93d9d0127 | [
"Apache-2.0"
] | 1 | 2018-09-07T14:43:40.000Z | 2018-09-07T14:43:40.000Z | provisioner/migrations/0003_subscription_in_process.py | uw-it-aca/msca-provisioner | 6edc5468da21bad0172787ca8fdab1d93d9d0127 | [
"Apache-2.0"
] | null | null | null | provisioner/migrations/0003_subscription_in_process.py | uw-it-aca/msca-provisioner | 6edc5468da21bad0172787ca8fdab1d93d9d0127 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-13 21:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('provisioner', '0002_auto_20160504_2102'),
]
operations = [
migrations.AddField(
model_name='subscription',
name='in_process',
field=models.NullBooleanField(),
),
]
| 21.714286 | 51 | 0.622807 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('provisioner', '0002_auto_20160504_2102'),
]
operations = [
migrations.AddField(
model_name='subscription',
name='in_process',
field=models.NullBooleanField(),
),
]
| true | true |
1c38a2f6663c6d6e6a23b12415a725623b04db53 | 6,720 | py | Python | pangtreebuild/tests/tests_affinity_tree/tests_affinity.py | meoke/PangTreeBuild | 7cafb76df32c559a76ed1d269699dc0e52313312 | [
"MIT"
] | 2 | 2019-09-04T20:01:28.000Z | 2019-12-23T22:41:57.000Z | pangtreebuild/tests/tests_affinity_tree/tests_affinity.py | meoke/PangTreeBuild | 7cafb76df32c559a76ed1d269699dc0e52313312 | [
"MIT"
] | 2 | 2019-08-10T16:18:01.000Z | 2019-10-28T21:40:23.000Z | pangtreebuild/tests/tests_affinity_tree/tests_affinity.py | meoke/PangTreeBuild | 7cafb76df32c559a76ed1d269699dc0e52313312 | [
"MIT"
] | 2 | 2020-04-23T23:57:52.000Z | 2020-07-12T17:09:02.000Z | import unittest
from typing import List
from ddt import unpack, data, ddt
from pangtreebuild.affinity_tree import builders as at_builders
from pangtreebuild.affinity_tree import parameters as at_params
from pangtreebuild.pangenome import graph
from pangtreebuild.pangenome.parameters import msa
def sid(x): return msa.SequenceID(x)
def nid(x): return graph.NodeID(x)
def b(x): return graph.Base(x)
@ddt
class AffinityTreeGenerationTests(unittest.TestCase):
@data((at_params.P(0.5), graph.Compatibility(0.836660026534076)),
(at_params.P(1), graph.Compatibility(0.7)),
(at_params.P(4), graph.Compatibility(0.6561)))
@unpack
def test_1_p_parameter_influence(self,
p: at_params.P,
expected_cutoff: graph.Compatibility):
nodes = [graph.Node(node_id=nid(0), base=b('T'), aligned_to=None),
graph.Node(node_id=nid(1), base=b('A'), aligned_to=None),
graph.Node(node_id=nid(2), base=b('G'), aligned_to=None),
graph.Node(node_id=nid(3), base=b('A'), aligned_to=None),
graph.Node(node_id=nid(4), base=b('C'), aligned_to=None),
graph.Node(node_id=nid(5), base=b('A'), aligned_to=None),
graph.Node(node_id=nid(6), base=b('C'), aligned_to=None),
graph.Node(node_id=nid(7), base=b('G'), aligned_to=None),
graph.Node(node_id=nid(8), base=b('T'), aligned_to=None),
graph.Node(node_id=nid(9), base=b('A'), aligned_to=None)
]
sequences = {
msa.SequenceID('seq0'):
graph.Sequence(msa.SequenceID('seq0'),
[graph.SeqPath([*map(nid, [10, 11, 12, 13, 14, 15, 16, 17, 18, 9])])],
graph.SequenceMetadata({})),
msa.SequenceID('seq1'):
graph.Sequence(msa.SequenceID('seq1'),
[graph.SeqPath([*map(nid, [10, 11, 12, 13, 14, 15, 16, 17, 8, 9])])],
graph.SequenceMetadata({})),
msa.SequenceID('seq2'):
graph.Sequence(msa.SequenceID('seq2'),
[graph.SeqPath([*map(nid, [10, 11, 12, 13, 14, 15, 16, 7, 8, 9])])],
graph.SequenceMetadata({})),
msa.SequenceID('seq3'):
graph.Sequence(msa.SequenceID('seq3'),
[graph.SeqPath([*map(nid, [10, 11, 12, 3, 4, 5, 6, 7, 8, 9])])],
graph.SequenceMetadata({})),
msa.SequenceID('seq4'):
graph.Sequence(msa.SequenceID('seq3'),
[graph.SeqPath([*map(nid, [10, 11, 2, 3, 4, 5, 6, 7, 8, 9])])],
graph.SequenceMetadata({}))
}
poagraph = graph.Poagraph(nodes, sequences)
consensus_path = graph.SeqPath([*map(nid, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19])])
compatibilities = poagraph.get_compatibilities(poagraph.get_sequences_ids(),
consensus_path,
p)
actual_cutoff = at_builders._find_node_cutoff([c for c in compatibilities.values()], []).cutoff
self.assertAlmostEqual(expected_cutoff.value, actual_cutoff.value)
@data(
# single compatibility value
(0.5, [graph.Compatibility(0.5)]),
# two compatibilities values
(0.7, [graph.Compatibility(0.5), graph.Compatibility(0.7)]),
(1, [graph.Compatibility(1), graph.Compatibility(0.45)]),
(0.9, [graph.Compatibility(0.9), graph.Compatibility(0.5)]),
# repeated values
(0.7, [*map(graph.Compatibility, [0.5, 0.7, 0.7])]),
(0.9, [*map(graph.Compatibility, [0.9, 0.5, 0.5])]),
(1, [*map(graph.Compatibility, [0.45, 1, 0.45, 0.45])]),
# many unique compatibilities values
(.8, [*map(graph.Compatibility, [.3, .4, .8])]),
(0.91, [*map(graph.Compatibility, [0.31, 0.32, 0.91, 0.92, 0.93, 0.97])]),
(0.91, [*map(graph.Compatibility, [0.29, 0.3, 0.33, 0.91, 0.92, 0.93, 0.97])]),
(1, [*map(graph.Compatibility, [0.81, 0.75, 0.8, 0.81, 1])]),
(0.9, [*map(graph.Compatibility, [0.5, 0.9, 0.99])]),
(0.7, [*map(graph.Compatibility, [0.2, 0.85, 0.7, 0.8])]),
(0.99, [*map(graph.Compatibility, [0.99, 0.9, 0.99])]),
(0.99, [*map(graph.Compatibility, [0.99])]),
# repeated distance between values
(.4, [*map(graph.Compatibility, [.3, .4, .5])]),
# all the same values
(.1, [*map(graph.Compatibility, [.1, .1, .1])])
)
@unpack
def test_2_find_cutoff_no_so_far_values(self,
expected_cutoff: float,
compatibilities: List[graph.Compatibility]):
actual_cutoff = at_builders._find_node_cutoff(compatibilities, []).cutoff
self.assertEqual(expected_cutoff, actual_cutoff.value)
def test_3_find_cutoff_no_compatibilities(self):
with self.assertRaises(ValueError) as err:
_ = at_builders._find_node_cutoff([], []).cutoff
self.assertEqual(str(err.exception), """Empty compatibilities list.
Cannot find cutoff.""")
@data(
# guard <= all compatibilities
(0.2, [0.2, 0.7, 0.8, 0.85], [0.1, 0.01, 0]),
(0.7, [0.7, 0.85, 0.7, 0.8], [0.1, 0.01, 0]),
(0.8, [0.7, 0.7, 0.85, 0.8], [0.85, 0.91, 1.0]),
# guard > all compatibilities
(0.6, [0.3, 0.6, 0.61, 0.61], [0.99]), # big distance to guard
(0.9, [0.2, 0.97, 0.98, 0.9], [0.99]), # small distance to guard
# guard between compatibilities
(0.5, [0.2, 0.57, 0.58, 0.5], [0.55]), # take smaller than guard
(0.58, [0.2, 0.27, 0.58, 0.2], [0.55]), # take greater than guard
(0.55, [0.2, 0.58, 0.27, 0.55], [0.55]) # take equal to guard
)
@unpack
def test_4_find_cutoff_with_so_far_values(self,
expected_cutoff,
compatibilities,
so_far_cutoffs):
compatibilities = [graph.Compatibility(c) for c in compatibilities]
so_far_cutoffs = [graph.Compatibility(c) for c in so_far_cutoffs]
actual_cutoff = at_builders._find_node_cutoff(compatibilities, so_far_cutoffs).cutoff
self.assertEqual(expected_cutoff, actual_cutoff.value)
if __name__ == '__main__':
unittest.main()
| 45.405405 | 103 | 0.529464 | import unittest
from typing import List
from ddt import unpack, data, ddt
from pangtreebuild.affinity_tree import builders as at_builders
from pangtreebuild.affinity_tree import parameters as at_params
from pangtreebuild.pangenome import graph
from pangtreebuild.pangenome.parameters import msa
def sid(x): return msa.SequenceID(x)
def nid(x): return graph.NodeID(x)
def b(x): return graph.Base(x)
@ddt
class AffinityTreeGenerationTests(unittest.TestCase):
@data((at_params.P(0.5), graph.Compatibility(0.836660026534076)),
(at_params.P(1), graph.Compatibility(0.7)),
(at_params.P(4), graph.Compatibility(0.6561)))
@unpack
def test_1_p_parameter_influence(self,
p: at_params.P,
expected_cutoff: graph.Compatibility):
nodes = [graph.Node(node_id=nid(0), base=b('T'), aligned_to=None),
graph.Node(node_id=nid(1), base=b('A'), aligned_to=None),
graph.Node(node_id=nid(2), base=b('G'), aligned_to=None),
graph.Node(node_id=nid(3), base=b('A'), aligned_to=None),
graph.Node(node_id=nid(4), base=b('C'), aligned_to=None),
graph.Node(node_id=nid(5), base=b('A'), aligned_to=None),
graph.Node(node_id=nid(6), base=b('C'), aligned_to=None),
graph.Node(node_id=nid(7), base=b('G'), aligned_to=None),
graph.Node(node_id=nid(8), base=b('T'), aligned_to=None),
graph.Node(node_id=nid(9), base=b('A'), aligned_to=None)
]
sequences = {
msa.SequenceID('seq0'):
graph.Sequence(msa.SequenceID('seq0'),
[graph.SeqPath([*map(nid, [10, 11, 12, 13, 14, 15, 16, 17, 18, 9])])],
graph.SequenceMetadata({})),
msa.SequenceID('seq1'):
graph.Sequence(msa.SequenceID('seq1'),
[graph.SeqPath([*map(nid, [10, 11, 12, 13, 14, 15, 16, 17, 8, 9])])],
graph.SequenceMetadata({})),
msa.SequenceID('seq2'):
graph.Sequence(msa.SequenceID('seq2'),
[graph.SeqPath([*map(nid, [10, 11, 12, 13, 14, 15, 16, 7, 8, 9])])],
graph.SequenceMetadata({})),
msa.SequenceID('seq3'):
graph.Sequence(msa.SequenceID('seq3'),
[graph.SeqPath([*map(nid, [10, 11, 12, 3, 4, 5, 6, 7, 8, 9])])],
graph.SequenceMetadata({})),
msa.SequenceID('seq4'):
graph.Sequence(msa.SequenceID('seq3'),
[graph.SeqPath([*map(nid, [10, 11, 2, 3, 4, 5, 6, 7, 8, 9])])],
graph.SequenceMetadata({}))
}
poagraph = graph.Poagraph(nodes, sequences)
consensus_path = graph.SeqPath([*map(nid, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19])])
compatibilities = poagraph.get_compatibilities(poagraph.get_sequences_ids(),
consensus_path,
p)
actual_cutoff = at_builders._find_node_cutoff([c for c in compatibilities.values()], []).cutoff
self.assertAlmostEqual(expected_cutoff.value, actual_cutoff.value)
@data(
(0.5, [graph.Compatibility(0.5)]),
(0.7, [graph.Compatibility(0.5), graph.Compatibility(0.7)]),
(1, [graph.Compatibility(1), graph.Compatibility(0.45)]),
(0.9, [graph.Compatibility(0.9), graph.Compatibility(0.5)]),
(0.7, [*map(graph.Compatibility, [0.5, 0.7, 0.7])]),
(0.9, [*map(graph.Compatibility, [0.9, 0.5, 0.5])]),
(1, [*map(graph.Compatibility, [0.45, 1, 0.45, 0.45])]),
(.8, [*map(graph.Compatibility, [.3, .4, .8])]),
(0.91, [*map(graph.Compatibility, [0.31, 0.32, 0.91, 0.92, 0.93, 0.97])]),
(0.91, [*map(graph.Compatibility, [0.29, 0.3, 0.33, 0.91, 0.92, 0.93, 0.97])]),
(1, [*map(graph.Compatibility, [0.81, 0.75, 0.8, 0.81, 1])]),
(0.9, [*map(graph.Compatibility, [0.5, 0.9, 0.99])]),
(0.7, [*map(graph.Compatibility, [0.2, 0.85, 0.7, 0.8])]),
(0.99, [*map(graph.Compatibility, [0.99, 0.9, 0.99])]),
(0.99, [*map(graph.Compatibility, [0.99])]),
(.4, [*map(graph.Compatibility, [.3, .4, .5])]),
(.1, [*map(graph.Compatibility, [.1, .1, .1])])
)
@unpack
def test_2_find_cutoff_no_so_far_values(self,
expected_cutoff: float,
compatibilities: List[graph.Compatibility]):
actual_cutoff = at_builders._find_node_cutoff(compatibilities, []).cutoff
self.assertEqual(expected_cutoff, actual_cutoff.value)
def test_3_find_cutoff_no_compatibilities(self):
with self.assertRaises(ValueError) as err:
_ = at_builders._find_node_cutoff([], []).cutoff
self.assertEqual(str(err.exception), """Empty compatibilities list.
Cannot find cutoff.""")
@data(
(0.2, [0.2, 0.7, 0.8, 0.85], [0.1, 0.01, 0]),
(0.7, [0.7, 0.85, 0.7, 0.8], [0.1, 0.01, 0]),
(0.8, [0.7, 0.7, 0.85, 0.8], [0.85, 0.91, 1.0]),
(0.6, [0.3, 0.6, 0.61, 0.61], [0.99]),
(0.9, [0.2, 0.97, 0.98, 0.9], [0.99]),
(0.5, [0.2, 0.57, 0.58, 0.5], [0.55]),
(0.58, [0.2, 0.27, 0.58, 0.2], [0.55]),
(0.55, [0.2, 0.58, 0.27, 0.55], [0.55])
)
@unpack
def test_4_find_cutoff_with_so_far_values(self,
expected_cutoff,
compatibilities,
so_far_cutoffs):
compatibilities = [graph.Compatibility(c) for c in compatibilities]
so_far_cutoffs = [graph.Compatibility(c) for c in so_far_cutoffs]
actual_cutoff = at_builders._find_node_cutoff(compatibilities, so_far_cutoffs).cutoff
self.assertEqual(expected_cutoff, actual_cutoff.value)
if __name__ == '__main__':
unittest.main()
| true | true |
1c38a55c25a9e9c64f3f209e10c39739dc9d7a92 | 1,784 | py | Python | tests/test_roles.py | kugiyasan/discordBot | 647fbcaa8686e99774eddeb57359730196a4f65f | [
"MIT"
] | 3 | 2020-07-05T21:37:07.000Z | 2021-09-21T11:11:45.000Z | tests/test_roles.py | kugiyasan/discordBot | 647fbcaa8686e99774eddeb57359730196a4f65f | [
"MIT"
] | null | null | null | tests/test_roles.py | kugiyasan/discordBot | 647fbcaa8686e99774eddeb57359730196a4f65f | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
import discord.ext.test as dpytest
import pytest
from typing import List
# TODO addrole, delrole, manageaddrole, managedelrole
# addrole:
# when len(roles) == 0
# when len(roles) == 1
# when len(roles) > 10
class State:
http = None
async def create_random_roles(guild: discord.Guild) -> List[discord.Role]:
names = (
"Red",
"Green",
"Blue",
"Orange",
"Yellow",
"Purple",
"Owner",
"Member",
"A Random Role",
"I don't have ideas anymore",
)
roles = []
for i in range(len(names)):
data = {"id": i, "name": names[i]}
role = discord.Role(guild=guild, state=State(), data=data)
roles.append(role)
await dpytest.create_role_callback(guild, role)
return roles
# @pytest.mark.asyncio
# async def test_addrole(bot, prefix):
# await create_random_roles(bot.guilds[0])
# await dpytest.message(f"{prefix} addrole")
@pytest.mark.asyncio
async def test_manageaddrole(bot: commands.Bot, prefix: str):
guild: discord.Guild = bot.guilds[0]
channel: discord.TextChannel = guild.text_channels[0]
member: discord.Member = guild.members[0]
# roles = await create_random_roles(guild)
await create_random_roles(guild)
await dpytest.set_permission_overrides(guild.me, channel, manage_roles=True)
await channel.set_permissions(member, administrator=True)
message: discord.Message = await channel.send(f"{prefix} manageaddrole")
# dpytest.verify_embed(embed1, allow_text=True)
await dpytest.run_all_events()
await message.add_reaction("1️⃣")
assert message.reactions == [], "The message's reactions aren't deleted"
# assert len(member.roles) == 2
| 27.030303 | 80 | 0.662556 | import discord
from discord.ext import commands
import discord.ext.test as dpytest
import pytest
from typing import List
class State:
http = None
async def create_random_roles(guild: discord.Guild) -> List[discord.Role]:
names = (
"Red",
"Green",
"Blue",
"Orange",
"Yellow",
"Purple",
"Owner",
"Member",
"A Random Role",
"I don't have ideas anymore",
)
roles = []
for i in range(len(names)):
data = {"id": i, "name": names[i]}
role = discord.Role(guild=guild, state=State(), data=data)
roles.append(role)
await dpytest.create_role_callback(guild, role)
return roles
# @pytest.mark.asyncio
# async def test_addrole(bot, prefix):
# await create_random_roles(bot.guilds[0])
# await dpytest.message(f"{prefix} addrole")
@pytest.mark.asyncio
async def test_manageaddrole(bot: commands.Bot, prefix: str):
guild: discord.Guild = bot.guilds[0]
channel: discord.TextChannel = guild.text_channels[0]
member: discord.Member = guild.members[0]
# roles = await create_random_roles(guild)
await create_random_roles(guild)
await dpytest.set_permission_overrides(guild.me, channel, manage_roles=True)
await channel.set_permissions(member, administrator=True)
message: discord.Message = await channel.send(f"{prefix} manageaddrole")
# dpytest.verify_embed(embed1, allow_text=True)
await dpytest.run_all_events()
await message.add_reaction("1️⃣")
assert message.reactions == [], "The message's reactions aren't deleted"
# assert len(member.roles) == 2
| true | true |
1c38a57d15a68d2a9775967e932551dadd920d4f | 3,389 | py | Python | interlayer.py | AlExKrYt/Polyglot | 509a53ade5da7af07b1c33514cbd219c0b672fd5 | [
"MIT"
] | null | null | null | interlayer.py | AlExKrYt/Polyglot | 509a53ade5da7af07b1c33514cbd219c0b672fd5 | [
"MIT"
] | null | null | null | interlayer.py | AlExKrYt/Polyglot | 509a53ade5da7af07b1c33514cbd219c0b672fd5 | [
"MIT"
] | null | null | null | import json
import os
import sys
import traceback
from google.cloud import translate
import logger
json_key = ""
project_name = ""
translator: translate.TranslationServiceClient
lang_list = {}
class BadTrgLangException(Exception):
pass
class TooManyRequestException(Exception):
pass
class EqualLangsException(Exception):
pass
class BadSrcLangException(Exception):
pass
class TooLongMsg(Exception):
pass
class UnkTransException(Exception):
pass
def init_dialog_api(config):
keypath = input("Please, write path to your JSON Google API Key (optional, key.json as default): ")
if keypath == "":
keypath = "key.json"
config.set("Polyglot", "keypath", keypath)
return config
def api_init(config):
global project_name, json_key
try:
json_key = config["Polyglot"]["keypath"]
except KeyError:
raise
if not os.path.isfile(json_key):
logger.write_log("ERR: JSON file wasn't found! Bot will close!")
sys.exit(1)
try:
project_name = "projects/" + json.load(open(json_key, 'r')).get("project_id")
except Exception as e:
logger.write_log("ERR: Project name isn't readable from JSON! Bot will close!")
logger.write_log("ERR: " + str(e) + "\n" + traceback.format_exc())
sys.exit(1)
return config
def translate_init():
global translator
try:
translator = translate.TranslationServiceClient.from_service_account_json(json_key)
except Exception as e:
logger.write_log("ERR: Translator object wasn't created successful! Bot will close! "
"Please check your JSON key or Google Cloud settings.")
logger.write_log("ERR: " + str(e) + "\n" + traceback.format_exc())
sys.exit(1)
def extract_lang(text):
try:
return translator.detect_language(parent=project_name, content=text, timeout=10).languages[0].language_code
except Exception as e:
logger.write_log("ERR: " + str(e) + "\n" + traceback.format_exc())
raise UnkTransException
def list_of_langs():
global lang_list
lang_buffer = translator.get_supported_languages(parent=project_name, display_language_code="en", timeout=10)
for lang in lang_buffer.languages:
lang_list.update({lang.language_code: lang.display_name})
def get_translate(input_text: str, target_lang: str, distorting=False, src_lang=None):
try:
trans_result = translator.translate_text(parent=project_name, contents=[input_text],
target_language_code=target_lang, source_language_code=src_lang,
mime_type="text/plain", timeout=10).translations[0].translated_text
except Exception as e:
if str(e) in "400 Target language is invalid.":
raise BadTrgLangException
if str(e) in "400 Target language can't be equal to source language.":
raise EqualLangsException
if str(e) in "400 Source language is invalid.":
raise BadSrcLangException
else:
logger.write_log("ERR: " + str(e) + "\n" + traceback.format_exc())
raise UnkTransException
if len(trans_result) > 4096 and distorting is False:
logger.write_log("WARN: too long message for sending.")
raise TooLongMsg
return trans_result
| 28.965812 | 116 | 0.663913 | import json
import os
import sys
import traceback
from google.cloud import translate
import logger
json_key = ""
project_name = ""
translator: translate.TranslationServiceClient
lang_list = {}
class BadTrgLangException(Exception):
pass
class TooManyRequestException(Exception):
pass
class EqualLangsException(Exception):
pass
class BadSrcLangException(Exception):
pass
class TooLongMsg(Exception):
pass
class UnkTransException(Exception):
pass
def init_dialog_api(config):
keypath = input("Please, write path to your JSON Google API Key (optional, key.json as default): ")
if keypath == "":
keypath = "key.json"
config.set("Polyglot", "keypath", keypath)
return config
def api_init(config):
global project_name, json_key
try:
json_key = config["Polyglot"]["keypath"]
except KeyError:
raise
if not os.path.isfile(json_key):
logger.write_log("ERR: JSON file wasn't found! Bot will close!")
sys.exit(1)
try:
project_name = "projects/" + json.load(open(json_key, 'r')).get("project_id")
except Exception as e:
logger.write_log("ERR: Project name isn't readable from JSON! Bot will close!")
logger.write_log("ERR: " + str(e) + "\n" + traceback.format_exc())
sys.exit(1)
return config
def translate_init():
global translator
try:
translator = translate.TranslationServiceClient.from_service_account_json(json_key)
except Exception as e:
logger.write_log("ERR: Translator object wasn't created successful! Bot will close! "
"Please check your JSON key or Google Cloud settings.")
logger.write_log("ERR: " + str(e) + "\n" + traceback.format_exc())
sys.exit(1)
def extract_lang(text):
try:
return translator.detect_language(parent=project_name, content=text, timeout=10).languages[0].language_code
except Exception as e:
logger.write_log("ERR: " + str(e) + "\n" + traceback.format_exc())
raise UnkTransException
def list_of_langs():
global lang_list
lang_buffer = translator.get_supported_languages(parent=project_name, display_language_code="en", timeout=10)
for lang in lang_buffer.languages:
lang_list.update({lang.language_code: lang.display_name})
def get_translate(input_text: str, target_lang: str, distorting=False, src_lang=None):
try:
trans_result = translator.translate_text(parent=project_name, contents=[input_text],
target_language_code=target_lang, source_language_code=src_lang,
mime_type="text/plain", timeout=10).translations[0].translated_text
except Exception as e:
if str(e) in "400 Target language is invalid.":
raise BadTrgLangException
if str(e) in "400 Target language can't be equal to source language.":
raise EqualLangsException
if str(e) in "400 Source language is invalid.":
raise BadSrcLangException
else:
logger.write_log("ERR: " + str(e) + "\n" + traceback.format_exc())
raise UnkTransException
if len(trans_result) > 4096 and distorting is False:
logger.write_log("WARN: too long message for sending.")
raise TooLongMsg
return trans_result
| true | true |
1c38a5a2d01b93136809060cd6c7c78965f8875f | 427 | py | Python | vertica_python/vertica/messages/backend_messages/ready_for_query.py | jbfavre/vertica-python | c53ffc49a971e9a806679f95e8680847120f49e4 | [
"MIT"
] | 1 | 2016-10-01T20:28:31.000Z | 2016-10-01T20:28:31.000Z | vertica_python/vertica/messages/backend_messages/ready_for_query.py | jbfavre/vertica-python | c53ffc49a971e9a806679f95e8680847120f49e4 | [
"MIT"
] | null | null | null | vertica_python/vertica/messages/backend_messages/ready_for_query.py | jbfavre/vertica-python | c53ffc49a971e9a806679f95e8680847120f49e4 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from struct import unpack
from vertica_python.vertica.messages.message import BackendMessage
class ReadyForQuery(BackendMessage):
STATUSES = {
'I': 'no_transaction',
'T': 'in_transaction',
'E': 'failed_transaction'
}
def __init__(self, data):
self.transaction_status = self.STATUSES[unpack('c', data)[0]]
ReadyForQuery._message_id('Z')
| 20.333333 | 69 | 0.693208 | from __future__ import absolute_import
from struct import unpack
from vertica_python.vertica.messages.message import BackendMessage
class ReadyForQuery(BackendMessage):
STATUSES = {
'I': 'no_transaction',
'T': 'in_transaction',
'E': 'failed_transaction'
}
def __init__(self, data):
self.transaction_status = self.STATUSES[unpack('c', data)[0]]
ReadyForQuery._message_id('Z')
| true | true |
1c38a5f6bb31cfbfd3f0862ca82857e16497d1f9 | 795 | py | Python | app/core/admin.py | abidahmadq/recipe-app-api | 29bd29ade5bea6107745a8d620db6ce7a76bfc65 | [
"MIT"
] | null | null | null | app/core/admin.py | abidahmadq/recipe-app-api | 29bd29ade5bea6107745a8d620db6ce7a76bfc65 | [
"MIT"
] | null | null | null | app/core/admin.py | abidahmadq/recipe-app-api | 29bd29ade5bea6107745a8d620db6ce7a76bfc65 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from core import models
from django.utils.translation import gettext as _
# Register your models here.
class UserAdmin(BaseUserAdmin):
ordering = ['id']
list_display = ['email', 'name']
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Personal Info'), {'fields': ('name', )}),
(
_('Permissions'),
{'fields': ('is_active', 'is_staff', 'is_superuser')}
),
(_('Important dates'), {'fields': ('last_login', )})
)
add_fieldsets = (
(None, {
'classes': ('wide', ),
'fields': ('email', 'password1', 'password2')
}),
)
admin.site.register(models.User, UserAdmin) | 29.444444 | 65 | 0.568553 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from core import models
from django.utils.translation import gettext as _
class UserAdmin(BaseUserAdmin):
ordering = ['id']
list_display = ['email', 'name']
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Personal Info'), {'fields': ('name', )}),
(
_('Permissions'),
{'fields': ('is_active', 'is_staff', 'is_superuser')}
),
(_('Important dates'), {'fields': ('last_login', )})
)
add_fieldsets = (
(None, {
'classes': ('wide', ),
'fields': ('email', 'password1', 'password2')
}),
)
admin.site.register(models.User, UserAdmin) | true | true |
1c38a65740967a1e49c94a99e84549d3470de0b7 | 493 | py | Python | TwoPointers/Leetcode11.py | Rylie-W/LeetRecord | 623c4efe88b3af54b8a65f6ec23db850b8c6f46f | [
"Apache-2.0"
] | null | null | null | TwoPointers/Leetcode11.py | Rylie-W/LeetRecord | 623c4efe88b3af54b8a65f6ec23db850b8c6f46f | [
"Apache-2.0"
] | null | null | null | TwoPointers/Leetcode11.py | Rylie-W/LeetRecord | 623c4efe88b3af54b8a65f6ec23db850b8c6f46f | [
"Apache-2.0"
] | null | null | null | class Solution:
def maxArea(self, height) -> int:
left=0
right=len(height)-1
res=min(height[left],height[right])*(right-left)
while right>left:
res=max(res,(right-left)*min(height[right],height[left]))
if height[left]<height[right]:
left+=1
else: right-=1
return res
if __name__ == '__main__':
sol=Solution()
# height = [1, 1]
height=[1,3,2,5,25,24,5]
print(sol.maxArea(height))
| 25.947368 | 69 | 0.543611 | class Solution:
def maxArea(self, height) -> int:
left=0
right=len(height)-1
res=min(height[left],height[right])*(right-left)
while right>left:
res=max(res,(right-left)*min(height[right],height[left]))
if height[left]<height[right]:
left+=1
else: right-=1
return res
if __name__ == '__main__':
sol=Solution()
height=[1,3,2,5,25,24,5]
print(sol.maxArea(height))
| true | true |
1c38a73b8b293aebd84332a4186df1a089905b21 | 10,956 | py | Python | old_py2/controllers/ajax_controller.py | guineawheek/ftc-data-take-2 | 337bff2077eadb3bd6bbebd153cbb6181c99516f | [
"MIT"
] | 266 | 2015-01-04T00:10:48.000Z | 2022-03-28T18:42:05.000Z | old_py2/controllers/ajax_controller.py | guineawheek/ftc-data-take-2 | 337bff2077eadb3bd6bbebd153cbb6181c99516f | [
"MIT"
] | 2,673 | 2015-01-01T20:14:33.000Z | 2022-03-31T18:17:16.000Z | old_py2/controllers/ajax_controller.py | guineawheek/ftc-data-take-2 | 337bff2077eadb3bd6bbebd153cbb6181c99516f | [
"MIT"
] | 230 | 2015-01-04T00:10:48.000Z | 2022-03-26T18:12:04.000Z | import logging
import os
import json
import time
import datetime
from base_controller import CacheableHandler, LoggedInHandler
from consts.client_type import ClientType
from consts.playoff_type import PlayoffType
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from google.appengine.ext.webapp import template
from helpers.model_to_dict import ModelToDict
from helpers.mytba_helper import MyTBAHelper
from models.account import Account
from models.api_auth_access import ApiAuthAccess
from models.event import Event
from models.favorite import Favorite
from models.mobile_client import MobileClient
from models.sitevar import Sitevar
from models.typeahead_entry import TypeaheadEntry
class AccountInfoHandler(LoggedInHandler):
    """
    Lightweight endpoint exposing the current account's session state.

    Currently reports only whether a user is signed in and, if so, the
    opaque Google user id.
    """
    def get(self):
        self.response.headers['content-type'] = 'application/json; charset="utf-8"'
        current_user = self.user_bundle.user
        if current_user:
            payload = {'logged_in': True, 'user_id': current_user.user_id()}
        else:
            payload = {'logged_in': False, 'user_id': None}
        self.response.out.write(json.dumps(payload))
class AccountRegisterFCMToken(LoggedInHandler):
    """
    Adds or updates an FCM (Firebase Cloud Messaging) push token.

    Upserts a MobileClient record keyed by (user id, device uuid, web
    client type) so each browser/device holds at most one registration.
    """
    def post(self):
        # Require an authenticated session, mirroring the other account endpoints.
        if not self.user_bundle.user:
            self.response.set_status(401)
            return

        user_id = self.user_bundle.user.user_id()
        fcm_token = self.request.get('fcm_token')
        uuid = self.request.get('uuid')
        display_name = self.request.get('display_name')

        client_type = ClientType.WEB
        query = MobileClient.query(
                MobileClient.user_id == user_id,
                MobileClient.device_uuid == uuid,
                MobileClient.client_type == client_type)
        # Single RPC instead of the count() + fetch() pair: get() returns
        # the first matching entity, or None when no record exists yet.
        client = query.get()
        if client is None:
            # First registration for this device; create it under the Account.
            MobileClient(
                parent=ndb.Key(Account, user_id),
                user_id=user_id,
                messaging_id=fcm_token,
                client_type=client_type,
                device_uuid=uuid,
                display_name=display_name).put()
        else:
            # Record already exists; refresh the token and display name in place.
            client.messaging_id = fcm_token
            client.display_name = display_name
            client.put()
class AccountFavoritesHandler(LoggedInHandler):
    """
    Returns the signed-in account's favorites of a given model type as JSON.
    """
    def get(self, model_type):
        if not self.user_bundle.user:
            self.response.set_status(401)
            return

        account_key = ndb.Key(Account, self.user_bundle.user.user_id())
        favorites = Favorite.query(
            Favorite.model_type == int(model_type),
            ancestor=account_key).fetch()
        serialized = [ModelToDict.favoriteConverter(f) for f in favorites]
        self.response.out.write(json.dumps(serialized))
class AccountFavoritesAddHandler(LoggedInHandler):
    """
    Adds a favorite to the signed-in account.
    """
    def post(self):
        if not self.user_bundle.user:
            self.response.set_status(401)
            return

        user_id = self.user_bundle.user.user_id()
        favorite = Favorite(
            parent=ndb.Key(Account, user_id),
            user_id=user_id,
            model_key=self.request.get("model_key"),
            model_type=int(self.request.get("model_type")),
        )
        MyTBAHelper.add_favorite(favorite)
class AccountFavoritesDeleteHandler(LoggedInHandler):
    """
    Removes a favorite from the signed-in account.
    """
    def post(self):
        if not self.user_bundle.user:
            self.response.set_status(401)
            return

        MyTBAHelper.remove_favorite(
            self.user_bundle.user.user_id(),
            self.request.get("model_key"),
            int(self.request.get("model_type")))
class LiveEventHandler(CacheableHandler):
    """
    Serves the details needed to render live event components.

    The timestamp URL segment makes each refresh window a distinct cache
    key, allowing aggressive caching of responses.
    """
    CACHE_VERSION = 1
    CACHE_KEY_FORMAT = "live-event:{}:{}"  # (event_key, timestamp)
    CACHE_HEADER_LENGTH = 60 * 10

    def __init__(self, *args, **kw):
        super(LiveEventHandler, self).__init__(*args, **kw)
        self._cache_expiration = self.CACHE_HEADER_LENGTH

    def get(self, event_key, timestamp):
        # A timestamp in the future cannot name a valid cache window.
        if int(timestamp) > time.time():
            self.abort(404)

        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(event_key, timestamp)
        super(LiveEventHandler, self).get(event_key, timestamp)

    def _render(self, event_key, timestamp):
        self.response.headers['content-type'] = 'application/json; charset="utf-8"'

        event = Event.get_by_id(event_key)

        matches = [{
            'name': match.short_name,
            'alliances': match.alliances,
            'order': match.play_order,
            'time_str': match.time_string,
        } for match in event.matches]

        return json.dumps({'matches': matches})
class TypeaheadHandler(CacheableHandler):
    """
    Serves precomputed typeahead (autocomplete) entries.

    Entries are stored whole as JSON blobs in the datastore; a trie was
    tried previously but was too large to fit into memcache efficiently.
    """
    CACHE_VERSION = 2
    CACHE_KEY_FORMAT = "typeahead_entries:{}"  # (search_key)
    CACHE_HEADER_LENGTH = 60 * 60 * 24

    def __init__(self, *args, **kw):
        super(TypeaheadHandler, self).__init__(*args, **kw)
        self._cache_expiration = self.CACHE_HEADER_LENGTH

    def get(self, search_key):
        import urllib2
        decoded_key = urllib2.unquote(search_key)
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(decoded_key)
        super(TypeaheadHandler, self).get(decoded_key)

    def _render(self, search_key):
        self.response.headers['content-type'] = 'application/json; charset="utf-8"'

        entry = TypeaheadEntry.get_by_id(search_key)
        if entry is None:
            return '[]'
        self._last_modified = entry.updated
        return entry.data_json
class EventRemapTeamsHandler(CacheableHandler):
    """
    Returns the current team remapping for an event as JSON.

    Responds with JSON ``null`` when the event does not exist.
    """
    CACHE_VERSION = 1
    CACHE_KEY_FORMAT = "remap_teams_{}"  # (event_key)
    CACHE_HEADER_LENGTH = 1

    def __init__(self, *args, **kw):
        super(EventRemapTeamsHandler, self).__init__(*args, **kw)
        self._cache_expiration = 1

    def get(self, event_key):
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(event_key)
        super(EventRemapTeamsHandler, self).get(event_key)

    def _render(self, event_key):
        self.response.headers.add_header('content-type', 'application/json', charset='utf-8')

        event = Event.get_by_id(event_key)
        remap = event.remap_teams if event else None
        return json.dumps(remap)
class WebcastHandler(CacheableHandler):
    """
    Returns JSON containing the HTML player embed for one of an event's
    webcasts.

    ``webcast_number`` in the URL is 1-indexed. When the event has no
    webcasts of its own, the gameday "special webcasts" sitevar is
    consulted as a fallback.
    """
    CACHE_VERSION = 1
    CACHE_KEY_FORMAT = "webcast_{}_{}"  # (event_key, webcast_number)
    CACHE_HEADER_LENGTH = 60 * 5

    def __init__(self, *args, **kw):
        super(WebcastHandler, self).__init__(*args, **kw)
        self._cache_expiration = 60 * 60 * 24

    def get(self, event_key, webcast_number):
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(event_key, webcast_number)
        super(WebcastHandler, self).get(event_key, webcast_number)

    def _render(self, event_key, webcast_number):
        self.response.headers.add_header('content-type', 'application/json', charset='utf-8')
        output = {}

        if not webcast_number.isdigit():
            return json.dumps(output)
        webcast_number = int(webcast_number) - 1  # URL segment is 1-indexed
        if webcast_number < 0:
            # "0" would otherwise become index -1 and silently serve the
            # *last* webcast via negative indexing; treat it as invalid.
            return json.dumps(output)

        event = Event.get_by_id(event_key)
        if event and event.webcast:
            # Bounds check: an out-of-range number previously raised
            # IndexError and produced a 500 instead of an empty response.
            if webcast_number < len(event.webcast):
                webcast = event.webcast[webcast_number]
                if 'type' in webcast and 'channel' in webcast:
                    output['player'] = self._renderPlayer(webcast)
        else:
            # Fall back to hand-curated special webcasts from the sitevar.
            special_webcasts_future = Sitevar.get_by_id_async('gameday.special_webcasts')
            special_webcasts = special_webcasts_future.get_result()
            if special_webcasts:
                special_webcasts = special_webcasts.contents['webcasts']
            else:
                special_webcasts = []

            special_webcasts_dict = {}
            for webcast in special_webcasts:
                special_webcasts_dict[webcast['key_name']] = webcast

            if event_key in special_webcasts_dict:
                webcast = special_webcasts_dict[event_key]
                if 'type' in webcast and 'channel' in webcast:
                    output['player'] = self._renderPlayer(webcast)

        return json.dumps(output)

    def _renderPlayer(self, webcast):
        """Render the webcast-type-specific embed template to an HTML string."""
        webcast_type = webcast['type']
        template_values = {'webcast': webcast}

        path = os.path.join(os.path.dirname(__file__), '../templates/webcast/' + webcast_type + '.html')
        return template.render(path, template_values)

    def memcacheFlush(self, event_key):
        """Evict the cached responses for webcast slots 0-9 of an event."""
        keys = [self._render_cache_key(self.CACHE_KEY_FORMAT.format(event_key, n)) for n in range(10)]
        memcache.delete_multi(keys)
        return keys
class AllowedApiWriteEventsHandler(LoggedInHandler):
    """
    Get the events the current user is allowed to edit via the trusted API
    """
    def get(self):
        if not self.user_bundle.user:
            self.response.out.write(json.dumps([]))
            return

        now = datetime.datetime.now()
        # ndb requires "== None" here; "is None" would not build a query filter.
        auth_tokens = ApiAuthAccess.query(ApiAuthAccess.owner == self.user_bundle.account.key,
                                          ndb.OR(ApiAuthAccess.expiration == None, ApiAuthAccess.expiration >= now)).fetch()
        # Collect every event key any unexpired token grants access to.
        event_keys = []
        for token in auth_tokens:
            event_keys.extend(token.event_list)

        details = [
            {'value': event.key_name, 'label': "{} {}".format(event.year, event.name)}
            for event in ndb.get_multi(event_keys)
        ]
        self.response.out.write(json.dumps(details))
class PlayoffTypeGetHandler(CacheableHandler):
    """
    Returns the possible playoff types, formatted for EventWizard dropdown
    """
    CACHE_VERSION = 1
    CACHE_KEY_FORMAT = "playoff_types"
    CACHE_HEADER_LENGTH = 60 * 60 * 24

    def get(self):
        # One {'value', 'label'} pair per known playoff type.
        types = [{'value': type_enum, 'label': type_name}
                 for type_enum, type_name in PlayoffType.type_names.iteritems()]
        self.response.out.write(json.dumps(types))
| 33.710769 | 124 | 0.643574 | import logging
import os
import json
import time
import datetime
from base_controller import CacheableHandler, LoggedInHandler
from consts.client_type import ClientType
from consts.playoff_type import PlayoffType
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from google.appengine.ext.webapp import template
from helpers.model_to_dict import ModelToDict
from helpers.mytba_helper import MyTBAHelper
from models.account import Account
from models.api_auth_access import ApiAuthAccess
from models.event import Event
from models.favorite import Favorite
from models.mobile_client import MobileClient
from models.sitevar import Sitevar
from models.typeahead_entry import TypeaheadEntry
class AccountInfoHandler(LoggedInHandler):
    """Report whether the current session is logged in, and the user's id."""
    def get(self):
        self.response.headers['content-type'] = 'application/json; charset="utf-8"'
        user = self.user_bundle.user
        self.response.out.write(json.dumps({
            'logged_in': True if user else False,
            'user_id': user.user_id() if user else None
        }))
class AccountRegisterFCMToken(LoggedInHandler):
    """Create or update the FCM (push messaging) token for a web client."""
    def post(self):
        if not self.user_bundle.user:
            self.response.set_status(401)
            return
        user_id = self.user_bundle.user.user_id()
        fcm_token = self.request.get('fcm_token')
        uuid = self.request.get('uuid')
        display_name = self.request.get('display_name')
        client_type = ClientType.WEB
        # One MobileClient record per (user, device uuid, client type).
        query = MobileClient.query(
                MobileClient.user_id == user_id,
                MobileClient.device_uuid == uuid,
                MobileClient.client_type == client_type)
        if query.count() == 0:
            # No record yet for this device; create one.
            MobileClient(
                parent=ndb.Key(Account, user_id),
                user_id=user_id,
                messaging_id=fcm_token,
                client_type=client_type,
                device_uuid=uuid,
                display_name=display_name).put()
        else:
            # Record already exists, update it
            client = query.fetch(1)[0]
            client.messaging_id = fcm_token
            client.display_name = display_name
            client.put()
class AccountFavoritesHandler(LoggedInHandler):
    """List the logged-in user's favorites of a given model type as JSON."""
    def get(self, model_type):
        if not self.user_bundle.user:
            self.response.set_status(401)
            return
        favorites = Favorite.query(
            Favorite.model_type==int(model_type),
            ancestor=ndb.Key(Account, self.user_bundle.user.user_id())).fetch()
        self.response.out.write(json.dumps([ModelToDict.favoriteConverter(fav) for fav in favorites]))
class AccountFavoritesAddHandler(LoggedInHandler):
    """Add a favorite (model_type + model_key) for the logged-in user."""
    def post(self):
        if not self.user_bundle.user:
            self.response.set_status(401)
            return
        model_type = int(self.request.get("model_type"))
        model_key = self.request.get("model_key")
        user_id = self.user_bundle.user.user_id()
        fav = Favorite(
            parent=ndb.Key(Account, user_id),
            user_id=user_id,
            model_key=model_key,
            model_type=model_type
        )
        MyTBAHelper.add_favorite(fav)
class AccountFavoritesDeleteHandler(LoggedInHandler):
    """Remove one of the logged-in user's favorites."""
    def post(self):
        if not self.user_bundle.user:
            self.response.set_status(401)
            return
        model_key = self.request.get("model_key")
        model_type = int(self.request.get("model_type"))
        user_id = self.user_bundle.user.user_id()
        MyTBAHelper.remove_favorite(user_id, model_key, model_type)
class LiveEventHandler(CacheableHandler):
    """Serve a snapshot of an event's matches for live viewing, cached per timestamp."""
    CACHE_VERSION = 1
    CACHE_KEY_FORMAT = "live-event:{}:{}"  # (event_key, timestamp)
    CACHE_HEADER_LENGTH = 60 * 10
    def __init__(self, *args, **kw):
        super(LiveEventHandler, self).__init__(*args, **kw)
        self._cache_expiration = self.CACHE_HEADER_LENGTH
    def get(self, event_key, timestamp):
        # Future timestamps can't have data yet; refuse them.
        if int(timestamp) > time.time():
            self.abort(404)
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(event_key, timestamp)
        super(LiveEventHandler, self).get(event_key, timestamp)
    def _render(self, event_key, timestamp):
        self.response.headers['content-type'] = 'application/json; charset="utf-8"'
        event = Event.get_by_id(event_key)
        # Serialize only the lightweight match fields needed by the client.
        matches = []
        for match in event.matches:
            matches.append({
                'name': match.short_name,
                'alliances': match.alliances,
                'order': match.play_order,
                'time_str': match.time_string,
            })
        event_dict = {
            # 'rankings': event.rankings,
            # 'matchstats': event.matchstats,
            'matches': matches,
            }
        return json.dumps(event_dict)
class TypeaheadHandler(CacheableHandler):
    """Serve cached typeahead (autocomplete) entries as JSON.
    Returns a flat list of all teams and events; still needs optimization.
    """
    CACHE_VERSION = 2
    CACHE_KEY_FORMAT = "typeahead_entries:{}"  # (search_key)
    CACHE_HEADER_LENGTH = 60 * 60 * 24
    def __init__(self, *args, **kw):
        super(TypeaheadHandler, self).__init__(*args, **kw)
        self._cache_expiration = self.CACHE_HEADER_LENGTH
    def get(self, search_key):
        import urllib2
        # The key arrives URL-encoded; decode before building the cache key.
        search_key = urllib2.unquote(search_key)
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(search_key)
        super(TypeaheadHandler, self).get(search_key)
    def _render(self, search_key):
        self.response.headers['content-type'] = 'application/json; charset="utf-8"'
        entry = TypeaheadEntry.get_by_id(search_key)
        if entry is None:
            # No precomputed entry for this key: empty JSON list.
            return '[]'
        else:
            self._last_modified = entry.updated
            return entry.data_json
class EventRemapTeamsHandler(CacheableHandler):
    """Return the current team remapping for an event as JSON."""
    CACHE_VERSION = 1
    CACHE_KEY_FORMAT = "remap_teams_{}"  # (event_key)
    CACHE_HEADER_LENGTH = 1
    def __init__(self, *args, **kw):
        super(EventRemapTeamsHandler, self).__init__(*args, **kw)
        # Effectively uncached: entries expire after one second.
        self._cache_expiration = 1
    def get(self, event_key):
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(event_key)
        super(EventRemapTeamsHandler, self).get(event_key)
    def _render(self, event_key):
        self.response.headers.add_header('content-type', 'application/json', charset='utf-8')
        event = Event.get_by_id(event_key)
        if not event:
            # Unknown event keys serialize as JSON null instead of erroring.
            return json.dumps(None)
        return json.dumps(event.remap_teams)
class WebcastHandler(CacheableHandler):
    """Returns the HTML necessary to generate the webcast embed for a given event."""
    CACHE_VERSION = 1
    CACHE_KEY_FORMAT = "webcast_{}_{}"  # (event_key, webcast_number)
    CACHE_HEADER_LENGTH = 60 * 5
    def __init__(self, *args, **kw):
        super(WebcastHandler, self).__init__(*args, **kw)
        self._cache_expiration = 60 * 60 * 24
    def get(self, event_key, webcast_number):
        self._partial_cache_key = self.CACHE_KEY_FORMAT.format(event_key, webcast_number)
        super(WebcastHandler, self).get(event_key, webcast_number)
    def _render(self, event_key, webcast_number):
        """Build {'player': <embed html>} for the requested webcast, or {} if unavailable."""
        self.response.headers.add_header('content-type', 'application/json', charset='utf-8')
        output = {}
        if not webcast_number.isdigit():
            return json.dumps(output)
        webcast_number = int(webcast_number) - 1  # URL numbers are 1-based
        event = Event.get_by_id(event_key)
        if event and event.webcast:
            # Guard the index: "0" would otherwise wrap to -1 (the *last*
            # webcast) and an out-of-range number would raise IndexError
            # (an HTTP 500) instead of returning an empty payload.
            if 0 <= webcast_number < len(event.webcast):
                webcast = event.webcast[webcast_number]
                if 'type' in webcast and 'channel' in webcast:
                    output['player'] = self._renderPlayer(webcast)
        else:
            # Fall back to the manually configured "special" webcasts sitevar.
            special_webcasts_future = Sitevar.get_by_id_async('gameday.special_webcasts')
            special_webcasts = special_webcasts_future.get_result()
            if special_webcasts:
                special_webcasts = special_webcasts.contents['webcasts']
            else:
                special_webcasts = []
            special_webcasts_dict = {}
            for webcast in special_webcasts:
                special_webcasts_dict[webcast['key_name']] = webcast
            if event_key in special_webcasts_dict:
                webcast = special_webcasts_dict[event_key]
                if 'type' in webcast and 'channel' in webcast:
                    output['player'] = self._renderPlayer(webcast)
        return json.dumps(output)
    def _renderPlayer(self, webcast):
        """Render the embed template that matches the webcast's type."""
        webcast_type = webcast['type']
        template_values = {'webcast': webcast}
        path = os.path.join(os.path.dirname(__file__), '../templates/webcast/' + webcast_type + '.html')
        return template.render(path, template_values)
    def memcacheFlush(self, event_key):
        """Evict the first ten cached webcast pages for an event; returns the evicted keys."""
        keys = [self._render_cache_key(self.CACHE_KEY_FORMAT.format(event_key, n)) for n in range(10)]
        memcache.delete_multi(keys)
        return keys
class AllowedApiWriteEventsHandler(LoggedInHandler):
    """Get the events the current user is allowed to edit via the trusted API."""
    def get(self):
        if not self.user_bundle.user:
            self.response.out.write(json.dumps([]))
            return
        now = datetime.datetime.now()
        # ndb requires "== None" here; "is None" would not build a query filter.
        auth_tokens = ApiAuthAccess.query(ApiAuthAccess.owner == self.user_bundle.account.key,
                                          ndb.OR(ApiAuthAccess.expiration == None, ApiAuthAccess.expiration >= now)).fetch()
        # Collect every event key any unexpired token grants access to.
        event_keys = []
        for token in auth_tokens:
            event_keys.extend(token.event_list)
        events = ndb.get_multi(event_keys)
        details = []
        for event in events:
            details.append({'value': event.key_name, 'label': "{} {}".format(event.year, event.name)})
        self.response.out.write(json.dumps(details))
class PlayoffTypeGetHandler(CacheableHandler):
    """Returns the possible playoff types, formatted for EventWizard dropdown."""
    CACHE_VERSION = 1
    CACHE_KEY_FORMAT = "playoff_types"
    CACHE_HEADER_LENGTH = 60 * 60 * 24
    def get(self):
        # One {'value', 'label'} pair per known playoff type.
        types = []
        for type_enum, type_name in PlayoffType.type_names.iteritems():
            types.append({'value': type_enum, 'label': type_name})
        self.response.out.write(json.dumps(types))
| true | true |
1c38a7ddaaefad4ecbb5c078dc05360fb3aca639 | 3,004 | py | Python | protoc_oneof_listing_plugin.py | sunaga-lab/protobuf-oneof-listing-plugin | ccb926c39130435182e6f0593d7fe26edd28bbb1 | [
"MIT"
] | null | null | null | protoc_oneof_listing_plugin.py | sunaga-lab/protobuf-oneof-listing-plugin | ccb926c39130435182e6f0593d7fe26edd28bbb1 | [
"MIT"
] | null | null | null | protoc_oneof_listing_plugin.py | sunaga-lab/protobuf-oneof-listing-plugin | ccb926c39130435182e6f0593d7fe26edd28bbb1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys, os, os.path
from google.protobuf.compiler import plugin_pb2 as plugin
import itertools
from jinja2 import Environment, FileSystemLoader
import inflection
import glob
TEMPLATE_DIR_PARAM_KEY = "templates"
DEFAULT_TEMPLATES_PATH = "templates/*"
class OneofCase:
    """A single member (case) of a protobuf ``oneof`` field.

    Attributes:
        type_name_full: Fully qualified message type name (e.g. ``.pkg.Msg``).
        type_name: Type name relative to *package* when the type lives in that
            package, otherwise the fully qualified name unchanged.
        value_name: The field name of this oneof case.
    """

    def __init__(self, package, type_name, value_name):
        self.type_name_full = type_name
        # Strip the package qualifier only when it is the actual leading
        # qualifier (".<package>.").  The previous substring test plus a
        # global replace could mangle names where the package name appears
        # mid-path (e.g. package "foo" turned ".a.foo.Msg" into ".aMsg"),
        # and an empty package matched everything.
        prefix = "." + package + "."
        if package and type_name.startswith(prefix):
            self.type_name = type_name[len(prefix):]
        else:
            self.type_name = type_name
        self.value_name = value_name
class OneofInfo:
    """Collects every case of one ``oneof`` declaration inside a message."""

    def __init__(self, package, msg_name, fieldname):
        self.package, self.msg_name, self.fieldname = package, msg_name, fieldname
        self.cases = []

    def add_case(self, case):
        """Append one OneofCase belonging to this oneof."""
        self.cases.append(case)
def parse_file(file):
    """Return every OneofInfo found across all messages in *file*."""
    return [
        oneof
        for msgtype in file.message_type
        for oneof in parse_message_type(file.package, msgtype)
    ]
def parse_message_type(package, msg_type):
    """Build one OneofInfo per oneof declaration in *msg_type*.

    Each field carrying an ``oneof_index`` is attached to the matching
    OneofInfo as a OneofCase.
    """
    oneofs = [OneofInfo(package, msg_type.name, decl.name)
              for decl in msg_type.oneof_decl]
    for field in msg_type.field:
        if not field.HasField("oneof_index"):
            continue
        # oneof_index links the field to its owning oneof declaration.
        oneofs[field.oneof_index].add_case(
            OneofCase(package, field.type_name, field.name))
    return oneofs
def add_name_filters(env):
    """Register the naming-convention filters used by the templates on *env*."""
    name_filters = {
        'camelize': inflection.camelize,
        'camelize_lower': lambda s: inflection.camelize(s, False),
        'underscore': inflection.underscore,
        'underscore_upper': lambda s: inflection.underscore(s).upper(),
        'underscore_lower': lambda s: inflection.underscore(s).lower(),
        'upper': lambda s: s.upper(),
        'lower': lambda s: s.lower(),
    }
    env.filters.update(name_filters)
def main():
    """Run as a protoc plugin: read a serialized CodeGeneratorRequest from
    stdin, render every matching template against the collected oneof info,
    and write a serialized CodeGeneratorResponse to stdout.
    """
    # Read request
    data = sys.stdin.buffer.read()
    request = plugin.CodeGeneratorRequest()
    request.ParseFromString(data)
    response = plugin.CodeGeneratorResponse()
    # Gather every oneof declared in every .proto file of the request.
    oneofs = list(itertools.chain(*[parse_file(item) for item in request.proto_file]))
    # Prepare rendering env
    env = Environment(loader=FileSystemLoader(["."], encoding='utf8'))
    add_name_filters(env)
    # The template glob may be overridden via the plugin parameter
    # ("templates=<glob>"); otherwise the default path is used.
    if request.parameter.startswith(TEMPLATE_DIR_PARAM_KEY + "="):
        templates = request.parameter[len(TEMPLATE_DIR_PARAM_KEY)+1:]
    else:
        templates = DEFAULT_TEMPLATES_PATH
    template_target_glob = glob.glob(templates)
    if len(template_target_glob) == 0:
        response.error = "No such template: " + templates + " (from " + os.getcwd() + ")"
    else:
        # One output file per template; its name drops the ".tpl" suffix.
        for tfn in template_target_glob:
            tmpl = env.get_template(tfn)
            output = tmpl.render(oneofs=oneofs)
            f: response.File = response.file.add()
            f.name = os.path.basename(tfn).replace(".tpl", "")
            f.content = output
    output = response.SerializeToString()
    sys.stdout.buffer.write(output)
# Entry point when protoc executes this file as a plugin.
if __name__ == '__main__':
    main()
| 32.652174 | 117 | 0.675433 |
import sys, os, os.path
from google.protobuf.compiler import plugin_pb2 as plugin
import itertools
from jinja2 import Environment, FileSystemLoader
import inflection
import glob
TEMPLATE_DIR_PARAM_KEY = "templates"
DEFAULT_TEMPLATES_PATH = "templates/*"
class OneofCase:
    """One member (case) of a protobuf ``oneof``; stores the full and
    package-relative type names plus the field name."""
    def __init__(self, package, type_name, value_name):
        self.type_name_full = type_name
        # NOTE(review): this is a substring test plus a global replace of
        # ".<package>." anywhere in the name, not only as the leading
        # qualifier — verify for package names that can appear mid-path.
        if package in type_name:
            self.type_name = type_name.replace("."+package+".", "")
        else:
            self.type_name = type_name
        self.value_name = value_name
class OneofInfo:
    """Collects every case of one ``oneof`` declaration inside a message."""
    def __init__(self, package, msg_name, fieldname):
        self.package = package
        self.msg_name = msg_name
        self.fieldname = fieldname
        self.cases = []
    def add_case(self, case):
        # Append one OneofCase belonging to this oneof.
        self.cases.append(case)
def parse_file(file):
    """Return every OneofInfo found across all messages in *file*."""
    all_oneofs = list(itertools.chain(*[parse_message_type(file.package, msgtype) for msgtype in file.message_type]))
    return all_oneofs
def parse_message_type(package, msg_type):
    """Build one OneofInfo per oneof declaration; attach its fields as cases."""
    oneofs = [OneofInfo(package, msg_type.name, decl.name) for decl in msg_type.oneof_decl]
    for field in msg_type.field:
        if field.HasField("oneof_index"):
            # oneof_index links the field to its owning oneof declaration.
            oneofs[field.oneof_index].add_case(
                OneofCase(package, field.type_name, field.name)
            )
    return oneofs
def add_name_filters(env):
    """Register the naming-convention filters used by the templates on *env*."""
    env.filters['camelize'] = inflection.camelize
    env.filters['camelize_lower'] = lambda s: inflection.camelize(s, False)
    env.filters['underscore'] = inflection.underscore
    env.filters['underscore_upper'] = lambda s: inflection.underscore(s).upper()
    env.filters['underscore_lower'] = lambda s: inflection.underscore(s).lower()
    env.filters['upper'] = lambda s: s.upper()
    env.filters['lower'] = lambda s: s.lower()
def main():
    """protoc plugin entry: read a CodeGeneratorRequest from stdin, render
    the templates over the collected oneofs, and write a
    CodeGeneratorResponse to stdout."""
    data = sys.stdin.buffer.read()
    request = plugin.CodeGeneratorRequest()
    request.ParseFromString(data)
    response = plugin.CodeGeneratorResponse()
    # Gather every oneof declared in every .proto file of the request.
    oneofs = list(itertools.chain(*[parse_file(item) for item in request.proto_file]))
    env = Environment(loader=FileSystemLoader(["."], encoding='utf8'))
    add_name_filters(env)
    # Template glob may be overridden via the plugin parameter ("templates=<glob>").
    if request.parameter.startswith(TEMPLATE_DIR_PARAM_KEY + "="):
        templates = request.parameter[len(TEMPLATE_DIR_PARAM_KEY)+1:]
    else:
        templates = DEFAULT_TEMPLATES_PATH
    template_target_glob = glob.glob(templates)
    if len(template_target_glob) == 0:
        response.error = "No such template: " + templates + " (from " + os.getcwd() + ")"
    else:
        # One output file per template; its name drops the ".tpl" suffix.
        for tfn in template_target_glob:
            tmpl = env.get_template(tfn)
            output = tmpl.render(oneofs=oneofs)
            f: response.File = response.file.add()
            f.name = os.path.basename(tfn).replace(".tpl", "")
            f.content = output
    output = response.SerializeToString()
    sys.stdout.buffer.write(output)
# Entry point when protoc executes this file as a plugin.
if __name__ == '__main__':
    main()
| true | true |
1c38aa00eda65281d3fb5a0dc4a4a97245553c9a | 11,436 | py | Python | src/openapi_client/model/id16_nullable.py | Nozbe/NTImporters | 12fa898efd41954b5c29bae383aba5ac56044e87 | [
"MIT"
] | 6 | 2022-01-03T07:07:18.000Z | 2022-03-21T08:13:49.000Z | src/openapi_client/model/id16_nullable.py | Nozbe/NTImporters | 12fa898efd41954b5c29bae383aba5ac56044e87 | [
"MIT"
] | 1 | 2022-02-02T13:01:05.000Z | 2022-02-10T12:01:17.000Z | src/openapi_client/model/id16_nullable.py | Nozbe/NTImporters | 12fa898efd41954b5c29bae383aba5ac56044e87 | [
"MIT"
] | 1 | 2022-02-10T11:53:55.000Z | 2022-02-10T11:53:55.000Z | """
Nozbe Teams API
Nozbe Teams API specification # noqa: E501
The version of the OpenAPI document: 0.0.1
Contact: support@nozbe.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from openapi_client.exceptions import ApiAttributeError
class Id16Nullable(ModelSimple):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    allowed_values = {
    }
    validations = {
        ('value',): {
            'max_length': 16,
            'min_length': 16,
            'regex': {
                # Empty pattern matches any string; the effective constraint
                # is the exact 16-character length above.
                'pattern': r'',  # noqa: E501
            },
        },
    }
    additional_properties_type = None
    # The schema is nullable: JSON null is accepted for this model.
    _nullable = True
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'value': (str,),
        }
    @cached_property
    def discriminator():
        return None
    attribute_map = {}
    read_only_vars = set()
    _composed_schemas = None
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Id16Nullable - a model defined in OpenAPI
        Note that value can be passed either in args or in kwargs, but not in both.
        Args:
            args[0] (str):  # noqa: E501
        Keyword Args:
            value (str):  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Id16Nullable - a model defined in OpenAPI
        Deserialization constructor used for server-sent data; accepts the
        same positional/keyword arguments as ``__init__`` (see above).
        """
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        return self
| 39.84669 | 110 | 0.551591 |
import re
import sys
from openapi_client.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from openapi_client.exceptions import ApiAttributeError
class Id16Nullable(ModelSimple):
    """Nullable 16-character id string model.
    NOTE: auto generated by OpenAPI Generator (https://openapi-generator.tech);
    do not edit the class manually.
    """
    allowed_values = {
    }
    validations = {
        ('value',): {
            'max_length': 16,
            'min_length': 16,
            'regex': {
                # Empty pattern matches any string; the effective constraint
                # is the exact 16-character length above.
                'pattern': r'',
            },
        },
    }
    additional_properties_type = None
    # The schema is nullable: JSON null is accepted for this model.
    _nullable = True
    @cached_property
    def openapi_types():
        """Attribute name -> accepted types; a method so self-typed
        properties resolve after the class is loaded."""
        return {
            'value': (str,),
        }
    @cached_property
    def discriminator():
        return None
    attribute_map = {}
    read_only_vars = set()
    _composed_schemas = None
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Id16Nullable - a model defined in OpenAPI.
        ``value`` (str) may be passed positionally or as a keyword, but not
        both; generator-internal keywords (_check_type, _path_to_item,
        _spec_property_naming, _configuration, _visited_composed_classes)
        are popped before validation.
        """
        _path_to_item = kwargs.pop('_path_to_item', ())
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Deserialization constructor for server-sent data; accepts the
        same arguments as ``__init__``."""
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        return self
| true | true |
1c38ac794000eeef8a8357e0403beb588408b24b | 2,842 | py | Python | services/main/src/devices.py | Para-chute/getaware | 469ccebca023d244152740e1d502ceb9db123194 | [
"MIT"
] | 1 | 2019-09-06T23:17:44.000Z | 2019-09-06T23:17:44.000Z | services/main/src/devices.py | Para-chute/getaware | 469ccebca023d244152740e1d502ceb9db123194 | [
"MIT"
] | 7 | 2020-09-07T04:37:32.000Z | 2022-02-26T17:28:15.000Z | services/main/src/devices.py | Para-chute/getaware | 469ccebca023d244152740e1d502ceb9db123194 | [
"MIT"
] | 1 | 2019-09-06T16:28:02.000Z | 2019-09-06T16:28:02.000Z | import cherrypy
import os
import json
import datetime
import influxdb
import src.schemas as schema
class Devices(object):
    """REST resource for devices nested under an event's device group.

    Routed by CherryPy's MethodDispatcher as
    /{eventalias}/{intid}[/{uuid}].  All responses are JSON envelopes of
    the form {"status": ..., "data"/"message": ...}.
    """
    def __init__(self):
        pass
    exposed = True  # required for CherryPy's MethodDispatcher to route here
    @cherrypy.tools.json_out()
    def GET(self, eventalias, intid, uuid):
        """Return one device (by uuid) from group `intid` of event `eventalias`."""
        try:
            event = schema.Event.objects.get(alias=eventalias)
        except Exception as e:
            raise cherrypy.HTTPError(404, str(e))
        try:
            device = event.device_groups.get(
                intid=intid).devices.get(uuid=uuid)
            if device is not None:
                return {
                    "status": 200,
                    "data": json.loads(device.to_json())
                }
        except Exception as e:
            raise cherrypy.HTTPError(404, str(e))
        # NOTE: HTTP status stays 200 here; the envelope carries the 404.
        return {
            "status": 404,
            "message": "Device Not found"
        }
    @cherrypy.tools.json_in()
    @cherrypy.tools.json_out()
    def POST(self, eventalias, intid):
        """Create a device in group `intid`; JSON body: {"controller_type": ...}."""
        try:
            event = schema.Event.objects.get(alias=eventalias)
        except Exception as e:
            raise cherrypy.HTTPError(404, str(e))
        device = schema.Device()
        device.controller_type = cherrypy.request.json.get('controller_type')
        notfound = True
        for idx, groupidx in enumerate(event.device_groups):
            if str(groupidx['intid']) == intid:
                event.device_groups[idx].devices.append(device)
                notfound = False
                break
        if notfound is True:
            raise cherrypy.HTTPError(
                404, 'Group not found')
        try:
            event.save()
        except Exception as e:
            raise cherrypy.HTTPError(400, str(e))
        cherrypy.response.status = 200
        return {
            "status": 200,
            "data": json.loads(event.to_json())
        }
    # BUG FIX: DELETE returned dicts but lacked the json_out tool, so
    # CherryPy could not serialize the response body.
    @cherrypy.tools.json_out()
    def DELETE(self, eventalias, intid, uuid):
        """Remove a device (by uuid) from group `intid` of event `eventalias`."""
        try:
            event = schema.Event.objects.get(alias=eventalias)
        except Exception as e:
            raise cherrypy.HTTPError(404, str(e))
        for idx, groupidx in enumerate(event.device_groups):
            if str(groupidx['intid']) == intid:
                for idy, deviceidx in enumerate(event.device_groups[idx].devices):
                    if str(deviceidx['uuid']) == uuid:
                        try:
                            event.device_groups[idx].devices.pop(idy)
                            event.save()
                            return {
                                "status": 200,
                                "data": "Deleted"
                            }
                        except Exception as e:
                            raise cherrypy.HTTPError(500, str(e))
        return {
            "status": 400,
            "message": "Couldn't delete Device"  # fixed typo "detele"
        }
| 29.298969 | 82 | 0.508445 | import cherrypy
import os
import json
import datetime
import influxdb
import src.schemas as schema
class Devices(object):
    """REST resource for devices nested under an event's device group.

    Routed by CherryPy's MethodDispatcher as
    /{eventalias}/{intid}[/{uuid}].  All responses are JSON envelopes of
    the form {"status": ..., "data"/"message": ...}.
    """
    def __init__(self):
        pass
    exposed = True  # required for CherryPy's MethodDispatcher to route here
    @cherrypy.tools.json_out()
    def GET(self, eventalias, intid, uuid):
        """Return one device (by uuid) from group `intid` of event `eventalias`."""
        try:
            event = schema.Event.objects.get(alias=eventalias)
        except Exception as e:
            raise cherrypy.HTTPError(404, str(e))
        try:
            device = event.device_groups.get(
                intid=intid).devices.get(uuid=uuid)
            if device is not None:
                return {
                    "status": 200,
                    "data": json.loads(device.to_json())
                }
        except Exception as e:
            raise cherrypy.HTTPError(404, str(e))
        # NOTE: HTTP status stays 200 here; the envelope carries the 404.
        return {
            "status": 404,
            "message": "Device Not found"
        }
    @cherrypy.tools.json_in()
    @cherrypy.tools.json_out()
    def POST(self, eventalias, intid):
        """Create a device in group `intid`; JSON body: {"controller_type": ...}."""
        try:
            event = schema.Event.objects.get(alias=eventalias)
        except Exception as e:
            raise cherrypy.HTTPError(404, str(e))
        device = schema.Device()
        device.controller_type = cherrypy.request.json.get('controller_type')
        notfound = True
        for idx, groupidx in enumerate(event.device_groups):
            if str(groupidx['intid']) == intid:
                event.device_groups[idx].devices.append(device)
                notfound = False
                break
        if notfound is True:
            raise cherrypy.HTTPError(
                404, 'Group not found')
        try:
            event.save()
        except Exception as e:
            raise cherrypy.HTTPError(400, str(e))
        cherrypy.response.status = 200
        return {
            "status": 200,
            "data": json.loads(event.to_json())
        }
    # BUG FIX: DELETE returned dicts but lacked the json_out tool, so
    # CherryPy could not serialize the response body.
    @cherrypy.tools.json_out()
    def DELETE(self, eventalias, intid, uuid):
        """Remove a device (by uuid) from group `intid` of event `eventalias`."""
        try:
            event = schema.Event.objects.get(alias=eventalias)
        except Exception as e:
            raise cherrypy.HTTPError(404, str(e))
        for idx, groupidx in enumerate(event.device_groups):
            if str(groupidx['intid']) == intid:
                for idy, deviceidx in enumerate(event.device_groups[idx].devices):
                    if str(deviceidx['uuid']) == uuid:
                        try:
                            event.device_groups[idx].devices.pop(idy)
                            event.save()
                            return {
                                "status": 200,
                                "data": "Deleted"
                            }
                        except Exception as e:
                            raise cherrypy.HTTPError(500, str(e))
        return {
            "status": 400,
            "message": "Couldn't delete Device"  # fixed typo "detele"
        }
| true | true |
1c38adb5b1c1296aac582774750f99d40b779ab0 | 741 | py | Python | ssddj/ssdfrontend/views.py | sachinkagarwal/saturnring | 8fc6d9e0525ea72536102a17b0b5febed63e22ae | [
"BSD-3-Clause"
] | 3 | 2016-01-24T14:44:06.000Z | 2017-10-10T15:26:15.000Z | ssddj/ssdfrontend/views.py | Acidburn0zzz/saturnring | 5dfb62480e6e8bbae1714274a58b07ac4303138f | [
"BSD-3-Clause"
] | 1 | 2015-04-16T17:27:24.000Z | 2015-04-16T17:27:24.000Z | ssddj/ssdfrontend/views.py | Acidburn0zzz/saturnring | 5dfb62480e6e8bbae1714274a58b07ac4303138f | [
"BSD-3-Clause"
] | 6 | 2015-04-17T19:06:15.000Z | 2019-02-15T19:08:34.000Z | #Copyright 2014 Blackberry Limited
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from django.shortcuts import render
from django.views.generic import ListView
from ssdfrontend.models import Target
class TargetList(ListView):
model = Target
| 35.285714 | 73 | 0.782726 |
from django.shortcuts import render
from django.views.generic import ListView
from ssdfrontend.models import Target
class TargetList(ListView):
model = Target
| true | true |
1c38ae29e28c04149ad2eb99228cb304e4228d61 | 12,547 | py | Python | sdk/python/pulumi_aws/cognito/identity_pool.py | sibuthomasmathew/pulumi-aws | 6351f2182eb6f693d4e09e4136c385adfa0ab674 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/cognito/identity_pool.py | sibuthomasmathew/pulumi-aws | 6351f2182eb6f693d4e09e4136c385adfa0ab674 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/cognito/identity_pool.py | sibuthomasmathew/pulumi-aws | 6351f2182eb6f693d4e09e4136c385adfa0ab674 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['IdentityPool']
class IdentityPool(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allow_unauthenticated_identities: Optional[pulumi.Input[bool]] = None,
cognito_identity_providers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IdentityPoolCognitoIdentityProviderArgs']]]]] = None,
developer_provider_name: Optional[pulumi.Input[str]] = None,
identity_pool_name: Optional[pulumi.Input[str]] = None,
openid_connect_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
saml_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
supported_login_providers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides an AWS Cognito Identity Pool.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
default = aws.iam.SamlProvider("default", saml_metadata_document=(lambda path: open(path).read())("saml-metadata.xml"))
main = aws.cognito.IdentityPool("main",
identity_pool_name="identity pool",
allow_unauthenticated_identities=False,
cognito_identity_providers=[
aws.cognito.IdentityPoolCognitoIdentityProviderArgs(
client_id="6lhlkkfbfb4q5kpp90urffae",
provider_name="cognito-idp.us-east-1.amazonaws.com/us-east-1_Tv0493apJ",
server_side_token_check=False,
),
aws.cognito.IdentityPoolCognitoIdentityProviderArgs(
client_id="7kodkvfqfb4qfkp39eurffae",
provider_name="cognito-idp.us-east-1.amazonaws.com/eu-west-1_Zr231apJu",
server_side_token_check=False,
),
],
supported_login_providers={
"graph.facebook.com": "7346241598935552",
"accounts.google.com": "123456789012.apps.googleusercontent.com",
},
saml_provider_arns=[default.arn],
openid_connect_provider_arns=["arn:aws:iam::123456789012:oidc-provider/id.example.com"])
```
## Import
Cognito Identity Pool can be imported using the name, e.g.
```sh
$ pulumi import aws:cognito/identityPool:IdentityPool mypool <identity-pool-id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] allow_unauthenticated_identities: Whether the identity pool supports unauthenticated logins or not.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IdentityPoolCognitoIdentityProviderArgs']]]] cognito_identity_providers: An array of Amazon Cognito Identity user pools and their client IDs.
:param pulumi.Input[str] developer_provider_name: The "domain" by which Cognito will refer to your users. This name acts as a placeholder that allows your
backend and the Cognito service to communicate about the developer provider.
:param pulumi.Input[str] identity_pool_name: The Cognito Identity Pool name.
:param pulumi.Input[Sequence[pulumi.Input[str]]] openid_connect_provider_arns: Set of OpendID Connect provider ARNs.
:param pulumi.Input[Sequence[pulumi.Input[str]]] saml_provider_arns: An array of Amazon Resource Names (ARNs) of the SAML provider for your identity.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] supported_login_providers: Key-Value pairs mapping provider names to provider app IDs.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the Identity Pool.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['allow_unauthenticated_identities'] = allow_unauthenticated_identities
__props__['cognito_identity_providers'] = cognito_identity_providers
__props__['developer_provider_name'] = developer_provider_name
if identity_pool_name is None and not opts.urn:
raise TypeError("Missing required property 'identity_pool_name'")
__props__['identity_pool_name'] = identity_pool_name
__props__['openid_connect_provider_arns'] = openid_connect_provider_arns
__props__['saml_provider_arns'] = saml_provider_arns
__props__['supported_login_providers'] = supported_login_providers
__props__['tags'] = tags
__props__['arn'] = None
super(IdentityPool, __self__).__init__(
'aws:cognito/identityPool:IdentityPool',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
allow_unauthenticated_identities: Optional[pulumi.Input[bool]] = None,
arn: Optional[pulumi.Input[str]] = None,
cognito_identity_providers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IdentityPoolCognitoIdentityProviderArgs']]]]] = None,
developer_provider_name: Optional[pulumi.Input[str]] = None,
identity_pool_name: Optional[pulumi.Input[str]] = None,
openid_connect_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
saml_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
supported_login_providers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'IdentityPool':
"""
Get an existing IdentityPool resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] allow_unauthenticated_identities: Whether the identity pool supports unauthenticated logins or not.
:param pulumi.Input[str] arn: The ARN of the identity pool.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IdentityPoolCognitoIdentityProviderArgs']]]] cognito_identity_providers: An array of Amazon Cognito Identity user pools and their client IDs.
:param pulumi.Input[str] developer_provider_name: The "domain" by which Cognito will refer to your users. This name acts as a placeholder that allows your
backend and the Cognito service to communicate about the developer provider.
:param pulumi.Input[str] identity_pool_name: The Cognito Identity Pool name.
:param pulumi.Input[Sequence[pulumi.Input[str]]] openid_connect_provider_arns: Set of OpendID Connect provider ARNs.
:param pulumi.Input[Sequence[pulumi.Input[str]]] saml_provider_arns: An array of Amazon Resource Names (ARNs) of the SAML provider for your identity.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] supported_login_providers: Key-Value pairs mapping provider names to provider app IDs.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the Identity Pool.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["allow_unauthenticated_identities"] = allow_unauthenticated_identities
__props__["arn"] = arn
__props__["cognito_identity_providers"] = cognito_identity_providers
__props__["developer_provider_name"] = developer_provider_name
__props__["identity_pool_name"] = identity_pool_name
__props__["openid_connect_provider_arns"] = openid_connect_provider_arns
__props__["saml_provider_arns"] = saml_provider_arns
__props__["supported_login_providers"] = supported_login_providers
__props__["tags"] = tags
return IdentityPool(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allowUnauthenticatedIdentities")
def allow_unauthenticated_identities(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the identity pool supports unauthenticated logins or not.
"""
return pulumi.get(self, "allow_unauthenticated_identities")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The ARN of the identity pool.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="cognitoIdentityProviders")
def cognito_identity_providers(self) -> pulumi.Output[Optional[Sequence['outputs.IdentityPoolCognitoIdentityProvider']]]:
"""
An array of Amazon Cognito Identity user pools and their client IDs.
"""
return pulumi.get(self, "cognito_identity_providers")
@property
@pulumi.getter(name="developerProviderName")
def developer_provider_name(self) -> pulumi.Output[Optional[str]]:
"""
The "domain" by which Cognito will refer to your users. This name acts as a placeholder that allows your
backend and the Cognito service to communicate about the developer provider.
"""
return pulumi.get(self, "developer_provider_name")
@property
@pulumi.getter(name="identityPoolName")
def identity_pool_name(self) -> pulumi.Output[str]:
"""
The Cognito Identity Pool name.
"""
return pulumi.get(self, "identity_pool_name")
@property
@pulumi.getter(name="openidConnectProviderArns")
def openid_connect_provider_arns(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Set of OpendID Connect provider ARNs.
"""
return pulumi.get(self, "openid_connect_provider_arns")
@property
@pulumi.getter(name="samlProviderArns")
def saml_provider_arns(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
An array of Amazon Resource Names (ARNs) of the SAML provider for your identity.
"""
return pulumi.get(self, "saml_provider_arns")
@property
@pulumi.getter(name="supportedLoginProviders")
def supported_login_providers(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-Value pairs mapping provider names to provider app IDs.
"""
return pulumi.get(self, "supported_login_providers")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of tags to assign to the Identity Pool.
"""
return pulumi.get(self, "tags")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 51.633745 | 209 | 0.679047 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['IdentityPool']
class IdentityPool(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allow_unauthenticated_identities: Optional[pulumi.Input[bool]] = None,
cognito_identity_providers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IdentityPoolCognitoIdentityProviderArgs']]]]] = None,
developer_provider_name: Optional[pulumi.Input[str]] = None,
identity_pool_name: Optional[pulumi.Input[str]] = None,
openid_connect_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
saml_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
supported_login_providers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['allow_unauthenticated_identities'] = allow_unauthenticated_identities
__props__['cognito_identity_providers'] = cognito_identity_providers
__props__['developer_provider_name'] = developer_provider_name
if identity_pool_name is None and not opts.urn:
raise TypeError("Missing required property 'identity_pool_name'")
__props__['identity_pool_name'] = identity_pool_name
__props__['openid_connect_provider_arns'] = openid_connect_provider_arns
__props__['saml_provider_arns'] = saml_provider_arns
__props__['supported_login_providers'] = supported_login_providers
__props__['tags'] = tags
__props__['arn'] = None
super(IdentityPool, __self__).__init__(
'aws:cognito/identityPool:IdentityPool',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
allow_unauthenticated_identities: Optional[pulumi.Input[bool]] = None,
arn: Optional[pulumi.Input[str]] = None,
cognito_identity_providers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IdentityPoolCognitoIdentityProviderArgs']]]]] = None,
developer_provider_name: Optional[pulumi.Input[str]] = None,
identity_pool_name: Optional[pulumi.Input[str]] = None,
openid_connect_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
saml_provider_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
supported_login_providers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'IdentityPool':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["allow_unauthenticated_identities"] = allow_unauthenticated_identities
__props__["arn"] = arn
__props__["cognito_identity_providers"] = cognito_identity_providers
__props__["developer_provider_name"] = developer_provider_name
__props__["identity_pool_name"] = identity_pool_name
__props__["openid_connect_provider_arns"] = openid_connect_provider_arns
__props__["saml_provider_arns"] = saml_provider_arns
__props__["supported_login_providers"] = supported_login_providers
__props__["tags"] = tags
return IdentityPool(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allowUnauthenticatedIdentities")
def allow_unauthenticated_identities(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "allow_unauthenticated_identities")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="cognitoIdentityProviders")
def cognito_identity_providers(self) -> pulumi.Output[Optional[Sequence['outputs.IdentityPoolCognitoIdentityProvider']]]:
return pulumi.get(self, "cognito_identity_providers")
@property
@pulumi.getter(name="developerProviderName")
def developer_provider_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "developer_provider_name")
@property
@pulumi.getter(name="identityPoolName")
def identity_pool_name(self) -> pulumi.Output[str]:
return pulumi.get(self, "identity_pool_name")
@property
@pulumi.getter(name="openidConnectProviderArns")
def openid_connect_provider_arns(self) -> pulumi.Output[Optional[Sequence[str]]]:
return pulumi.get(self, "openid_connect_provider_arns")
@property
@pulumi.getter(name="samlProviderArns")
def saml_provider_arns(self) -> pulumi.Output[Optional[Sequence[str]]]:
return pulumi.get(self, "saml_provider_arns")
@property
@pulumi.getter(name="supportedLoginProviders")
def supported_login_providers(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
return pulumi.get(self, "supported_login_providers")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
return pulumi.get(self, "tags")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
1c38ae3a2b9ed3a97e68b3292d750d92637ef04c | 13,191 | py | Python | test/unit/models/test_proposals.py | swatchie-1/sentinel | 954f76e93ed1422d885a4cb5e46fb8aa3e525899 | [
"MIT"
] | null | null | null | test/unit/models/test_proposals.py | swatchie-1/sentinel | 954f76e93ed1422d885a4cb5e46fb8aa3e525899 | [
"MIT"
] | null | null | null | test/unit/models/test_proposals.py | swatchie-1/sentinel | 954f76e93ed1422d885a4cb5e46fb8aa3e525899 | [
"MIT"
] | 2 | 2019-05-20T16:37:32.000Z | 2021-12-04T15:44:47.000Z | import pytest
import sys
import os
import time
os.environ['SENTINEL_ENV'] = 'test'
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../../lib')))
import misc
import config
from models import GovernanceObject, Proposal, Vote
# clear DB tables before each execution
def setup():
# clear tables first
Vote.delete().execute()
Proposal.delete().execute()
GovernanceObject.delete().execute()
def teardown():
pass
# list of proposal govobjs to import for testing
@pytest.fixture
def go_list_proposals():
items = [
{u'AbsoluteYesCount': 1000,
u'AbstainCount': 7,
u'CollateralHash': u'acb67ec3f3566c9b94a26b70b36c1f74a010a37c0950c22d683cc50da324fdca',
u'DataHex': u'5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20323132323532303430302c20226e616d65223a20226465616e2d6d696c6c65722d35343933222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2032352e37352c202273746172745f65706f6368223a20313437343236313038362c202274797065223a20312c202275726c223a2022687474703a2f2f6461736863656e7472616c2e6f72672f6465616e2d6d696c6c65722d35343933227d5d5d',
u'DataString': u'[["proposal", {"end_epoch": 2122520400, "name": "dean-miller-5493", "payment_address": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amount": 25.75, "start_epoch": 1474261086, "type": 1, "url": "http://hiluxcentral.org/dean-miller-5493"}]]',
u'Hash': u'dfd7d63979c0b62456b63d5fc5306dbec451180adee85876cbf5b28c69d1a86c',
u'IsValidReason': u'',
u'NoCount': 25,
u'YesCount': 1025,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
{u'AbsoluteYesCount': 1000,
u'AbstainCount': 29,
u'CollateralHash': u'3efd23283aa98c2c33f80e4d9ed6f277d195b72547b6491f43280380f6aac810',
u'DataHex': u'5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20323132323532303430302c20226e616d65223a20226665726e616e64657a2d37363235222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2032352e37352c202273746172745f65706f6368223a20313437343236313038362c202274797065223a20312c202275726c223a2022687474703a2f2f6461736863656e7472616c2e6f72672f6665726e616e64657a2d37363235227d5d5d',
u'DataString': u'[["proposal", {"end_epoch": 2122520400, "name": "fernandez-7625", "payment_address": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amount": 25.75, "start_epoch": 1474261086, "type": 1, "url": "http://hiluxcentral.org/fernandez-7625"}]]',
u'Hash': u'0523445762025b2e01a2cd34f1d10f4816cf26ee1796167e5b029901e5873630',
u'IsValidReason': u'',
u'NoCount': 56,
u'YesCount': 1056,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
]
return items
# Proposal
@pytest.fixture
def proposal():
# NOTE: no governance_object_id is set
pobj = Proposal(
start_epoch=1483250400, # 2017-01-01
end_epoch=2122520400,
name="wine-n-cheeze-party",
url="https://hiluxcentral.com/wine-n-cheeze-party",
payment_address="yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui",
payment_amount=13
)
# NOTE: this object is (intentionally) not saved yet.
# We want to return an built, but unsaved, object
return pobj
def test_proposal_is_valid(proposal):
from hiluxd import HiluxDaemon
import hiluxlib
hiluxd = HiluxDaemon.from_hilux_conf(config.hilux_conf)
orig = Proposal(**proposal.get_dict()) # make a copy
# fixture as-is should be valid
assert proposal.is_valid() is True
# ============================================================
# ensure end_date not greater than start_date
# ============================================================
proposal.end_epoch = proposal.start_epoch
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch - 1
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch + 0
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch + 1
assert proposal.is_valid() is True
# reset
proposal = Proposal(**orig.get_dict())
# ============================================================
# ensure valid proposal name
# ============================================================
proposal.name = ' heya!@209h '
assert proposal.is_valid() is False
proposal.name = "anything' OR 'x'='x"
assert proposal.is_valid() is False
proposal.name = ' '
assert proposal.is_valid() is False
proposal.name = ''
assert proposal.is_valid() is False
proposal.name = '0'
assert proposal.is_valid() is True
proposal.name = 'R66-Y'
assert proposal.is_valid() is True
# binary gibberish
proposal.name = hiluxlib.deserialise('22385c7530303933375c75303363375c75303232395c75303138635c75303064335c75303163345c75303264385c75303236615c75303134625c75303163335c75303063335c75303362385c75303266615c75303261355c75303266652f2b5c75303065395c75303164655c75303136655c75303338645c75303062385c75303138635c75303064625c75303064315c75303038325c75303133325c753032333222')
assert proposal.is_valid() is False
# reset
proposal = Proposal(**orig.get_dict())
# ============================================================
# ensure valid payment address
# ============================================================
proposal.payment_address = '7'
assert proposal.is_valid() is False
proposal.payment_address = 'YYE8KWYAUU5YSWSYMB3Q3RYX8XTUU9Y7UI'
assert proposal.is_valid() is False
proposal.payment_address = 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Uj'
assert proposal.is_valid() is False
proposal.payment_address = '221 B Baker St., London, United Kingdom'
assert proposal.is_valid() is False
# this is actually the Hilux foundation multisig address...
proposal.payment_address = '7gnwGHt17heGpG9Crfeh4KGpYNFugPhJdh'
assert proposal.is_valid() is False
proposal.payment_address = 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui'
assert proposal.is_valid() is True
# reset
proposal = Proposal(**orig.get_dict())
# validate URL
proposal.url = ' '
assert proposal.is_valid() is False
proposal.url = ' '
assert proposal.is_valid() is False
proposal.url = 'http://bit.ly/1e1EYJv'
assert proposal.is_valid() is True
proposal.url = 'https://example.com/resource.ext?param=1&other=2'
assert proposal.is_valid() is True
proposal.url = 'www.com'
assert proposal.is_valid() is True
proposal.url = 'v.ht/'
assert proposal.is_valid() is True
proposal.url = 'ipfs:///ipfs/QmPwwoytFU3gZYk5tSppumxaGbHymMUgHsSvrBdQH69XRx/'
assert proposal.is_valid() is True
proposal.url = '/ipfs/QmPwwoytFU3gZYk5tSppumxaGbHymMUgHsSvrBdQH69XRx/'
assert proposal.is_valid() is True
proposal.url = 's3://bucket/thing/anotherthing/file.pdf'
assert proposal.is_valid() is True
proposal.url = 'http://zqktlwi4fecvo6ri.onion/wiki/index.php/Main_Page'
assert proposal.is_valid() is True
proposal.url = 'ftp://ftp.funet.fi/pub/standards/RFC/rfc959.txt'
assert proposal.is_valid() is True
# gibberish URL
proposal.url = hiluxlib.deserialise('22687474703a2f2f5c75303330385c75303065665c75303362345c75303362315c75303266645c75303331345c625c75303134655c75303031615c75303139655c75303133365c75303264315c75303238655c75303364395c75303230665c75303363355c75303030345c75303336665c75303238355c75303165375c75303063635c75303139305c75303262615c75303239316a5c75303130375c75303362365c7530306562645c75303133335c75303335665c7530326562715c75303038655c75303332645c75303362645c75303064665c75303135654f365c75303237335c75303363645c7530333539275c75303165345c75303339615c75303365385c75303334345c75303130615c75303265662e5c75303231625c75303164356a5c75303232345c75303163645c75303336365c75303064625c75303339665c75303230305c75303337615c75303138395c75303263325c75303038345c75303066615c75303031335c75303233655c75303135345c75303165395c75303139635c75303239375c75303039355c75303038345c75303362305c7530306233435c75303135345c75303063665c75303163345c75303261335c75303362655c75303136305c75303139365c75303263665c75303131305c7530313031475c75303162645c75303338645c75303363325c75303138625c75303235625c75303266325c75303264635c75303139335c75303066665c75303066645c75303133625c75303234305c75303137615c75303062355c75303031645c75303238655c75303166315c75303232315c75303161615c75303265325c75303335625c75303333665c75303239345c75303335315c75303038345c75303339395c75303262385c75303132375c75303330357a5c75303263625c75303066305c75303062355c75303164335c75303338385c75303364385c75303130625c75303266325c75303137305c75303335315c75303030305c75303136385c75303039646d5c75303331315c75303236615c75303330375c75303332635c75303361635c665c75303363335c75303264365c75303238645c75303136395c7530323438635c75303163385c75303261355c75303164615c75303165375c75303337355c75303332645c7530333165755c75303131665c75303338375c75303135325c75303065325c75303135326c5c75303164325c75303164615c75303136645c75303061665c75303333375c75303264375c75303339375c75303139395c75303134635c75303165385c75303234315c75303336635c75303130645c75303230635c75303161615c75303339355c75303133315c75303064615c75303165615
c75303336645c75303064325c75303337365c75303363315c75303132645c75303266305c75303064364f255c75303263635c75303162645c75303062385c75303238365c75303136395c75303337335c75303232335c75303336655c75303037665c75303062616b5c75303132365c75303233305c75303330645c75303362385c75303164355c75303166615c75303338395c75303062635c75303135325c75303334365c75303139645c75303135615c75303031395c75303061385c75303133615c75303338635c75303339625c75303261655c75303065395c75303362635c75303166385c75303031665c75303230615c75303263355c75303134335c75303361635c75303334355c75303236645c75303139365c75303362665c75303135615c75303137305c75303165395c75303231395c75303332665c75303232645c75303030365c75303066305c75303134665c75303337375c75303234325d5c75303164325c75303337655c75303265665c75303331395c75303261355c75303265385c75303338395c75303235645c75303334315c75303338395c7530323230585c75303062645c75303166365c75303238645c75303231375c75303066665c75303130385c75303331305c75303330335c75303031395c75303039635c75303363315c75303039615c75303334355c75303331305c75303162335c75303263315c75303132395c75303234335c75303038627c5c75303361335c75303261635c75303165655c75303030305c75303237615c75303038385c75303066355c75303232375c75303236635c75303236355c7530336336205c75303038615c7530333561787c735c75303336305c75303362655c75303235385c75303334345c75303264365c75303262355c75303361315c75303135345c75303131625c75303061625c75303038615c75303332655c75303238325c75303031393d5c75303263335c75303332655c75303163645c75303139305c75303231305c75303131365c75303334305c75303234665c75303162635c75303333645c75303135305c75303132335c75303233645c75303133345c75303062327a5c75303331635c75303136312a5c753032316522')
assert proposal.is_valid() is False
# reset
proposal = Proposal(**orig.get_dict())
# ============================================================
# ensure proposal can't request negative hilux
# ============================================================
proposal.payment_amount = -1
assert proposal.is_valid() is False
def test_proposal_is_expired(proposal):
cycle = 24 # testnet
now = misc.now()
proposal.start_epoch = now - (86400 * 2) # two days ago
proposal.end_epoch = now - (60 * 60) # expired one hour ago
assert proposal.is_expired(superblockcycle=cycle) is False
# fudge factor + a 24-block cycle == an expiry window of 9086, so...
proposal.end_epoch = now - 9085
assert proposal.is_expired(superblockcycle=cycle) is False
proposal.end_epoch = now - 9087
assert proposal.is_expired(superblockcycle=cycle) is True
def test_proposal_is_deletable(proposal):
now = misc.now()
assert proposal.is_deletable() is False
proposal.end_epoch = now - (86400 * 29)
assert proposal.is_deletable() is False
# add a couple seconds for time variance
proposal.end_epoch = now - ((86400 * 30) + 2)
assert proposal.is_deletable() is True
# deterministic ordering
def test_approved_and_ranked(go_list_proposals):
from hiluxd import HiluxDaemon
hiluxd = HiluxDaemon.from_hilux_conf(config.hilux_conf)
for item in go_list_proposals:
(go, subobj) = GovernanceObject.import_gobject_from_hiluxd(hiluxd, item)
prop_list = Proposal.approved_and_ranked(proposal_quorum=1, next_superblock_max_budget=60)
assert prop_list[0].object_hash == u'dfd7d63979c0b62456b63d5fc5306dbec451180adee85876cbf5b28c69d1a86c'
assert prop_list[1].object_hash == u'0523445762025b2e01a2cd34f1d10f4816cf26ee1796167e5b029901e5873630'
| 52.764 | 3,629 | 0.779547 | import pytest
import sys
import os
import time
os.environ['SENTINEL_ENV'] = 'test'
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../../lib')))
import misc
import config
from models import GovernanceObject, Proposal, Vote
def setup():
Vote.delete().execute()
Proposal.delete().execute()
GovernanceObject.delete().execute()
def teardown():
pass
@pytest.fixture
def go_list_proposals():
items = [
{u'AbsoluteYesCount': 1000,
u'AbstainCount': 7,
u'CollateralHash': u'acb67ec3f3566c9b94a26b70b36c1f74a010a37c0950c22d683cc50da324fdca',
u'DataHex': u'5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20323132323532303430302c20226e616d65223a20226465616e2d6d696c6c65722d35343933222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2032352e37352c202273746172745f65706f6368223a20313437343236313038362c202274797065223a20312c202275726c223a2022687474703a2f2f6461736863656e7472616c2e6f72672f6465616e2d6d696c6c65722d35343933227d5d5d',
u'DataString': u'[["proposal", {"end_epoch": 2122520400, "name": "dean-miller-5493", "payment_address": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amount": 25.75, "start_epoch": 1474261086, "type": 1, "url": "http://hiluxcentral.org/dean-miller-5493"}]]',
u'Hash': u'dfd7d63979c0b62456b63d5fc5306dbec451180adee85876cbf5b28c69d1a86c',
u'IsValidReason': u'',
u'NoCount': 25,
u'YesCount': 1025,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
{u'AbsoluteYesCount': 1000,
u'AbstainCount': 29,
u'CollateralHash': u'3efd23283aa98c2c33f80e4d9ed6f277d195b72547b6491f43280380f6aac810',
u'DataHex': u'5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20323132323532303430302c20226e616d65223a20226665726e616e64657a2d37363235222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2032352e37352c202273746172745f65706f6368223a20313437343236313038362c202274797065223a20312c202275726c223a2022687474703a2f2f6461736863656e7472616c2e6f72672f6665726e616e64657a2d37363235227d5d5d',
u'DataString': u'[["proposal", {"end_epoch": 2122520400, "name": "fernandez-7625", "payment_address": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amount": 25.75, "start_epoch": 1474261086, "type": 1, "url": "http://hiluxcentral.org/fernandez-7625"}]]',
u'Hash': u'0523445762025b2e01a2cd34f1d10f4816cf26ee1796167e5b029901e5873630',
u'IsValidReason': u'',
u'NoCount': 56,
u'YesCount': 1056,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
]
return items
@pytest.fixture
def proposal():
pobj = Proposal(
start_epoch=1483250400,
end_epoch=2122520400,
name="wine-n-cheeze-party",
url="https://hiluxcentral.com/wine-n-cheeze-party",
payment_address="yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui",
payment_amount=13
)
return pobj
def test_proposal_is_valid(proposal):
from hiluxd import HiluxDaemon
import hiluxlib
hiluxd = HiluxDaemon.from_hilux_conf(config.hilux_conf)
orig = Proposal(**proposal.get_dict())
assert proposal.is_valid() is True
proposal.end_epoch = proposal.start_epoch
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch - 1
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch + 0
assert proposal.is_valid() is False
proposal.end_epoch = proposal.start_epoch + 1
assert proposal.is_valid() is True
proposal = Proposal(**orig.get_dict())
proposal.name = ' heya!@209h '
assert proposal.is_valid() is False
proposal.name = "anything' OR 'x'='x"
assert proposal.is_valid() is False
proposal.name = ' '
assert proposal.is_valid() is False
proposal.name = ''
assert proposal.is_valid() is False
proposal.name = '0'
assert proposal.is_valid() is True
proposal.name = 'R66-Y'
assert proposal.is_valid() is True
proposal.name = hiluxlib.deserialise('22385c7530303933375c75303363375c75303232395c75303138635c75303064335c75303163345c75303264385c75303236615c75303134625c75303163335c75303063335c75303362385c75303266615c75303261355c75303266652f2b5c75303065395c75303164655c75303136655c75303338645c75303062385c75303138635c75303064625c75303064315c75303038325c75303133325c753032333222')
assert proposal.is_valid() is False
proposal = Proposal(**orig.get_dict())
proposal.payment_address = '7'
assert proposal.is_valid() is False
proposal.payment_address = 'YYE8KWYAUU5YSWSYMB3Q3RYX8XTUU9Y7UI'
assert proposal.is_valid() is False
proposal.payment_address = 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Uj'
assert proposal.is_valid() is False
proposal.payment_address = '221 B Baker St., London, United Kingdom'
assert proposal.is_valid() is False
proposal.payment_address = '7gnwGHt17heGpG9Crfeh4KGpYNFugPhJdh'
assert proposal.is_valid() is False
proposal.payment_address = 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui'
assert proposal.is_valid() is True
proposal = Proposal(**orig.get_dict())
proposal.url = ' '
assert proposal.is_valid() is False
proposal.url = ' '
assert proposal.is_valid() is False
proposal.url = 'http://bit.ly/1e1EYJv'
assert proposal.is_valid() is True
proposal.url = 'https://example.com/resource.ext?param=1&other=2'
assert proposal.is_valid() is True
proposal.url = 'www.com'
assert proposal.is_valid() is True
proposal.url = 'v.ht/'
assert proposal.is_valid() is True
proposal.url = 'ipfs:///ipfs/QmPwwoytFU3gZYk5tSppumxaGbHymMUgHsSvrBdQH69XRx/'
assert proposal.is_valid() is True
proposal.url = '/ipfs/QmPwwoytFU3gZYk5tSppumxaGbHymMUgHsSvrBdQH69XRx/'
assert proposal.is_valid() is True
proposal.url = 's3://bucket/thing/anotherthing/file.pdf'
assert proposal.is_valid() is True
proposal.url = 'http://zqktlwi4fecvo6ri.onion/wiki/index.php/Main_Page'
assert proposal.is_valid() is True
proposal.url = 'ftp://ftp.funet.fi/pub/standards/RFC/rfc959.txt'
assert proposal.is_valid() is True
proposal.url = hiluxlib.deserialise('22687474703a2f2f5c75303330385c75303065665c75303362345c75303362315c75303266645c75303331345c625c75303134655c75303031615c75303139655c75303133365c75303264315c75303238655c75303364395c75303230665c75303363355c75303030345c75303336665c75303238355c75303165375c75303063635c75303139305c75303262615c75303239316a5c75303130375c75303362365c7530306562645c75303133335c75303335665c7530326562715c75303038655c75303332645c75303362645c75303064665c75303135654f365c75303237335c75303363645c7530333539275c75303165345c75303339615c75303365385c75303334345c75303130615c75303265662e5c75303231625c75303164356a5c75303232345c75303163645c75303336365c75303064625c75303339665c75303230305c75303337615c75303138395c75303263325c75303038345c75303066615c75303031335c75303233655c75303135345c75303165395c75303139635c75303239375c75303039355c75303038345c75303362305c7530306233435c75303135345c75303063665c75303163345c75303261335c75303362655c75303136305c75303139365c75303263665c75303131305c7530313031475c75303162645c75303338645c75303363325c75303138625c75303235625c75303266325c75303264635c75303139335c75303066665c75303066645c75303133625c75303234305c75303137615c75303062355c75303031645c75303238655c75303166315c75303232315c75303161615c75303265325c75303335625c75303333665c75303239345c75303335315c75303038345c75303339395c75303262385c75303132375c75303330357a5c75303263625c75303066305c75303062355c75303164335c75303338385c75303364385c75303130625c75303266325c75303137305c75303335315c75303030305c75303136385c75303039646d5c75303331315c75303236615c75303330375c75303332635c75303361635c665c75303363335c75303264365c75303238645c75303136395c7530323438635c75303163385c75303261355c75303164615c75303165375c75303337355c75303332645c7530333165755c75303131665c75303338375c75303135325c75303065325c75303135326c5c75303164325c75303164615c75303136645c75303061665c75303333375c75303264375c75303339375c75303139395c75303134635c75303165385c75303234315c75303336635c75303130645c75303230635c75303161615c75303339355c75303133315c75303064615c75303165615
c75303336645c75303064325c75303337365c75303363315c75303132645c75303266305c75303064364f255c75303263635c75303162645c75303062385c75303238365c75303136395c75303337335c75303232335c75303336655c75303037665c75303062616b5c75303132365c75303233305c75303330645c75303362385c75303164355c75303166615c75303338395c75303062635c75303135325c75303334365c75303139645c75303135615c75303031395c75303061385c75303133615c75303338635c75303339625c75303261655c75303065395c75303362635c75303166385c75303031665c75303230615c75303263355c75303134335c75303361635c75303334355c75303236645c75303139365c75303362665c75303135615c75303137305c75303165395c75303231395c75303332665c75303232645c75303030365c75303066305c75303134665c75303337375c75303234325d5c75303164325c75303337655c75303265665c75303331395c75303261355c75303265385c75303338395c75303235645c75303334315c75303338395c7530323230585c75303062645c75303166365c75303238645c75303231375c75303066665c75303130385c75303331305c75303330335c75303031395c75303039635c75303363315c75303039615c75303334355c75303331305c75303162335c75303263315c75303132395c75303234335c75303038627c5c75303361335c75303261635c75303165655c75303030305c75303237615c75303038385c75303066355c75303232375c75303236635c75303236355c7530336336205c75303038615c7530333561787c735c75303336305c75303362655c75303235385c75303334345c75303264365c75303262355c75303361315c75303135345c75303131625c75303061625c75303038615c75303332655c75303238325c75303031393d5c75303263335c75303332655c75303163645c75303139305c75303231305c75303131365c75303334305c75303234665c75303162635c75303333645c75303135305c75303132335c75303233645c75303133345c75303062327a5c75303331635c75303136312a5c753032316522')
assert proposal.is_valid() is False
proposal = Proposal(**orig.get_dict())
# ============================================================
proposal.payment_amount = -1
assert proposal.is_valid() is False
def test_proposal_is_expired(proposal):
cycle = 24 # testnet
now = misc.now()
proposal.start_epoch = now - (86400 * 2) # two days ago
proposal.end_epoch = now - (60 * 60) # expired one hour ago
assert proposal.is_expired(superblockcycle=cycle) is False
# fudge factor + a 24-block cycle == an expiry window of 9086, so...
proposal.end_epoch = now - 9085
assert proposal.is_expired(superblockcycle=cycle) is False
proposal.end_epoch = now - 9087
assert proposal.is_expired(superblockcycle=cycle) is True
def test_proposal_is_deletable(proposal):
now = misc.now()
assert proposal.is_deletable() is False
proposal.end_epoch = now - (86400 * 29)
assert proposal.is_deletable() is False
# add a couple seconds for time variance
proposal.end_epoch = now - ((86400 * 30) + 2)
assert proposal.is_deletable() is True
# deterministic ordering
def test_approved_and_ranked(go_list_proposals):
from hiluxd import HiluxDaemon
hiluxd = HiluxDaemon.from_hilux_conf(config.hilux_conf)
for item in go_list_proposals:
(go, subobj) = GovernanceObject.import_gobject_from_hiluxd(hiluxd, item)
prop_list = Proposal.approved_and_ranked(proposal_quorum=1, next_superblock_max_budget=60)
assert prop_list[0].object_hash == u'dfd7d63979c0b62456b63d5fc5306dbec451180adee85876cbf5b28c69d1a86c'
assert prop_list[1].object_hash == u'0523445762025b2e01a2cd34f1d10f4816cf26ee1796167e5b029901e5873630'
| true | true |
1c38ae6d57c9f55f638d14d8f5d57647b4c2ce3d | 9,786 | py | Python | example/ssd/symbol/inceptionv3.py | axbaretto/mxnet | 5f593885356ff6d14f5519fa18e79b944beb51cd | [
"Apache-2.0"
] | 866 | 2016-10-07T16:05:13.000Z | 2022-01-19T08:30:31.000Z | smd_hpi/examples/binary-imagenet1k/symbols/inception-v3.py | yanghaojin/BMXNet | 102f8d0ed59529bbd162c37bf07ae58ad6c4caa1 | [
"Apache-2.0"
] | 237 | 2016-10-06T21:19:45.000Z | 2021-07-20T03:52:45.000Z | smd_hpi/examples/binary-imagenet1k/symbols/inception-v3.py | yanghaojin/BMXNet | 102f8d0ed59529bbd162c37bf07ae58ad6c4caa1 | [
"Apache-2.0"
] | 431 | 2016-10-19T10:08:07.000Z | 2021-10-03T00:43:33.000Z | """
Inception V3, suitable for images with around 299 x 299
Reference:
Szegedy, Christian, et al. "Rethinking the Inception Architecture for Computer Vision." arXiv preprint arXiv:1512.00567 (2015).
"""
import mxnet as mx
def Conv(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=None, suffix=''):
conv = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, no_bias=True, name='%s%s_conv2d' %(name, suffix))
bn = mx.sym.BatchNorm(data=conv, name='%s%s_batchnorm' %(name, suffix), fix_gamma=True)
act = mx.sym.Activation(data=bn, act_type='relu', name='%s%s_relu' %(name, suffix))
return act
def Inception7A(data,
num_1x1,
num_3x3_red, num_3x3_1, num_3x3_2,
num_5x5_red, num_5x5,
pool, proj,
name):
tower_1x1 = Conv(data, num_1x1, name=('%s_conv' % name))
tower_5x5 = Conv(data, num_5x5_red, name=('%s_tower' % name), suffix='_conv')
tower_5x5 = Conv(tower_5x5, num_5x5, kernel=(5, 5), pad=(2, 2), name=('%s_tower' % name), suffix='_conv_1')
tower_3x3 = Conv(data, num_3x3_red, name=('%s_tower_1' % name), suffix='_conv')
tower_3x3 = Conv(tower_3x3, num_3x3_1, kernel=(3, 3), pad=(1, 1), name=('%s_tower_1' % name), suffix='_conv_1')
tower_3x3 = Conv(tower_3x3, num_3x3_2, kernel=(3, 3), pad=(1, 1), name=('%s_tower_1' % name), suffix='_conv_2')
pooling = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
cproj = Conv(pooling, proj, name=('%s_tower_2' % name), suffix='_conv')
concat = mx.sym.Concat(*[tower_1x1, tower_5x5, tower_3x3, cproj], name='ch_concat_%s_chconcat' % name)
return concat
# First Downsample
def Inception7B(data,
num_3x3,
num_d3x3_red, num_d3x3_1, num_d3x3_2,
pool,
name):
tower_3x3 = Conv(data, num_3x3, kernel=(3, 3), pad=(0, 0), stride=(2, 2), name=('%s_conv' % name))
tower_d3x3 = Conv(data, num_d3x3_red, name=('%s_tower' % name), suffix='_conv')
tower_d3x3 = Conv(tower_d3x3, num_d3x3_1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name=('%s_tower' % name), suffix='_conv_1')
tower_d3x3 = Conv(tower_d3x3, num_d3x3_2, kernel=(3, 3), pad=(0, 0), stride=(2, 2), name=('%s_tower' % name), suffix='_conv_2')
pooling = mx.symbol.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pad=(0,0), pool_type="max", name=('max_pool_%s_pool' % name))
concat = mx.sym.Concat(*[tower_3x3, tower_d3x3, pooling], name='ch_concat_%s_chconcat' % name)
return concat
def Inception7C(data,
num_1x1,
num_d7_red, num_d7_1, num_d7_2,
num_q7_red, num_q7_1, num_q7_2, num_q7_3, num_q7_4,
pool, proj,
name):
tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=('%s_conv' % name))
tower_d7 = Conv(data=data, num_filter=num_d7_red, name=('%s_tower' % name), suffix='_conv')
tower_d7 = Conv(data=tower_d7, num_filter=num_d7_1, kernel=(1, 7), pad=(0, 3), name=('%s_tower' % name), suffix='_conv_1')
tower_d7 = Conv(data=tower_d7, num_filter=num_d7_2, kernel=(7, 1), pad=(3, 0), name=('%s_tower' % name), suffix='_conv_2')
tower_q7 = Conv(data=data, num_filter=num_q7_red, name=('%s_tower_1' % name), suffix='_conv')
tower_q7 = Conv(data=tower_q7, num_filter=num_q7_1, kernel=(7, 1), pad=(3, 0), name=('%s_tower_1' % name), suffix='_conv_1')
tower_q7 = Conv(data=tower_q7, num_filter=num_q7_2, kernel=(1, 7), pad=(0, 3), name=('%s_tower_1' % name), suffix='_conv_2')
tower_q7 = Conv(data=tower_q7, num_filter=num_q7_3, kernel=(7, 1), pad=(3, 0), name=('%s_tower_1' % name), suffix='_conv_3')
tower_q7 = Conv(data=tower_q7, num_filter=num_q7_4, kernel=(1, 7), pad=(0, 3), name=('%s_tower_1' % name), suffix='_conv_4')
pooling = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
cproj = Conv(data=pooling, num_filter=proj, kernel=(1, 1), name=('%s_tower_2' % name), suffix='_conv')
# concat
concat = mx.sym.Concat(*[tower_1x1, tower_d7, tower_q7, cproj], name='ch_concat_%s_chconcat' % name)
return concat
def Inception7D(data,
num_3x3_red, num_3x3,
num_d7_3x3_red, num_d7_1, num_d7_2, num_d7_3x3,
pool,
name):
tower_3x3 = Conv(data=data, num_filter=num_3x3_red, name=('%s_tower' % name), suffix='_conv')
tower_3x3 = Conv(data=tower_3x3, num_filter=num_3x3, kernel=(3, 3), pad=(0,0), stride=(2, 2), name=('%s_tower' % name), suffix='_conv_1')
tower_d7_3x3 = Conv(data=data, num_filter=num_d7_3x3_red, name=('%s_tower_1' % name), suffix='_conv')
tower_d7_3x3 = Conv(data=tower_d7_3x3, num_filter=num_d7_1, kernel=(1, 7), pad=(0, 3), name=('%s_tower_1' % name), suffix='_conv_1')
tower_d7_3x3 = Conv(data=tower_d7_3x3, num_filter=num_d7_2, kernel=(7, 1), pad=(3, 0), name=('%s_tower_1' % name), suffix='_conv_2')
tower_d7_3x3 = Conv(data=tower_d7_3x3, num_filter=num_d7_3x3, kernel=(3, 3), stride=(2, 2), name=('%s_tower_1' % name), suffix='_conv_3')
pooling = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
# concat
concat = mx.sym.Concat(*[tower_3x3, tower_d7_3x3, pooling], name='ch_concat_%s_chconcat' % name)
return concat
def Inception7E(data,
num_1x1,
num_d3_red, num_d3_1, num_d3_2,
num_3x3_d3_red, num_3x3, num_3x3_d3_1, num_3x3_d3_2,
pool, proj,
name):
tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=('%s_conv' % name))
tower_d3 = Conv(data=data, num_filter=num_d3_red, name=('%s_tower' % name), suffix='_conv')
tower_d3_a = Conv(data=tower_d3, num_filter=num_d3_1, kernel=(1, 3), pad=(0, 1), name=('%s_tower' % name), suffix='_mixed_conv')
tower_d3_b = Conv(data=tower_d3, num_filter=num_d3_2, kernel=(3, 1), pad=(1, 0), name=('%s_tower' % name), suffix='_mixed_conv_1')
tower_3x3_d3 = Conv(data=data, num_filter=num_3x3_d3_red, name=('%s_tower_1' % name), suffix='_conv')
tower_3x3_d3 = Conv(data=tower_3x3_d3, num_filter=num_3x3, kernel=(3, 3), pad=(1, 1), name=('%s_tower_1' % name), suffix='_conv_1')
tower_3x3_d3_a = Conv(data=tower_3x3_d3, num_filter=num_3x3_d3_1, kernel=(1, 3), pad=(0, 1), name=('%s_tower_1' % name), suffix='_mixed_conv')
tower_3x3_d3_b = Conv(data=tower_3x3_d3, num_filter=num_3x3_d3_2, kernel=(3, 1), pad=(1, 0), name=('%s_tower_1' % name), suffix='_mixed_conv_1')
pooling = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
cproj = Conv(data=pooling, num_filter=proj, kernel=(1, 1), name=('%s_tower_2' % name), suffix='_conv')
# concat
concat = mx.sym.Concat(*[tower_1x1, tower_d3_a, tower_d3_b, tower_3x3_d3_a, tower_3x3_d3_b, cproj], name='ch_concat_%s_chconcat' % name)
return concat
# In[49]:
def get_symbol(num_classes=1000, **kwargs):
data = mx.symbol.Variable(name="data")
# stage 1
conv = Conv(data, 32, kernel=(3, 3), stride=(2, 2), name="conv")
conv_1 = Conv(conv, 32, kernel=(3, 3), name="conv_1")
conv_2 = Conv(conv_1, 64, kernel=(3, 3), pad=(1, 1), name="conv_2")
pool = mx.sym.Pooling(data=conv_2, kernel=(3, 3), stride=(2, 2), pool_type="max", name="pool")
# stage 2
conv_3 = Conv(pool, 80, kernel=(1, 1), name="conv_3")
conv_4 = Conv(conv_3, 192, kernel=(3, 3), name="conv_4")
pool1 = mx.sym.Pooling(data=conv_4, kernel=(3, 3), stride=(2, 2), pool_type="max", name="pool1")
# stage 3
in3a = Inception7A(pool1, 64,
64, 96, 96,
48, 64,
"avg", 32, "mixed")
in3b = Inception7A(in3a, 64,
64, 96, 96,
48, 64,
"avg", 64, "mixed_1")
in3c = Inception7A(in3b, 64,
64, 96, 96,
48, 64,
"avg", 64, "mixed_2")
in3d = Inception7B(in3c, 384,
64, 96, 96,
"max", "mixed_3")
# stage 4
in4a = Inception7C(in3d, 192,
128, 128, 192,
128, 128, 128, 128, 192,
"avg", 192, "mixed_4")
in4b = Inception7C(in4a, 192,
160, 160, 192,
160, 160, 160, 160, 192,
"avg", 192, "mixed_5")
in4c = Inception7C(in4b, 192,
160, 160, 192,
160, 160, 160, 160, 192,
"avg", 192, "mixed_6")
in4d = Inception7C(in4c, 192,
192, 192, 192,
192, 192, 192, 192, 192,
"avg", 192, "mixed_7")
in4e = Inception7D(in4d, 192, 320,
192, 192, 192, 192,
"max", "mixed_8")
# stage 5
in5a = Inception7E(in4e, 320,
384, 384, 384,
448, 384, 384, 384,
"avg", 192, "mixed_9")
in5b = Inception7E(in5a, 320,
384, 384, 384,
448, 384, 384, 384,
"max", 192, "mixed_10")
# pool
pool = mx.sym.Pooling(data=in5b, kernel=(8, 8), stride=(1, 1), pool_type="avg", name="global_pool")
flatten = mx.sym.Flatten(data=pool, name="flatten")
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=num_classes, name='fc1')
softmax = mx.symbol.SoftmaxOutput(data=fc1, name='softmax')
return softmax
| 57.905325 | 152 | 0.589822 | import mxnet as mx
def Conv(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=None, suffix=''):
conv = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, no_bias=True, name='%s%s_conv2d' %(name, suffix))
bn = mx.sym.BatchNorm(data=conv, name='%s%s_batchnorm' %(name, suffix), fix_gamma=True)
act = mx.sym.Activation(data=bn, act_type='relu', name='%s%s_relu' %(name, suffix))
return act
def Inception7A(data,
num_1x1,
num_3x3_red, num_3x3_1, num_3x3_2,
num_5x5_red, num_5x5,
pool, proj,
name):
tower_1x1 = Conv(data, num_1x1, name=('%s_conv' % name))
tower_5x5 = Conv(data, num_5x5_red, name=('%s_tower' % name), suffix='_conv')
tower_5x5 = Conv(tower_5x5, num_5x5, kernel=(5, 5), pad=(2, 2), name=('%s_tower' % name), suffix='_conv_1')
tower_3x3 = Conv(data, num_3x3_red, name=('%s_tower_1' % name), suffix='_conv')
tower_3x3 = Conv(tower_3x3, num_3x3_1, kernel=(3, 3), pad=(1, 1), name=('%s_tower_1' % name), suffix='_conv_1')
tower_3x3 = Conv(tower_3x3, num_3x3_2, kernel=(3, 3), pad=(1, 1), name=('%s_tower_1' % name), suffix='_conv_2')
pooling = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
cproj = Conv(pooling, proj, name=('%s_tower_2' % name), suffix='_conv')
concat = mx.sym.Concat(*[tower_1x1, tower_5x5, tower_3x3, cproj], name='ch_concat_%s_chconcat' % name)
return concat
def Inception7B(data,
num_3x3,
num_d3x3_red, num_d3x3_1, num_d3x3_2,
pool,
name):
tower_3x3 = Conv(data, num_3x3, kernel=(3, 3), pad=(0, 0), stride=(2, 2), name=('%s_conv' % name))
tower_d3x3 = Conv(data, num_d3x3_red, name=('%s_tower' % name), suffix='_conv')
tower_d3x3 = Conv(tower_d3x3, num_d3x3_1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name=('%s_tower' % name), suffix='_conv_1')
tower_d3x3 = Conv(tower_d3x3, num_d3x3_2, kernel=(3, 3), pad=(0, 0), stride=(2, 2), name=('%s_tower' % name), suffix='_conv_2')
pooling = mx.symbol.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pad=(0,0), pool_type="max", name=('max_pool_%s_pool' % name))
concat = mx.sym.Concat(*[tower_3x3, tower_d3x3, pooling], name='ch_concat_%s_chconcat' % name)
return concat
def Inception7C(data,
num_1x1,
num_d7_red, num_d7_1, num_d7_2,
num_q7_red, num_q7_1, num_q7_2, num_q7_3, num_q7_4,
pool, proj,
name):
tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=('%s_conv' % name))
tower_d7 = Conv(data=data, num_filter=num_d7_red, name=('%s_tower' % name), suffix='_conv')
tower_d7 = Conv(data=tower_d7, num_filter=num_d7_1, kernel=(1, 7), pad=(0, 3), name=('%s_tower' % name), suffix='_conv_1')
tower_d7 = Conv(data=tower_d7, num_filter=num_d7_2, kernel=(7, 1), pad=(3, 0), name=('%s_tower' % name), suffix='_conv_2')
tower_q7 = Conv(data=data, num_filter=num_q7_red, name=('%s_tower_1' % name), suffix='_conv')
tower_q7 = Conv(data=tower_q7, num_filter=num_q7_1, kernel=(7, 1), pad=(3, 0), name=('%s_tower_1' % name), suffix='_conv_1')
tower_q7 = Conv(data=tower_q7, num_filter=num_q7_2, kernel=(1, 7), pad=(0, 3), name=('%s_tower_1' % name), suffix='_conv_2')
tower_q7 = Conv(data=tower_q7, num_filter=num_q7_3, kernel=(7, 1), pad=(3, 0), name=('%s_tower_1' % name), suffix='_conv_3')
tower_q7 = Conv(data=tower_q7, num_filter=num_q7_4, kernel=(1, 7), pad=(0, 3), name=('%s_tower_1' % name), suffix='_conv_4')
pooling = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
cproj = Conv(data=pooling, num_filter=proj, kernel=(1, 1), name=('%s_tower_2' % name), suffix='_conv')
concat = mx.sym.Concat(*[tower_1x1, tower_d7, tower_q7, cproj], name='ch_concat_%s_chconcat' % name)
return concat
def Inception7D(data,
num_3x3_red, num_3x3,
num_d7_3x3_red, num_d7_1, num_d7_2, num_d7_3x3,
pool,
name):
tower_3x3 = Conv(data=data, num_filter=num_3x3_red, name=('%s_tower' % name), suffix='_conv')
tower_3x3 = Conv(data=tower_3x3, num_filter=num_3x3, kernel=(3, 3), pad=(0,0), stride=(2, 2), name=('%s_tower' % name), suffix='_conv_1')
tower_d7_3x3 = Conv(data=data, num_filter=num_d7_3x3_red, name=('%s_tower_1' % name), suffix='_conv')
tower_d7_3x3 = Conv(data=tower_d7_3x3, num_filter=num_d7_1, kernel=(1, 7), pad=(0, 3), name=('%s_tower_1' % name), suffix='_conv_1')
tower_d7_3x3 = Conv(data=tower_d7_3x3, num_filter=num_d7_2, kernel=(7, 1), pad=(3, 0), name=('%s_tower_1' % name), suffix='_conv_2')
tower_d7_3x3 = Conv(data=tower_d7_3x3, num_filter=num_d7_3x3, kernel=(3, 3), stride=(2, 2), name=('%s_tower_1' % name), suffix='_conv_3')
pooling = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
concat = mx.sym.Concat(*[tower_3x3, tower_d7_3x3, pooling], name='ch_concat_%s_chconcat' % name)
return concat
def Inception7E(data,
num_1x1,
num_d3_red, num_d3_1, num_d3_2,
num_3x3_d3_red, num_3x3, num_3x3_d3_1, num_3x3_d3_2,
pool, proj,
name):
tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=('%s_conv' % name))
tower_d3 = Conv(data=data, num_filter=num_d3_red, name=('%s_tower' % name), suffix='_conv')
tower_d3_a = Conv(data=tower_d3, num_filter=num_d3_1, kernel=(1, 3), pad=(0, 1), name=('%s_tower' % name), suffix='_mixed_conv')
tower_d3_b = Conv(data=tower_d3, num_filter=num_d3_2, kernel=(3, 1), pad=(1, 0), name=('%s_tower' % name), suffix='_mixed_conv_1')
tower_3x3_d3 = Conv(data=data, num_filter=num_3x3_d3_red, name=('%s_tower_1' % name), suffix='_conv')
tower_3x3_d3 = Conv(data=tower_3x3_d3, num_filter=num_3x3, kernel=(3, 3), pad=(1, 1), name=('%s_tower_1' % name), suffix='_conv_1')
tower_3x3_d3_a = Conv(data=tower_3x3_d3, num_filter=num_3x3_d3_1, kernel=(1, 3), pad=(0, 1), name=('%s_tower_1' % name), suffix='_mixed_conv')
tower_3x3_d3_b = Conv(data=tower_3x3_d3, num_filter=num_3x3_d3_2, kernel=(3, 1), pad=(1, 0), name=('%s_tower_1' % name), suffix='_mixed_conv_1')
pooling = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
cproj = Conv(data=pooling, num_filter=proj, kernel=(1, 1), name=('%s_tower_2' % name), suffix='_conv')
concat = mx.sym.Concat(*[tower_1x1, tower_d3_a, tower_d3_b, tower_3x3_d3_a, tower_3x3_d3_b, cproj], name='ch_concat_%s_chconcat' % name)
return concat
def get_symbol(num_classes=1000, **kwargs):
data = mx.symbol.Variable(name="data")
conv = Conv(data, 32, kernel=(3, 3), stride=(2, 2), name="conv")
conv_1 = Conv(conv, 32, kernel=(3, 3), name="conv_1")
conv_2 = Conv(conv_1, 64, kernel=(3, 3), pad=(1, 1), name="conv_2")
pool = mx.sym.Pooling(data=conv_2, kernel=(3, 3), stride=(2, 2), pool_type="max", name="pool")
conv_3 = Conv(pool, 80, kernel=(1, 1), name="conv_3")
conv_4 = Conv(conv_3, 192, kernel=(3, 3), name="conv_4")
pool1 = mx.sym.Pooling(data=conv_4, kernel=(3, 3), stride=(2, 2), pool_type="max", name="pool1")
in3a = Inception7A(pool1, 64,
64, 96, 96,
48, 64,
"avg", 32, "mixed")
in3b = Inception7A(in3a, 64,
64, 96, 96,
48, 64,
"avg", 64, "mixed_1")
in3c = Inception7A(in3b, 64,
64, 96, 96,
48, 64,
"avg", 64, "mixed_2")
in3d = Inception7B(in3c, 384,
64, 96, 96,
"max", "mixed_3")
in4a = Inception7C(in3d, 192,
128, 128, 192,
128, 128, 128, 128, 192,
"avg", 192, "mixed_4")
in4b = Inception7C(in4a, 192,
160, 160, 192,
160, 160, 160, 160, 192,
"avg", 192, "mixed_5")
in4c = Inception7C(in4b, 192,
160, 160, 192,
160, 160, 160, 160, 192,
"avg", 192, "mixed_6")
in4d = Inception7C(in4c, 192,
192, 192, 192,
192, 192, 192, 192, 192,
"avg", 192, "mixed_7")
in4e = Inception7D(in4d, 192, 320,
192, 192, 192, 192,
"max", "mixed_8")
in5a = Inception7E(in4e, 320,
384, 384, 384,
448, 384, 384, 384,
"avg", 192, "mixed_9")
in5b = Inception7E(in5a, 320,
384, 384, 384,
448, 384, 384, 384,
"max", 192, "mixed_10")
pool = mx.sym.Pooling(data=in5b, kernel=(8, 8), stride=(1, 1), pool_type="avg", name="global_pool")
flatten = mx.sym.Flatten(data=pool, name="flatten")
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=num_classes, name='fc1')
softmax = mx.symbol.SoftmaxOutput(data=fc1, name='softmax')
return softmax
| true | true |
1c38aeb55160851cc8b3ddc7ceec5cf13ea97b20 | 4,113 | py | Python | wagtail/contrib/settings/models.py | sonnybaker/wagtail | 5522992c2923276fca40417401e8fb2c536b4b4f | [
"BSD-3-Clause"
] | 8,851 | 2016-12-09T19:01:45.000Z | 2022-03-31T04:45:06.000Z | wagtail/contrib/settings/models.py | sonnybaker/wagtail | 5522992c2923276fca40417401e8fb2c536b4b4f | [
"BSD-3-Clause"
] | 5,197 | 2016-12-09T19:24:37.000Z | 2022-03-31T22:17:55.000Z | wagtail/contrib/settings/models.py | sonnybaker/wagtail | 5522992c2923276fca40417401e8fb2c536b4b4f | [
"BSD-3-Clause"
] | 2,548 | 2016-12-09T18:16:55.000Z | 2022-03-31T21:34:38.000Z | from django.db import models
from wagtail.core.models import Site
from wagtail.core.utils import InvokeViaAttributeShortcut
from .registry import register_setting
__all__ = ['BaseSetting', 'register_setting']
class BaseSetting(models.Model):
"""
The abstract base model for settings. Subclasses must be registered using
:func:`~wagtail.contrib.settings.registry.register_setting`
"""
# Override to fetch ForeignKey values in the same query when
# retrieving settings via for_site()
select_related = None
site = models.OneToOneField(
Site, unique=True, db_index=True, editable=False, on_delete=models.CASCADE)
class Meta:
abstract = True
@classmethod
def base_queryset(cls):
"""
Returns a queryset of objects of this type to use as a base
for calling get_or_create() on.
You can use the `select_related` attribute on your class to
specify a list of foreign key field names, which the method
will attempt to select additional related-object data for
when the query is executed.
If your needs are more complex than this, you can override
this method on your custom class.
"""
queryset = cls.objects.all()
if cls.select_related is not None:
queryset = queryset.select_related(*cls.select_related)
return queryset
@classmethod
def for_site(cls, site):
"""
Get or create an instance of this setting for the site.
"""
queryset = cls.base_queryset()
instance, created = queryset.get_or_create(site=site)
return instance
@classmethod
def for_request(cls, request):
"""
Get or create an instance of this model for the request,
and cache the result on the request for faster repeat access.
"""
attr_name = cls.get_cache_attr_name()
if hasattr(request, attr_name):
return getattr(request, attr_name)
site = Site.find_for_request(request)
site_settings = cls.for_site(site)
# to allow more efficient page url generation
site_settings._request = request
setattr(request, attr_name, site_settings)
return site_settings
@classmethod
def get_cache_attr_name(cls):
"""
Returns the name of the attribute that should be used to store
a reference to the fetched/created object on a request.
"""
return "_{}.{}".format(
cls._meta.app_label, cls._meta.model_name
).lower()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Allows get_page_url() to be invoked using
# `obj.page_url.foreign_key_name` syntax
self.page_url = InvokeViaAttributeShortcut(self, 'get_page_url')
# Per-instance page URL cache
self._page_url_cache = {}
def get_page_url(self, attribute_name, request=None):
"""
Returns the URL of a page referenced by a foreign key
(or other attribute) matching the name ``attribute_name``.
If the field value is null, or links to something other
than a ``Page`` object, an empty string is returned.
The result is also cached per-object to facilitate
fast repeat access.
Raises an ``AttributeError`` if the object has no such
field or attribute.
"""
if attribute_name in self._page_url_cache:
return self._page_url_cache[attribute_name]
if not hasattr(self, attribute_name):
raise AttributeError(
"'{}' object has no attribute '{}'"
.format(self.__class__.__name__, attribute_name)
)
page = getattr(self, attribute_name)
if hasattr(page, 'specific'):
url = page.specific.get_url(getattr(self, '_request', None))
else:
url = ""
self._page_url_cache[attribute_name] = url
return url
def __str__(self):
return "%s for %s" % (self._meta.verbose_name.capitalize(), self.site)
| 33.439024 | 83 | 0.641867 | from django.db import models
from wagtail.core.models import Site
from wagtail.core.utils import InvokeViaAttributeShortcut
from .registry import register_setting
__all__ = ['BaseSetting', 'register_setting']
class BaseSetting(models.Model):
select_related = None
site = models.OneToOneField(
Site, unique=True, db_index=True, editable=False, on_delete=models.CASCADE)
class Meta:
abstract = True
@classmethod
def base_queryset(cls):
queryset = cls.objects.all()
if cls.select_related is not None:
queryset = queryset.select_related(*cls.select_related)
return queryset
@classmethod
def for_site(cls, site):
queryset = cls.base_queryset()
instance, created = queryset.get_or_create(site=site)
return instance
@classmethod
def for_request(cls, request):
attr_name = cls.get_cache_attr_name()
if hasattr(request, attr_name):
return getattr(request, attr_name)
site = Site.find_for_request(request)
site_settings = cls.for_site(site)
site_settings._request = request
setattr(request, attr_name, site_settings)
return site_settings
@classmethod
def get_cache_attr_name(cls):
return "_{}.{}".format(
cls._meta.app_label, cls._meta.model_name
).lower()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.page_url = InvokeViaAttributeShortcut(self, 'get_page_url')
self._page_url_cache = {}
def get_page_url(self, attribute_name, request=None):
if attribute_name in self._page_url_cache:
return self._page_url_cache[attribute_name]
if not hasattr(self, attribute_name):
raise AttributeError(
"'{}' object has no attribute '{}'"
.format(self.__class__.__name__, attribute_name)
)
page = getattr(self, attribute_name)
if hasattr(page, 'specific'):
url = page.specific.get_url(getattr(self, '_request', None))
else:
url = ""
self._page_url_cache[attribute_name] = url
return url
def __str__(self):
return "%s for %s" % (self._meta.verbose_name.capitalize(), self.site)
| true | true |
1c38aebe7d9526fdb385a5791f5598f850053201 | 10,223 | py | Python | pytext/data/sources/data_source.py | abhinavarora/pytext | d72f977d34c741712d20bd74418feb1e0b7a13f1 | [
"BSD-3-Clause"
] | null | null | null | pytext/data/sources/data_source.py | abhinavarora/pytext | d72f977d34c741712d20bd74418feb1e0b7a13f1 | [
"BSD-3-Clause"
] | null | null | null | pytext/data/sources/data_source.py | abhinavarora/pytext | d72f977d34c741712d20bd74418feb1e0b7a13f1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from typing import Dict
from pytext.config.component import Component, ComponentType
class RawExample(dict):
"""A wrapper class for a single example row with a dict interface.
This is here for any logic we want row objects to have that dicts don't do."""
# Map of registered types for data source subclasses
DATA_SOURCE_TYPES = {}
class SafeFileWrapper:
"""
A simple wrapper class for files which allows filedescriptors to be managed
with normal Python ref counts.
Without using this, if you create a file in a from_config you will see a warning
along the lines of "ResourceWarning: self._file is acquired but not always released"
this is because we're opening a file not in a context manager (with statement).
We want to do it this way because it lets us pass a file object to the DataSource,
rather than a filename. This exposes a ton more flexibility and testability, passing
filenames is one of the paths towards pain.
However, we don't have a clear resource management system set up for configuration.
from_config functions are the tool that we have to allow objects to specify how they
should be created from a configuration, which generally should only happen from the
command line, whereas in eg. a notebook you should build the objects with
constructors directly. If building from constructors, you can just open a file and
pass it, but from_config here needs to create a file object from a configured
filename. Python files don't close automatically, so you also need a system that
will close them when the python interpreter shuts down. If you don't, it will print
a resource warning at runtime, as the interpreter manually closes the filehandles
(although modern OSs are pretty okay with having open file handles, it's hard for me
to justify exactly why Python is so strict about this; I think one of the main
reasons you might actually care is if you have a writeable file handle it might not
have flushed properly when the C runtime exits, but Python doesn't actually
distinguish between writeable and non-writeable file handles).
This class is a wrapper that creates a system for (sort-of) safely closing the file
handles before the runtime exits. It does this by closing the file when the object's
deleter is called. Although the python standard doesn't actually make any guarantees
about when deleters are called, CPython is reference counted and so as an
mplementation detail will call a deleter whenever the last reference to it is
removed, which generally will happen to all objects created during program execution
as long as there aren't reference cycles (I don't actually know off-hand whether the
cycle collection is run before shutdown, and anyway the cycles would have to include
objects that the runtime itself maintains pointers to, which seems like you'd have
to work hard to do and wouldn't do accidentally). This isn't true for other python
systems like PyPy or Jython which use generational garbage collection and so don't
actually always call destructors before the system shuts down, but again this is
only really relevant for mutable files.
An alternative implementation would be to build a resource management system into
PyText, something like a function that we use for opening system resources that
registers the resources and then we make sure are all closed before system shutdown.
That would probably technically be the right solution, but I didn't really think of
that first and also it's a bit longer to implement.
If you are seeing resource warnings on your system, please file a github issue.
"""
def __init__(self, *args, **kwargs):
self._file = open(*args, **kwargs)
def __del__(self):
self._file.close()
def __iter__(self):
"""Some file utilities check hasattr(o, "__iter__") explicitly."""
return iter(self._file)
def __getattr__(self, attr):
return getattr(self._file, attr)
class GeneratorIterator:
"""Create an object which can be iterated over multiple times from a
generator call. Each iteration will call the generator and allow iterating
over it. This is unsafe to use on generators which have side effects, such
as file readers; it's up to the callers to safely manage these scenarios.
"""
def __init__(self, generator, *args, **kwargs):
self.generator = generator
self.args = args
self.kwargs = kwargs
def __iter__(self):
return self.generator(*self.args, **self.kwargs)
class GeneratorMethodProperty:
"""Identify a generator method as a property. This will allow instances to iterate
over the property multiple times, and not consume the generator. It accomplishes
this by wrapping the generator and creating multiple generator instances if
iterated over multiple times.
"""
def __init__(self, generator):
self.generator = generator
def __get__(self, obj, objtype=None):
return GeneratorIterator(self.generator, obj)
# Use the more typical property decorator style
generator_property = GeneratorMethodProperty
class DataSource(Component):
"""
Data sources are simple components that stream data from somewhere using Python's
iteration interface. It should expose 3 iterators, "train", "test", and "eval".
Each of these should be able to be iterated over any number of times, and iterating
over it should yield dictionaries whose values are deserialized python types.
Simply, these data sources exist as an interface to read through datasets
in a pythonic way, with pythonic types, and abstract away the form that they are
stored in.
"""
__COMPONENT_TYPE__ = ComponentType.DATA_SOURCE
__EXPANSIBLE__ = True
def __init__(self, schema):
self.schema = schema
@generator_property
def train(self):
raise NotImplementedError
@generator_property
def test(self):
raise NotImplementedError
@generator_property
def eval(self):
raise NotImplementedError
class RootDataSource(DataSource):
"""A data source which actually loads data from a location. This data source
needs to be responsible for converting types based on a schema, because it should
be the only part of the system that actually needs to understand details about
the underlying storage system.
RootDataSource presents a simpler abstraction than DataSource where the rows
are automatically converted to the right DataTypes.
A RootDataSource should implement `raw_train_data_generator`,
`raw_test_data_generator`, and `raw_eval_data_generator`. These functions
should yield dictionaries of raw objects which the loading system can
convert using the schema loading functions.
"""
class Config(Component.Config):
#: An optional column mapping, allowing the columns in the raw data source
#: to not map directly to the column names in the schema. This mapping will
#: remap names from the raw data source to names in the schema.
column_mapping: Dict[str, str] = {}
def __init__(self, schema, column_mapping=()):
super().__init__(schema)
self.column_mapping = dict(column_mapping)
def _convert_raw_source(self, source):
"""Convert a raw iterable source, ie. from
`DataSource.raw_train_data_generator`, to an iterable that will yield
`pytext.data.type.DataType` objects according to the schema and the
converters for this DataSource.
"""
for row in source:
example = RawExample()
for column_name, value in row.items():
name = self.column_mapping.get(column_name, column_name)
if name not in self.schema:
continue
example[name] = self.load(value, self.schema[name])
if len(example) != len(self.schema):
# We might need to re-evaluate this for multi-task training
logging.warn(
"Skipping row missing values: row {} -> schema {}".format(
list(example.keys()), list(self.schema.keys())
)
)
continue
yield example
@classmethod
def register_type(cls, type):
def decorator(fn):
DATA_SOURCE_TYPES[(cls, type)] = fn
return fn
return decorator
def load(self, value, schema_type):
# It would be nice for subclasses of data sources to work better with this
converter = DATA_SOURCE_TYPES[(type(self), schema_type)]
return converter(value)
def raw_train_data_generator(self):
"""
Returns a generator that yields the TRAIN data one item at a time
in a dictionary where each key is a field and the value is of the
raw type from the source.
DataSources need to implement this.
"""
raise NotImplementedError
def raw_test_data_generator(self):
"""
Returns a generator that yields the TEST data one item at a time
in a dictionary where each key is a field and the value is of the
raw type from the source.
DataSources need to implement this.
"""
raise NotImplementedError
def raw_eval_data_generator(self):
"""
Returns a generator that yields the EVAL data one item at a time
in a dictionary where each key is a field and the value is of the
raw type from the source.
DataSources need to implement this.
"""
raise NotImplementedError
@generator_property
def train(self):
return self._convert_raw_source(self.raw_train_data_generator())
@generator_property
def test(self):
return self._convert_raw_source(self.raw_test_data_generator())
@generator_property
def eval(self):
return self._convert_raw_source(self.raw_eval_data_generator())
| 41.556911 | 88 | 0.706055 |
import logging
from typing import Dict
from pytext.config.component import Component, ComponentType
class RawExample(dict):
DATA_SOURCE_TYPES = {}
class SafeFileWrapper:
def __init__(self, *args, **kwargs):
self._file = open(*args, **kwargs)
def __del__(self):
self._file.close()
def __iter__(self):
return iter(self._file)
def __getattr__(self, attr):
return getattr(self._file, attr)
class GeneratorIterator:
def __init__(self, generator, *args, **kwargs):
self.generator = generator
self.args = args
self.kwargs = kwargs
def __iter__(self):
return self.generator(*self.args, **self.kwargs)
class GeneratorMethodProperty:
def __init__(self, generator):
self.generator = generator
def __get__(self, obj, objtype=None):
return GeneratorIterator(self.generator, obj)
generator_property = GeneratorMethodProperty
class DataSource(Component):
__COMPONENT_TYPE__ = ComponentType.DATA_SOURCE
__EXPANSIBLE__ = True
def __init__(self, schema):
self.schema = schema
@generator_property
def train(self):
raise NotImplementedError
@generator_property
def test(self):
raise NotImplementedError
@generator_property
def eval(self):
raise NotImplementedError
class RootDataSource(DataSource):
class Config(Component.Config):
column_mapping: Dict[str, str] = {}
def __init__(self, schema, column_mapping=()):
super().__init__(schema)
self.column_mapping = dict(column_mapping)
def _convert_raw_source(self, source):
for row in source:
example = RawExample()
for column_name, value in row.items():
name = self.column_mapping.get(column_name, column_name)
if name not in self.schema:
continue
example[name] = self.load(value, self.schema[name])
if len(example) != len(self.schema):
logging.warn(
"Skipping row missing values: row {} -> schema {}".format(
list(example.keys()), list(self.schema.keys())
)
)
continue
yield example
@classmethod
def register_type(cls, type):
def decorator(fn):
DATA_SOURCE_TYPES[(cls, type)] = fn
return fn
return decorator
def load(self, value, schema_type):
converter = DATA_SOURCE_TYPES[(type(self), schema_type)]
return converter(value)
def raw_train_data_generator(self):
raise NotImplementedError
def raw_test_data_generator(self):
raise NotImplementedError
def raw_eval_data_generator(self):
raise NotImplementedError
@generator_property
def train(self):
return self._convert_raw_source(self.raw_train_data_generator())
@generator_property
def test(self):
return self._convert_raw_source(self.raw_test_data_generator())
@generator_property
def eval(self):
return self._convert_raw_source(self.raw_eval_data_generator())
| true | true |
1c38afc6f50dc3226ca40bcc448b91df40033700 | 648 | py | Python | news/migrations/0007_auto_20191002_1638.py | josylad/Moringa-Tribune | 003895fb557b7454b4e3555df407aaf2f0ca58f8 | [
"MIT"
] | null | null | null | news/migrations/0007_auto_20191002_1638.py | josylad/Moringa-Tribune | 003895fb557b7454b4e3555df407aaf2f0ca58f8 | [
"MIT"
] | 2 | 2021-06-08T20:26:03.000Z | 2021-09-08T01:21:23.000Z | news/migrations/0007_auto_20191002_1638.py | josylad/Moringa-Tribune | 003895fb557b7454b4e3555df407aaf2f0ca58f8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-10-02 13:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0006_auto_20191002_1637'),
]
operations = [
migrations.AlterField(
model_name='article',
name='article_image',
field=models.ImageField(default='articles/beagle.jpg', upload_to='articles/'),
),
migrations.AlterField(
model_name='article',
name='title',
field=models.CharField(max_length=255),
),
]
| 24.923077 | 90 | 0.603395 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0006_auto_20191002_1637'),
]
operations = [
migrations.AlterField(
model_name='article',
name='article_image',
field=models.ImageField(default='articles/beagle.jpg', upload_to='articles/'),
),
migrations.AlterField(
model_name='article',
name='title',
field=models.CharField(max_length=255),
),
]
| true | true |
1c38b0b32e7b6af2e0e0a0a43aac9c00f48ba75c | 97 | py | Python | PythonExercicios/ex024-primeiras-letras-do-texto.py | ArthurAlbuquerquee/exercicios-python-cev | ba64d3a025731aae5e238c7cb469917420b7901e | [
"MIT"
] | null | null | null | PythonExercicios/ex024-primeiras-letras-do-texto.py | ArthurAlbuquerquee/exercicios-python-cev | ba64d3a025731aae5e238c7cb469917420b7901e | [
"MIT"
] | null | null | null | PythonExercicios/ex024-primeiras-letras-do-texto.py | ArthurAlbuquerquee/exercicios-python-cev | ba64d3a025731aae5e238c7cb469917420b7901e | [
"MIT"
] | null | null | null | cidade = str(input('Digite o nome de uma cidade: ')).strip()
print(cidade[:5].upper() == 'SANTO') | 48.5 | 60 | 0.649485 | cidade = str(input('Digite o nome de uma cidade: ')).strip()
print(cidade[:5].upper() == 'SANTO') | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.