| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
| rank-based-evaluation | rank-based-evaluation-main/src/kgm/training/matching.py |
# coding=utf-8
"""Training loops for KG matching models."""
import logging
from abc import abstractmethod
from typing import Any, Iterable, List, Mapping, Optional, Tuple, Type
import torch
from torch.optim import Optimizer
from torch.utils import data
from .base import BaseTrainer
from ..data import KnowledgeGraphAlignmentDataset, MatchSideEnum, SIDES
from ..models import KGMatchingModel
from ..modules import MatchingLoss, Similarity
from ..utils.types import IDAlignment, NodeIDs
logger = logging.getLogger(name=__name__)
class NodeSampler:
"""Abstract class for node sampler."""
@abstractmethod
def sample(
self,
positive_batch: IDAlignment,
) -> NodeIDs:
"""
Sample negative node indices for each side.
positive pair:
(positive_batch[0, i], positive_batch[1, i])
negative pair:
(negative_batch[0, i, j], positive_batch[1, i])
:param positive_batch: shape: (2, pos_batch_size)
The batch of aligned nodes.
:return: shape: (2, pos_batch_size, num_negatives)
The negative node IDs. result[0] has to be combined with positive_batch[1] for a valid pair.
"""
raise NotImplementedError
class RandomNodeSampler(NodeSampler):
"""Randomly select additional nodes."""
def __init__(
self,
num_nodes: Mapping[MatchSideEnum, int],
num_negatives: int,
):
"""
Initialize the sampler.
:param num_nodes:
The number of nodes on each side.
:param num_negatives: >=0
The number of negative samples for each positive one.
"""
self.num_nodes = num_nodes
self.num_negatives = num_negatives
def sample(
self,
positive_batch: IDAlignment,
) -> NodeIDs: # noqa: D102
return torch.stack([
torch.randint(self.num_nodes[side], size=(positive_batch.shape[1], self.num_negatives))
for side in SIDES
], dim=0)
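A minimal usage sketch (editor's addition; the node counts are hypothetical, and SIDES is assumed to hold exactly two MatchSideEnum members):
import torch
from kgm.data import SIDES

sampler = RandomNodeSampler(num_nodes={side: 100 for side in SIDES}, num_negatives=4)
positive_batch = torch.randint(100, size=(2, 16))  # (2, pos_batch_size)
negatives = sampler.sample(positive_batch=positive_batch)
assert negatives.shape == (2, 16, 4)  # (2, pos_batch_size, num_negatives)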
#: A 3-tuple:
# * indices (global)
# * positives (local)
# * negatives (local)
AlignmentBatch = Tuple[Optional[Mapping[MatchSideEnum, NodeIDs]], IDAlignment, Optional[NodeIDs]]
class AlignmentBatchCollator:
"""A custom collator for adding negative nodes to a batch of positives."""
def __init__(
self,
node_sampler: Optional[NodeSampler] = None,
):
"""
Initialize the collator.
:param node_sampler:
The node sampler.
"""
self.sampler = node_sampler
def collate(
self,
positives: List[Tuple[IDAlignment]],
) -> AlignmentBatch:
"""
Collate a batch.
:param positives:
A list of single-element tuples, each containing one aligned pair of shape (2,).
:return:
A tuple of (unique batch node indices per side, batch-local positive pairs, batch-local negatives). If no sampler is set, the indices and negatives are None and the positives keep their global IDs.
"""
global_positives: IDAlignment = torch.stack([p[0] for p in positives], dim=-1)
# no sampling
if self.sampler is None:
return None, global_positives, None
global_negatives = self.sampler.sample(positive_batch=global_positives)
# Translate to batch local indices
indices = dict()
local_positives = []
local_negatives = []
for side, pos_on_side, neg_on_side in zip(SIDES, global_positives, global_negatives):
# There are positive indices P and negative indices N
# There may be duplicates
# * in P, due to 1-n alignments
# * in N, due to random sampling with replacement
# * between P and N due to not filtering in N
# We do not want to re-compute representations; thus we only keep the unique indices.
indices_on_side = torch.cat([pos_on_side.unsqueeze(dim=-1), neg_on_side], dim=-1)
indices[side], inverse = indices_on_side.unique(sorted=False, return_inverse=True)
local_positives.append(inverse[:, 0])
local_negatives.append(inverse[:, 1:])
return (
indices,
torch.stack(local_positives, dim=0),
torch.stack(local_negatives, dim=0),
)
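A hedged sketch (editor's addition) of the local-index translation: each batch item mimics the single-tensor tuples yielded by the TensorDataset built in prepare_alignment_batch_data_loader below, and the round-trip check relies on the inverse-index contract of torch.unique.
import torch

collator = AlignmentBatchCollator(
    node_sampler=RandomNodeSampler(num_nodes={side: 50 for side in SIDES}, num_negatives=2),
)
batch = [(torch.randint(50, size=(2,)),) for _ in range(8)]
indices, local_pos, local_neg = collator.collate(batch)
assert local_pos.shape == (2, 8) and local_neg.shape == (2, 8, 2)
# round-trip: batch-local indices recover the original global node IDs
global_pos = torch.stack([p[0] for p in batch], dim=-1)
for i, side in enumerate(SIDES):
    assert (indices[side][local_pos[i]] == global_pos[i]).all()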
def prepare_alignment_batch_data_loader(
dataset: KnowledgeGraphAlignmentDataset,
positive_batch_size: Optional[int] = None,
negative_sampler: Optional[NodeSampler] = None,
num_workers: int = 0,
) -> data.DataLoader:
"""
Prepare a PyTorch data loader for alignment model training.
:param dataset:
The knowledge graph alignment dataset.
:param positive_batch_size:
The batch size for alignment pairs.
:param negative_sampler:
The sampler for additional nodes from the graphs.
:param num_workers:
The number of worker processes.
.. seealso ::
torch.utils.data.DataLoader
:return:
The data loader.
"""
positives = data.TensorDataset(dataset.alignment.train.t())
if positive_batch_size is None:
positive_batch_size = dataset.alignment.num_train
collator = AlignmentBatchCollator(node_sampler=negative_sampler)
return data.DataLoader(
dataset=positives,
batch_size=positive_batch_size,
shuffle=True,
num_workers=num_workers,
collate_fn=collator.collate,
pin_memory=True,
)
class AlignmentModelTrainer(BaseTrainer[AlignmentBatch]):
"""A wrapper around a model encapsulating training and evaluation."""
#: The model instance
model: KGMatchingModel
#: The similarity instance
similarity: Similarity
#: The loss instance
loss: MatchingLoss
def __init__(
self,
model: KGMatchingModel,
similarity: Similarity,
dataset: KnowledgeGraphAlignmentDataset,
loss: MatchingLoss,
batch_size: Optional[int] = None,
optimizer_cls: Optional[Type[Optimizer]] = None,
optimizer_kwargs: Optional[Mapping[str, Any]] = None,
clip_grad_norm: Optional[float] = None,
accumulate_gradients: int = 1,
device: Optional[torch.device] = None,
negative_sampler: Optional[NodeSampler] = None,
num_workers: int = 0,
):
"""
Initialize a new training loop.
:param model:
The model.
:param similarity:
The similarity.
:param dataset:
The dataset.
:param loss:
The loss instance.
:param batch_size:
The batch size, or None for full-batch training.
:param optimizer_cls:
The optimizer class.
:param optimizer_kwargs:
Keyword-based arguments for the optimizer.
:param clip_grad_norm:
Whether to apply gradient clipping (norm-based).
:param accumulate_gradients:
Accumulate gradients over batches. This can be used to simulate a larger batch size, while keeping the
memory footprint small.
:param device:
The device on which to train.
:param negative_sampler:
The sampler for additional negative nodes, or None to train on positives only.
:param num_workers:
The number of workers to use for preparing batches.
"""
super().__init__(
model=model,
train_batch_size=batch_size,
optimizer_cls=optimizer_cls,
optimizer_kwargs=optimizer_kwargs,
clip_grad_norm=clip_grad_norm,
accumulate_gradients=accumulate_gradients,
device=device,
)
self.similarity = similarity
self.loss = loss
self.dataset = dataset
self.alignment = dataset.alignment
self.num_workers = num_workers
self.negative_sampler = negative_sampler
def _iter_batches(self) -> Iterable[AlignmentBatch]: # noqa: D102
return prepare_alignment_batch_data_loader(
dataset=self.dataset,
positive_batch_size=self.train_batch_size,
negative_sampler=self.negative_sampler,
num_workers=self.num_workers,
)
def _train_one_batch(self, batch: AlignmentBatch) -> Tuple[torch.Tensor, int]:
# Unpack
batch_node_indices, batch_alignment, negatives = batch
# Calculate node representations
node_repr = self.model(indices=batch_node_indices)
# return batch loss
return self.loss(
alignment=batch_alignment,
representations=node_repr,
negatives=negatives,
), batch_alignment.shape[1]
| 8,475 | 30.509294 | 114 | py |
| rank-based-evaluation | rank-based-evaluation-main/src/kgm/training/__init__.py |
# coding=utf-8
"""Training Loops."""
| 37 | 11.666667 | 21 | py |
| rank-based-evaluation | rank-based-evaluation-main/src/kgm/eval/matching.py |
# coding=utf-8
"""Entity Alignment evaluation methods."""
from typing import Collection, Dict, Mapping, Optional, Tuple, TypeVar, Union
import torch
from .common import aggregate_ranks, get_rank
from ..data import MatchSideEnum, SIDES
from ..models import KGMatchingModel
from ..modules import Similarity
from ..utils.torch_utils import maximize_memory_utilization
from ..utils.types import IDAlignment
__all__ = [
'AlignmentEvaluator',
'evaluate_matching_model',
'evaluate_alignment',
]
T = TypeVar('T')
def evaluate_matching_model(
model: KGMatchingModel,
alignments: Mapping[T, IDAlignment],
similarity: Similarity,
eval_batch_size: Optional[int] = None,
ks: Collection[int] = (1, 10, 50, 100),
) -> Tuple[Mapping[T, Mapping[str, float]], int]:
"""Evaluate a model on multiple alignments.
:param model:
The KG matching model to evaluate.
:param alignments:
A mapping of key -> alignment, where alignment is a LongTensor of shape (2, num_alignments).
:param similarity:
The similarity.
:param eval_batch_size:
The evaluation batch size.
:param ks:
The values for which to evaluate hits@k.
:return:
A tuple of 1) a mapping key -> subresult, where subresult maps metric names to metric values, and 2) the smallest evaluation batch size which succeeded.
"""
# Evaluation
with torch.no_grad():
# Set model in evaluation mode
model.eval()
result = {}
safe_eval_batch_size = None
for key, alignment in alignments.items():
alignment, indices = _reduce_alignment(alignment=alignment)
partial_repr = model.get_node_representations(indices=indices)
partial_result, this_eval_batch_size = evaluate_alignment(
similarity=similarity,
alignment=alignment,
representations=partial_repr,
eval_batch_size=eval_batch_size,
ks=ks,
)
result[key] = partial_result
if this_eval_batch_size is not None:
if safe_eval_batch_size is None:
safe_eval_batch_size = this_eval_batch_size
else:
safe_eval_batch_size = min(safe_eval_batch_size, this_eval_batch_size)
assert safe_eval_batch_size is not None
return result, safe_eval_batch_size
def evaluate_alignment(
similarity: Similarity,
alignment: IDAlignment,
representations: Mapping[MatchSideEnum, torch.FloatTensor],
eval_batch_size: Optional[int] = None,
ks: Collection[int] = (1, 10, 50, 100),
) -> Tuple[Dict[str, float], Optional[int]]:
"""
Evaluate an alignment.
:param representations: side -> repr
The node representations, a tensor of shape (num_nodes[side], d).
:param alignment: shape: (2, num_alignments)
The alignment.
:param similarity:
The similarity.
:param eval_batch_size: int (positive, optional)
The batch size to use for evaluation.
:param ks:
The values for which to compute hits@k.
:return: A tuple with
1) a dictionary of rank aggregation metrics, such as the mean rank, the mean reciprocal rank, and hits@k for all k in ks.
2) the maximum evaluation batch size which succeeded, or None if the alignment is empty.
"""
num_alignments = alignment.shape[1]
if num_alignments <= 0:
return dict(), None
left, right = [representations[side] for side in SIDES]
# Ensure data is on correct device
right, alignment = [t.to(device=left.device) for t in (right, alignment)]
if eval_batch_size is None:
eval_batch_size = num_alignments
return maximize_memory_utilization(
_evaluate_alignment,
parameter_name='eval_batch_size',
parameter_max_value=eval_batch_size,
alignment=alignment,
similarity=similarity,
left=left,
right=right,
ks=ks,
)
def _summarize_ranks(
ranks: torch.LongTensor,
n: Union[int, Tuple[int, int]],
ks: Collection[int],
) -> Dict[str, float]:
if isinstance(n, int):
n = (n, n)
# overall
result = dict(aggregate_ranks(
ranks=ranks,
emr=(sum(n) / 2 + 1) / 2,
ks=ks,
))
# side-specific
for i, side in enumerate(SIDES):
result[side.value] = aggregate_ranks(
ranks=ranks[i],
emr=(n[i] + 1) / 2,
ks=ks,
)
return result
def _evaluate_alignment(
eval_batch_size: int,
alignment: IDAlignment,
similarity: Similarity,
left: torch.FloatTensor,
right: torch.FloatTensor,
ks: Collection[int],
) -> Dict[str, float]:
"""Evaluate an entity alignment.
:param eval_batch_size:
The evaluation batch size.
:param alignment: shape: (2, num_alignments)
The alignment.
:param similarity:
The similarity.
:param left: shape: (num_left, dim)
The left aligned representations.
:param right: shape: (num_right, dim)
The right aligned representations.
:param ks:
The values for which to calculate Hits@k.
:return:
The evaluation results as dictionary.
"""
num_alignments = alignment.shape[1]
ranks = left.new_empty(2, num_alignments)
for i in range(0, num_alignments, eval_batch_size):
batch = alignment[:, i:i + eval_batch_size]
# match a batch of right nodes to all left nodes
sim_right_to_all_left = similarity.all_to_all(left, right[batch[1]]).t()
ranks[0, i:i + eval_batch_size] = get_rank(sim=sim_right_to_all_left, true=batch[0])
# match a batch of left nodes to all right nodes
sim_left_to_all_right = similarity.all_to_all(left[batch[0]], right)
ranks[1, i:i + eval_batch_size] = get_rank(sim=sim_left_to_all_right, true=batch[1])
num_nodes = [n.shape[0] for n in (left, right)]
return _summarize_ranks(ranks=ranks, n=num_nodes, ks=ks)
def _reduce_alignment(alignment: IDAlignment) -> Tuple[IDAlignment, Mapping[MatchSideEnum, torch.LongTensor]]:
indices = dict()
local_alignment = []
for side, alignment_on_side in zip(SIDES, alignment):
uniq, inverse = torch.unique(alignment_on_side, sorted=False, return_inverse=True)
indices[side] = uniq
local_alignment.append(inverse)
alignment = torch.stack(local_alignment, dim=0)
return alignment, indices
| 6,525 | 31.63 | 110 | py |
| rank-based-evaluation | rank-based-evaluation-main/src/kgm/eval/common.py |
"""Common utility methods for evaluation."""
import logging
from typing import Collection, Mapping, Optional
import torch
logger = logging.getLogger(name=__name__)
# Small constant for floating point comparison
EPSILON = 1.0e-08
def get_rank(sim: torch.FloatTensor, true: torch.LongTensor) -> torch.FloatTensor:
"""Compute the rank, exploiting that there is only one true hit."""
batch_size = true.shape[0]
true_sim = sim[torch.arange(batch_size), true].unsqueeze(1)
best_rank = torch.sum(sim > true_sim, dim=1, dtype=torch.long).float() + 1
worst_rank = torch.sum(sim >= true_sim, dim=1, dtype=torch.long).float()
return 0.5 * (best_rank + worst_rank)
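A worked example (editor's addition): with tied scores, the returned rank is the expected rank under random tie-breaking.
import torch

sim = torch.tensor([[0.9, 0.5, 0.9, 0.1]])  # the true hit (index 0) ties with index 2
true = torch.tensor([0])
# best rank 1, worst rank 2 -> average rank 1.5
assert get_rank(sim=sim, true=true).item() == 1.5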
def compute_ranks(
scores: torch.FloatTensor,
true_indices: torch.LongTensor,
smaller_is_better: bool = True,
mask: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor:
"""Compute the rank of the true hit.
:param scores: shape: (k, n)
The scores for each sample.
:param true_indices: shape: (k,)
Values between 0 (incl.) and n (excl.)
:param smaller_is_better:
Whether smaller or larger values are better.
:param mask: shape: (m, 2), optional
Optional mask for filtered setting
:return: shape: (k,)
The ranks, a number between 1 and n.
"""
# Ensure that larger is better
if smaller_is_better:
scores = -scores
# Get the scores of the currently considered true entity.
batch_size = scores.shape[0]
true_score = (scores[torch.arange(0, batch_size), true_indices.flatten()]).view(-1, 1)
# The best rank is the rank when assuming all options with an equal score are placed behind the currently
# considered. Hence, the rank is the number of options with better scores, plus one, as the rank is one-based.
best_rank = (scores > true_score).sum(dim=1) + 1
# The worst rank is the rank when assuming all options with an equal score are placed in front of the currently
# considered. Hence, the rank is the number of options which have at least the same score minus one (as the
# currently considered option is included in all options). As the rank is one-based, we have to add 1, which
# nullifies the "minus 1" from before.
worst_rank = (scores >= true_score).sum(dim=1)
# The average rank is the average of the best and worst rank, and hence the expected rank over all permutations of
# the elements with the same score as the currently considered option.
# We use the double average rank to avoid precision loss due to floating point operations.
double_avg_rank = best_rank + worst_rank
# In filtered setting ranking another true entity higher than the currently considered one should not be punished.
# Hence, an adjustment is computed, which is the number of other true entities ranked higher. This adjustment is
# subtracted from the rank.
if mask is not None:
batch_indices, entity_indices = mask.t()
true_scores = true_score[batch_indices, 0]
other_true_scores = scores[batch_indices, entity_indices]
double_other_true_in_front = -2 * (other_true_scores > true_scores).long()
double_avg_rank.index_add_(dim=0, index=batch_indices, source=double_other_true_in_front)
avg_rank = 0.5 * double_avg_rank.float()
return avg_rank
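A small example of the filtered setting (editor's addition): another true entity ranked above the considered one does not worsen its rank.
import torch

scores = torch.tensor([[0.1, 0.3, 0.2]])  # larger is better below
true_indices = torch.tensor([2])          # the considered true entity scores 0.2
mask = torch.tensor([[0, 1]])             # entity 1 (score 0.3) is another true hit
unfiltered = compute_ranks(scores=scores, true_indices=true_indices, smaller_is_better=False)
filtered = compute_ranks(scores=scores, true_indices=true_indices, smaller_is_better=False, mask=mask)
assert unfiltered.item() == 2.0 and filtered.item() == 1.0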
def aggregate_ranks(
ranks: torch.FloatTensor,
emr: float,
ks: Collection[int] = (1, 10, 50, 100),
) -> Mapping[str, float]:
"""
Compute rank aggregation metrics.
:param ranks:
The individual ranks.
:param emr:
The expected mean rank.
:param ks:
The values for which to compute Hits@k.
:return:
A dictionary
{
'num_rank': The number of ranks.
'mean_rank': The mean rank (MR).
'median_rank': The median rank.
'std_rank': The standard deviation of the ranks.
'adjusted_mean_rank': The adjusted mean rank (AMR).
'adjusted_mean_rank_index': The adjusted mean rank index (AMRI).
'mean_reciprocal_rank': The mean reciprocal rank (MRR).
'hits_at_k': Hits@k for each provided k.
}
"""
mr = torch.mean(ranks).item()
result = dict(
num_rank=ranks.numel(),
mean_rank=mr,
median_rank=torch.median(ranks).item(),
std_rank=ranks.std(unbiased=True).item(),
adjusted_mean_rank=mr / emr,
adjusted_mean_rank_index=1 - (mr - 1) / (emr - 1) if emr > 1.0 else 0.0,
mean_reciprocal_rank=torch.mean(torch.reciprocal(ranks)).item(),
)
result.update({
f'hits_at_{k}': torch.mean((ranks <= (k + EPSILON)).float()).item()
for k in ks
})
return result
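A small worked example (editor's addition) of the computed aggregates:
import torch

metrics = aggregate_ranks(ranks=torch.tensor([1.0, 2.0, 4.0]), emr=5.0, ks=(1, 3))
assert abs(metrics['hits_at_1'] - 1 / 3) < 1.0e-06
assert abs(metrics['hits_at_3'] - 2 / 3) < 1.0e-06
# adjusted_mean_rank_index: 1.0 for perfect ranking, approximately 0.0 for random ranking
assert abs(metrics['adjusted_mean_rank_index'] - 2 / 3) < 1.0e-06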
| 4,478 | 36.957627 | 118 | py |
| rank-based-evaluation | rank-based-evaluation-main/src/kgm/eval/__init__.py |
# coding=utf-8
"""Evaluation methods."""
from .common import compute_ranks
from .matching import evaluate_alignment, evaluate_matching_model
__all__ = [
'compute_ranks',
'evaluate_alignment',
'evaluate_matching_model',
]
| 234 | 20.363636 | 65 | py |
| rank-based-evaluation | rank-based-evaluation-main/src/kgm/models/__init__.py |
# coding=utf-8
"""Entity Alignment Models."""
from .matching import GCNAlign, GraphBasedKGMatchingModel, KGMatchingModel, PureEmbeddingModel, get_matching_model_by_name
__all__ = [
'GraphBasedKGMatchingModel',
'GCNAlign',
'KGMatchingModel',
'PureEmbeddingModel',
'get_matching_model_by_name',
]
| 316 | 25.416667 | 122 | py |
| rank-based-evaluation | rank-based-evaluation-main/src/kgm/models/matching/base.py |
# coding=utf-8
"""API for models for knowledge graph matching."""
import logging
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Any, Callable, Mapping, Optional, Type
import torch
from frozendict import frozendict
from torch import nn
from ...data import KnowledgeGraphAlignmentDataset, MatchSideEnum, SIDES
from ...data.reduction import KnowledgeGraphToGraphReduction
from ...modules.embeddings import get_embedding_pair
from ...modules.embeddings.base import EmbeddingNormalizationMode, NodeEmbeddingInitMethod
from ...modules.embeddings.norm import EmbeddingNormalizationMethod
from ...utils.common import get_subclass_by_name, kwargs_or_empty
from ...utils.torch_utils import ExtendedModule, maximize_memory_utilization
from ...utils.types import EntityIDs
logger = logging.getLogger(name=__name__)
__all__ = [
'GraphBasedKGMatchingModel',
'IndependentSideMixin',
'KGMatchingModel',
'PureEmbeddingModel',
'get_matching_model_by_name',
]
class KGMatchingModel(ExtendedModule):
"""
Generic class for (knowledge) graph matching models of a specific form.
The models produce a vector representation for each node, and the matching is done by comparing these representations
by some similarity measure.
"""
#: The number of nodes on each side.
num_nodes: Mapping[MatchSideEnum, int]
def __init__(
self,
num_nodes: Mapping[MatchSideEnum, int],
):
"""
Initialize the model.
:param num_nodes:
The number of nodes on each side.
"""
super().__init__()
self.num_nodes = frozendict(num_nodes)
self.batch_size = sum(num_nodes.values())
# pylint: disable=arguments-differ
@abstractmethod
def forward(
self,
indices: Optional[Mapping[MatchSideEnum, EntityIDs]] = None,
) -> Mapping[MatchSideEnum, torch.FloatTensor]:
"""Return embeddings for nodes on both sides.
:param indices:
If provided only return representations for these indices.
:return: a mapping side -> representations
where
representations: shape: (num_nodes_on_side, embedding_dim)
"""
raise NotImplementedError
def _get_node_representations(
self,
indices: Mapping[MatchSideEnum, EntityIDs],
batch_size: int,
) -> Mapping[MatchSideEnum, torch.FloatTensor]:
"""
Batched calculation of node representations.
:param indices:
The indices for each side.
:param batch_size:
The batch size.
:return:
A mapping from side to node representations on side.
"""
result = defaultdict(list)
total_num_nodes = sum(v.shape[0] for v in indices.values())
num_first_side = indices[SIDES[0]].shape[0]
for start in range(0, total_num_nodes, batch_size):
# construct indices
batch_indices = dict()
for i_side, side in enumerate(SIDES):
start_side = max(start - i_side * num_first_side, 0)
end_side = min(max(start + batch_size - i_side * num_first_side, 0), self.num_nodes[side])
if end_side - start_side > 0:
batch_indices[side] = indices[side][start_side:end_side].to(self.device)
# update result
for side, partial_node_repr in self(indices=batch_indices).items():
result[side].append(partial_node_repr)
# combine result
return {
side: torch.cat(partial_node_repr)
for side, partial_node_repr in result.items()
}
def get_node_representations(
self,
indices: Optional[Mapping[MatchSideEnum, EntityIDs]] = None,
) -> Mapping[MatchSideEnum, torch.FloatTensor]:
"""
Calculate node representations for all nodes using batching.
:param indices:
Optional restriction to some indices.
:return:
The node representations.
"""
if indices is None:
indices = {
side: torch.arange(num, device=self.device)
for side, num in self.num_nodes.items()
}
result, self.batch_size = maximize_memory_utilization(
self._get_node_representations,
parameter_name='batch_size',
parameter_max_value=self.batch_size,
indices=indices,
)
return result
class IndependentSideMixin(KGMatchingModel):
"""Mix-in for models which compute independent representations on each side."""
def forward(
self,
indices: Optional[Mapping[MatchSideEnum, EntityIDs]] = None,
) -> Mapping[MatchSideEnum, torch.FloatTensor]: # noqa: D102
if indices is None:
indices = {
side: None
for side in SIDES
}
return {
side: self._forward_side(side=side, indices=indices_on_side)
for side, indices_on_side in indices.items()
}
@abstractmethod
def _forward_side(
self,
side: MatchSideEnum,
indices: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor:
"""
Compute node representations on one side.
:param side:
The side.
:param indices:
The indices. None means to compute all representations.
:return: shape: (num_indices, embedding_dim)
The node representations.
"""
raise NotImplementedError
# pylint: disable=abstract-method
class GraphBasedKGMatchingModel(KGMatchingModel, ABC):
"""A knowledge graph matching model explicitly using the graph structure."""
#: The reductions to adjacency matrices.
reductions: Mapping[MatchSideEnum, KnowledgeGraphToGraphReduction]
def __init__(
self,
dataset: KnowledgeGraphAlignmentDataset,
reduction_cls: Type[KnowledgeGraphToGraphReduction],
reduction_kwargs: Optional[Mapping[str, Any]] = None,
):
"""
Initialize the model.
:param dataset:
The dataset.
:param reduction_cls:
The reduction strategy to obtain a (weighted) adjacency matrix from a knowledge graph.
:param reduction_kwargs:
Optional key-word based arguments to pass to the reduction.
"""
super().__init__(num_nodes=dataset.num_nodes)
reduction_kwargs = kwargs_or_empty(reduction_kwargs)
self.reductions = nn.ModuleDict({
side: reduction_cls(knowledge_graph=graph, **reduction_kwargs)
for side, graph in dataset.graphs.items()
})
def get_matching_model_by_name(
name: str,
normalizer: Optional[Callable[[str], str]] = None,
) -> Type[KGMatchingModel]:
"""
Get a matching model class by name.
:param name:
The name.
:param normalizer:
An optional custom name normalization method.
:return:
The matching class.
"""
if normalizer is None:
normalizer = str.lower
return get_subclass_by_name(base_class=KGMatchingModel, name=name, normalizer=normalizer, exclude={GraphBasedKGMatchingModel})
class PureEmbeddingModel(IndependentSideMixin, KGMatchingModel):
"""A knowledge graph matching model with learned node representations without interaction between the nodes."""
def __init__(
self,
dataset: KnowledgeGraphAlignmentDataset,
embedding_dim: int = 3,
node_embedding_init_method: NodeEmbeddingInitMethod = NodeEmbeddingInitMethod.sqrt_individual,
node_embedding_init_config: Optional[Mapping[str, Any]] = None,
node_embedding_normalization_method: EmbeddingNormalizationMethod = EmbeddingNormalizationMethod.none,
node_embedding_normalization_mode: EmbeddingNormalizationMode = EmbeddingNormalizationMode.none,
dropout: Optional[float] = None,
):
"""
Initialize the model.
:param dataset:
The dataset.
:param embedding_dim: > 0
The dimensionality of the embedding.
:param node_embedding_init_method:
The embedding initialization method used for the node embeddings.
:param node_embedding_init_config:
Additional keyword based arguments for the initializer.
:param node_embedding_normalization_method:
The node embedding normalization method.
:param node_embedding_normalization_mode:
The node embedding normalization mode.
:param dropout:
If present, apply dropout to the node embeddings.
"""
super().__init__(num_nodes=dataset.num_nodes)
self.embeddings = get_embedding_pair(
init=node_embedding_init_method,
dataset=dataset,
embedding_dim=embedding_dim,
dropout=dropout,
trainable=True,
init_config=node_embedding_init_config,
norm=node_embedding_normalization_method,
normalization_mode=node_embedding_normalization_mode,
)
self.reset_parameters()
def _forward_side(
self,
side: MatchSideEnum,
indices: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor: # noqa: D102
return self.embeddings[side](indices=indices)
| 9,382 | 32.996377 | 130 | py |
| rank-based-evaluation | rank-based-evaluation-main/src/kgm/models/matching/gcn_align.py |
# coding=utf-8
"""
Implementation of GCN-Align.
The paper introducing the model can be found at https://www.aclweb.org/anthology/D18-1032.pdf.
The authors' implementation can be found at https://github.com/1049451037/GCN-Align and they also refer to
https://github.com/1049451037/HIN-Align for an improved implementation.
"""
import logging
from typing import Any, Mapping, Optional, Type
import torch
from torch import nn
from .base import GraphBasedKGMatchingModel, IndependentSideMixin
from ...data import KnowledgeGraphAlignmentDataset, MatchSideEnum, SIDES
from ...data.reduction import DropRelationInformationKnowledgeGraphToGraphReduction, KnowledgeGraphToGraphReduction, target_normalization
from ...modules.embeddings.base import Embedding, EmbeddingNormalizationMode, NodeEmbeddingInitMethod, get_embedding_pair
from ...modules.embeddings.norm import EmbeddingNormalizationMethod
from ...modules.graph import GCNBlock, IdentityMessageCreator, MessagePassingBlock, OnlyUpdate, SumAggregator
logger = logging.getLogger(name=__name__)
class GCNAlign(IndependentSideMixin, GraphBasedKGMatchingModel):
"""GCN-Align model implementation."""
#: The node embeddings
node_embeddings: Mapping[MatchSideEnum, Embedding]
def __init__(
self,
dataset: KnowledgeGraphAlignmentDataset,
reduction_cls: Optional[Type[KnowledgeGraphToGraphReduction]] = None,
reduction_kwargs: Optional[Mapping[str, Any]] = None,
embedding_dim: int = 200,
activation_cls: Type[nn.Module] = nn.ReLU,
n_layers: int = 2,
use_conv_weights: bool = False,
node_embedding_init_method: NodeEmbeddingInitMethod = NodeEmbeddingInitMethod.sqrt_total,
vertical_sharing: bool = True,
node_embedding_dropout: Optional[float] = None,
node_embedding_init_config: Optional[Mapping[str, Any]] = None,
):
"""
Initialize the model.
:param dataset:
The dataset.
:param reduction_cls:
The reduction strategy to obtain a (weighted) adjacency matrix from a knowledge graph.
:param reduction_kwargs:
Optional keyword-based arguments for the reduction.
:param embedding_dim:
The dimension of the node embedding.
:param activation_cls:
The non-linear activation to use between the message passing steps.
:param n_layers:
The number of layers.
:param use_conv_weights:
Whether to use convolution weights.
:param node_embedding_init_method:
The method used to initialize the node embeddings.
:param vertical_sharing:
Whether to use "vertical weight sharing", i.e. apply the same convolution weights for all layers.
:param node_embedding_dropout:
An optional dropout to use on the node embeddings.
:param node_embedding_init_config:
Additional keyword-based arguments for the embedding initializer.
"""
if reduction_cls is None:
reduction_cls = DropRelationInformationKnowledgeGraphToGraphReduction
reduction_kwargs = dict(
normalization=target_normalization,
)
super().__init__(dataset=dataset, reduction_cls=reduction_cls, reduction_kwargs=reduction_kwargs)
# node embeddings
self.node_embeddings = get_embedding_pair(
init=node_embedding_init_method,
dataset=dataset,
embedding_dim=embedding_dim,
dropout=node_embedding_dropout,
trainable=True,
init_config=node_embedding_init_config,
norm=EmbeddingNormalizationMethod.l2,
normalization_mode=EmbeddingNormalizationMode.every_forward,
)
# GCN layers
self.n_layers = n_layers
self.use_conv_weights = use_conv_weights
self.vertical_sharing = vertical_sharing
blocks = []
if use_conv_weights:
if self.vertical_sharing:
gcn_block = GCNBlock(input_dim=embedding_dim, output_dim=embedding_dim, use_bias=True)
activation = activation_cls()
for _ in range(n_layers):
blocks.append(gcn_block)
blocks.append(activation)
else:
for _ in range(n_layers):
gcn_block = GCNBlock(input_dim=embedding_dim, output_dim=embedding_dim, use_bias=True)
activation = activation_cls()
blocks.append(gcn_block)
blocks.append(activation)
else:
message_block = MessagePassingBlock(
message_creator=IdentityMessageCreator(),
message_aggregator=SumAggregator(),
node_updater=OnlyUpdate(),
)
for _ in range(n_layers):
blocks.append(message_block)
activation = activation_cls()
blocks.append(activation)
side_to_modules = {
side: nn.ModuleList(blocks)
for side in SIDES
}
self.layers = nn.ModuleDict(modules=side_to_modules)
# Initialize parameters
self.reset_parameters()
def _forward_side(
self,
side: MatchSideEnum,
indices: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor: # noqa: D102
x = self.node_embeddings[side](indices=None)
# Prepare message passing keyword arguments
adjacency = self.reductions[side]()
message_passing_kwargs = {
'source': adjacency.source,
'target': adjacency.target,
'edge_weights': adjacency.values,
}
# forward pass through all layers
if side in self.layers.keys():
layers = self.layers[side]
else:
logger.warning('No layers for side %s', side)
layers = []
for layer in layers:
if isinstance(layer, MessagePassingBlock):
x = layer(x, **message_passing_kwargs)
else:
x = layer(x)
# Select indices if requested
if indices is not None:
x = x[indices]
return x
| 6,089 | 37.789809 | 137 | py |
| rank-based-evaluation | rank-based-evaluation-main/src/kgm/models/matching/__init__.py |
# coding=utf-8
"""Models for (knowledge) graph matching."""
from .base import GraphBasedKGMatchingModel, KGMatchingModel, PureEmbeddingModel, get_matching_model_by_name
from .gcn_align import GCNAlign
__all__ = [
'GraphBasedKGMatchingModel',
'GCNAlign',
'KGMatchingModel',
'PureEmbeddingModel',
'get_matching_model_by_name',
]
| 348 | 25.846154 | 108 | py |
| rank-based-evaluation | rank-based-evaluation-main/src/kgm/utils/mlflow_utils.py |
# coding=utf-8
"""Utility methods for MLFlow."""
import hashlib
import itertools
import logging
import os
import platform
from typing import Any, Callable, Collection, Dict, List, Mapping, Optional, Tuple, Union
import mlflow
import mlflow.entities
import pandas
import tqdm
from .common import to_dot
logger = logging.getLogger(name=__name__)
def log_params_to_mlflow(
config: Dict[str, Any],
) -> None:
"""Log parameters to MLFlow. Allows nested dictionaries."""
nice_config = to_dot(config)
# mlflow can only process 100 parameters at once
keys = sorted(nice_config.keys())
batch_size = 100
for start in range(0, len(keys), batch_size):
mlflow.log_params({k: nice_config[k] for k in keys[start:start + batch_size]})
def log_metrics_to_mlflow(
metrics: Dict[str, Any],
step: Optional[int] = None,
prefix: Optional[str] = None,
) -> None:
"""Log metrics to MLFlow. Allows nested dictionaries."""
nice_metrics = to_dot(metrics, prefix=prefix)
mlflow.log_metrics(nice_metrics, step=step)
def query_mlflow(
tracking_uri: str,
experiment_id: str,
params: Optional[Dict[str, Union[str, int, float]]] = None,
metrics: Optional[Dict[str, Union[str, int, float]]] = None,
tags: Optional[Dict[str, Union[str, int, float]]] = None,
) -> List[mlflow.entities.Run]:
"""Query MLFlow for runs with matching params, metrics and tags."""
client = mlflow.tracking.MlflowClient(tracking_uri=tracking_uri)
# Construct query
q_params = [f'params.{p} = "{v}"' for p, v in to_dot(params).items()] if params else []
q_metrics = [f'metrics.{m} = "{v}"' for m, v in to_dot(metrics).items()] if metrics else []
q_tags = [f'tags.{t} = "{v}"' for t, v in tags.items()] if tags else []
query = ' and '.join([*q_params, *q_metrics, *q_tags])
return client.search_runs(experiment_ids=[experiment_id], filter_string=query)
def experiment_name_to_id(
tracking_uri: str,
experiment_id: int,
) -> str:
"""Convert an experiment name to experiment ID."""
client = mlflow.tracking.MlflowClient(tracking_uri=tracking_uri)
return [exp.name for exp in client.list_experiments() if int(exp.experiment_id) == experiment_id][0]
def get_metric_history_for_runs(
tracking_uri: str,
metrics: Union[str, Collection[str]],
runs: Union[str, Collection[str]],
) -> pandas.DataFrame:
"""
Get metric history for selected runs.
:param tracking_uri:
The URI of the tracking server.
:param metrics:
The metrics.
:param runs:
The IDs of selected runs.
:return:
A dataframe with columns {'run_id', 'key', 'step', 'timestamp', 'value'}.
"""
# normalize input
if isinstance(metrics, str):
metrics = [metrics]
if isinstance(runs, str):
runs = [runs]
client = mlflow.tracking.MlflowClient(tracking_uri=tracking_uri)
data = []
task_list = sorted(itertools.product(metrics, runs))
n_success = n_error = 0
with tqdm.tqdm(task_list, unit='metric+task', unit_scale=True) as progress:
for metric, run in progress:
try:
data.extend(
(run, measurement.key, measurement.step, measurement.timestamp, measurement.value)
for measurement in client.get_metric_history(run_id=run, key=metric)
)
n_success += 1
except ConnectionError as error:
n_error += 1
progress.write(f'[Error] {error.strerror}')
progress.set_postfix(dict(success=n_success, error=n_error))
return pandas.DataFrame(
data=data,
columns=['run_id', 'key', 'step', 'timestamp', 'value']
)
def get_metric_history(
tracking_uri: str,
experiment_ids: Union[int, Collection[int]],
metrics: Collection[str],
runs: Optional[Collection[str]] = None,
convert_to_wide_format: bool = False,
filter_string: Optional[str] = "",
) -> pandas.DataFrame:
"""
Get metric history data for experiment(s).
:param tracking_uri:
The URI of the tracking server.
:param experiment_ids:
The experiments ID(s).
:param metrics:
The name of the metrics to retrieve the history for.
:param runs:
An optional selection of runs via IDs. If None, get all.
:param convert_to_wide_format:
Whether to convert the dataframe from "long" to "wide" format.
:param filter_string:
Filter query string, defaults to searching all runs.
:return:
A dataframe of results.
"""
# Normalize runs
if runs is None:
runs = get_all_runs_from_experiments(
tracking_uri=tracking_uri,
filter_string=filter_string,
experiment_ids=experiment_ids
)
logger.info(f'Retrieved {len(runs)} runs for experiment(s) {experiment_ids}.')
df = get_metric_history_for_runs(tracking_uri=tracking_uri, metrics=metrics, runs=runs)
if convert_to_wide_format:
df = _convert_metric_history_long_to_wide(history_df=df)
return df
def _convert_metric_history_long_to_wide(
history_df: pandas.DataFrame,
) -> pandas.DataFrame:
"""
Convert a dataframe of metric history from "long" to "wide" format.
:param history_df:
The dataframe in long format.
:return:
The dataframe in wide format.
"""
return history_df.pivot_table(
index=['run_id', 'step'],
values='value',
columns=['key'],
)
def get_all_runs_from_experiments(
*,
experiment_ids: Union[int, Collection[int]],
filter_string: Optional[str] = "",
tracking_uri: Optional[str] = None,
client: Optional[mlflow.tracking.MlflowClient] = None,
) -> Collection[str]:
"""
Collect IDs for all runs associated with an experiment ID.
.. note ::
Exactly one of `tracking_uri` or `client` has to be provided.
:param experiment_ids:
The experiment IDs.
:param filter_string:
Filter query string, defaults to searching all runs.
:param tracking_uri:
The Mlflow tracking URI.
:param client:
The Mlflow client.
:return:
A collection of run IDs.
"""
# Normalize input
if isinstance(experiment_ids, int):
experiment_ids = [experiment_ids]
if (tracking_uri is None) == (client is None):
raise ValueError('Exactly one of tracking_uri and client must be provided.')
if tracking_uri is not None:
client = mlflow.tracking.MlflowClient(tracking_uri=tracking_uri)
runs = []
# support for paginated results
continue_searching = True
page_token = None
while continue_searching:
page_result_list = client.search_runs(
experiment_ids=list(map(str, experiment_ids)),
filter_string=filter_string,
page_token=page_token
)
runs.extend(run.info.run_uuid for run in page_result_list)
page_token = page_result_list.token
continue_searching = page_token is not None
return runs
def _sort_key(x: Mapping[str, Any]) -> str:
return hashlib.md5((';'.join(f'{k}={v}' for k, v in x.items()) + ';' + str(platform.node()) + ';' + str(os.getenv('CUDA_VISIBLE_DEVICES', '?'))).encode()).hexdigest()
def run_experiments(
search_list: List[Mapping[str, Any]],
experiment: Callable[[Mapping[str, Any]], Tuple[Mapping[str, Any], int]],
num_replicates: int = 1,
break_on_error: bool = False,
) -> None:
"""
Run experiments synchronized by MLFlow.
:param search_list:
The search list of parameters. Each entry corresponds to one experiment.
:param experiment:
The experiment as callable. Takes the dictionary of parameters as input, and produces a result dictionary as well as a final step.
:param num_replicates:
The number of replicates to run for each parameter setting.
:param break_on_error:
Whether to re-raise an exception occurring within an experiment.
"""
# randomize sort order to avoid collisions with multiple workers
search_list = sorted(search_list, key=_sort_key)
n_experiments = len(search_list)
counter = {
'error': 0,
'success': 0,
'skip': 0,
}
for run, params in enumerate(search_list * num_replicates, start=1):
logger.info('================== Run %4d/%4d ==================', run, n_experiments * num_replicates)
params = dict(**params)
# Check, if run with current parameters already exists
query = ' and '.join(list(map(lambda item: f"params.{item[0]} = '{str(item[1])}'", to_dot(params).items())))
logger.info('Query: \n%s\n', query)
run_hash = hashlib.md5(query.encode()).hexdigest()
params['run_hash'] = run_hash
logger.info('Hash: %s', run_hash)
existing_runs = mlflow.search_runs(filter_string=f"params.run_hash = '{run_hash}'", run_view_type=mlflow.entities.ViewType.ACTIVE_ONLY)
if len(existing_runs) >= num_replicates:
logger.info('Skipping existing run.')
counter['skip'] += 1
continue
mlflow.start_run()
params['environment'] = {
'server': platform.node(),
}
# Log to MLFlow
log_params_to_mlflow(params)
log_metrics_to_mlflow({'finished': False}, step=0)
# Run experiment
try:
final_evaluation, final_step = experiment(params)
# Log to MLFlow
log_metrics_to_mlflow(metrics=final_evaluation, step=final_step)
log_metrics_to_mlflow({'finished': True}, step=final_step)
counter['success'] += 1
except Exception as e: # pylint: disable=broad-except
logger.error('Error occurred.')
logger.exception(e)
log_metrics_to_mlflow(metrics={'error': 1})
counter['error'] += 1
if break_on_error:
raise e
mlflow.end_run()
logger.info('Ran %d experiments: %s', n_experiments * num_replicates, counter)
| 9,803 | 31.463576 | 170 | py |
| rank-based-evaluation | rank-based-evaluation-main/src/kgm/utils/data_utils.py |
"""Utilities for retrieving and checking data files."""
import hashlib
import logging
import pathlib
from typing import Optional
import humanize
import requests
import tqdm
logger = logging.getLogger(name=__name__)
def resolve_google_drive_file_url(
id_: str,
session: requests.Session,
) -> requests.Response:
"""
Resolve the download path for a Google Drive file.
This method clicks through download confirmation pages.
:param id_:
The file ID.
:param session:
The session.
:return:
The response.
"""
# cf. https://stackoverflow.com/a/39225272
GOOGLE_DRIVE_BASE_URL = "https://docs.google.com/uc?export=download"
# request file
response = session.get(GOOGLE_DRIVE_BASE_URL, params={'id': id_}, stream=True)
# Download warning page
for key, value in response.cookies.items():
if key.startswith('download_warning'):
params = {'id': id_, 'confirm': value}
return session.get(GOOGLE_DRIVE_BASE_URL, params=params, stream=True)
return response
def save_response_content(
response: requests.Response,
destination: pathlib.Path,
chunk_size: int = 2 ** 16,
show_progress: bool = True,
) -> None:
"""
Save content from a response to a file.
:param response:
The response object.
:param destination:
The destination where the content should be stored. Its parent directories will be created if they do not exist already.
:param chunk_size:
The chunk size in which to write to the file.
:param show_progress:
Whether to show a progress bar during download.
"""
if response.status_code != requests.codes.ok: # pylint: disable=no-member
raise ValueError(f'Status Code of response is not OK ({requests.codes.ok}), but {response.status_code}') # pylint: disable=no-member
# Ensure that the parent directory exists.
destination.parent.mkdir(parents=True, exist_ok=True)
# Try to infer download size
try:
total_size = int(response.headers.get('content-length', None))
except TypeError:
total_size = None
logger.warning('Could not infer download size.')
logger.info('Downloading from %s to %s', response.url, str(destination.absolute()))
with destination.open(mode='wb') as f:
iterator = response.iter_content(chunk_size=chunk_size)
if show_progress:
progress_bar = tqdm.tqdm(desc='Download', total=total_size, unit='iB', unit_scale=True, unit_divisor=2 ** 10)
for chunk in iterator:
# filter out keep-alive new chunks
if not chunk:
continue
# Write to file
f.write(chunk)
# Update counter
this_chunk_size = len(chunk)
# Update progress bar, if one exists
if show_progress:
progress_bar.update(n=this_chunk_size)
if show_progress:
progress_bar.close()
# Check total file size against header information
actual_size = destination.stat().st_size
if total_size is not None:
if actual_size != total_size:
raise RuntimeError(f'Download of {response.url} failed. Expected size {total_size} vs. actual size {actual_size}')
logger.info('Finished download of %s.', humanize.naturalsize(value=actual_size, binary=True))
def check_hashsums(
destination: pathlib.Path,
chunk_size: int = 64 * 2 ** 10,
**hashes: str,
) -> bool:
"""
Check a file for hash sums.
:param destination:
The file path.
:param chunk_size:
The chunk size for reading the file.
:param hashes:
The expected hashsums as (algorithm_name, hash_sum) pairs where hash_sum is the hexdigest
:return:
Whether all hash sums match.
"""
if len(hashes) == 0:
logger.warning('There are no hash sums to check for.')
return True
# instantiate algorithms
hash_algorithms = {}
for alg in hashes.keys():
hash_algorithms[alg] = hashlib.new(alg)
# calculate hash sums of file incrementally
buffer = memoryview(bytearray(chunk_size))
with destination.open('rb', buffering=0) as f:
for this_chunk_size in iter(lambda: f.readinto(buffer), 0):
for alg in hash_algorithms.values():
alg.update(buffer[:this_chunk_size])
# Compare digests
file_is_intact = True
for alg, digest in hashes.items():
digest_ = hash_algorithms[alg].hexdigest()
if digest_ != digest:
logger.fatal('Hash sum does not match! expected %s=%s, but got %s', alg, digest, digest_)
file_is_intact = False
else:
logger.info('Successfully checked with %s', alg)
return file_is_intact
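A hedged usage sketch (editor's addition; the file location is hypothetical, and the digest is the well-known MD5 of the byte string b'hello'):
import pathlib

path = pathlib.Path('/tmp/hello.txt')
path.write_bytes(b'hello')
assert check_hashsums(path, md5='5d41402abc4b2a76b9719d911017c592')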
def resolve_cache_root(
cache_root: Optional[pathlib.Path],
*directories: str
) -> pathlib.Path:
"""
Resolve cache root.
:param cache_root:
The cache root. If None, use ~/.kgm
:param directories:
Additional directories inside the cache root which are created if necessary.
:return:
An absolute path to an existing directory.
"""
# default cache root
if cache_root is None:
cache_root = pathlib.Path('~', '.kgm')
# Ensure it is an absolute path
cache_root = cache_root.expanduser().absolute()
# Create sub-directories
for directory in directories:
cache_root = cache_root / directory
# Ensure that cache_root is an existing directory
cache_root.mkdir(parents=True, exist_ok=True)
return cache_root
| 5,600 | 29.606557 | 141 | py |
| rank-based-evaluation | rank-based-evaluation-main/src/kgm/utils/types.py |
"""Type annotation aliases."""
import torch
#: A (n, 3) tensor of IDs.
Triples = torch.LongTensor
#: A (n,) tensor of IDs.
EntityIDs = torch.LongTensor
#: A (n,) tensor of IDs.
RelationIDs = torch.LongTensor
#: A (n,) tensor of IDs.
NodeIDs = torch.LongTensor
#: A (2, n) tensor of IDs.
IDAlignment = torch.LongTensor
#: A (2, n) tensor of IDs.
EdgeTensor = torch.LongTensor
| 381 | 17.190476 | 30 | py |
| rank-based-evaluation | rank-based-evaluation-main/src/kgm/utils/torch_utils.py |
"""Utility methods using pytorch."""
import itertools
import logging
from abc import ABC
from collections import defaultdict
from operator import itemgetter
from typing import Any, Callable, MutableMapping, Optional, Sequence, Tuple, Type, TypeVar, Union
import numpy
import torch
from torch import nn, optim
from .common import get_subclass_by_name, integer_portion, reduce_kwargs_for_method
from .types import EdgeTensor, NodeIDs
logger = logging.getLogger(name=__name__)
def send_messages(
edge_tensor: EdgeTensor,
source_data: torch.FloatTensor,
edge_weights: Optional[torch.FloatTensor] = None,
accumulator: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
"""
Message passing.
:param edge_tensor: shape: (2, num_edges)
The edges as (source, target) tuples.
:param source_data: shape: (num_nodes, dim)
The node features.
:param edge_weights: shape: (num_edges,)
Edge weights (optional).
:param accumulator: shape: (num_nodes, dim)
The accumulator.
:return: shape: (num_nodes, dim)
The updated node representations.
"""
# Send messages to edges
source, target = edge_tensor
msg = source_data.index_select(dim=0, index=source)
# Message weighting
if edge_weights is not None:
if edge_weights.ndimension() < msg.ndimension():
edge_weights = edge_weights.unsqueeze(dim=-1)
msg = msg * edge_weights
# Allocate accumulator if none is given
if accumulator is None:
accumulator = torch.zeros_like(source_data)
# Accumulate messages
return accumulator.index_add(dim=0, index=target, source=msg)
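A worked example (editor's addition): messages along the two edges 0 -> 2 and 1 -> 2 accumulate at node 2.
import torch

edge_tensor = torch.tensor([[0, 1], [2, 2]])       # row 0: sources, row 1: targets
source_data = torch.tensor([[1.0], [2.0], [0.0]])  # (num_nodes, dim)
out = send_messages(edge_tensor=edge_tensor, source_data=source_data)
assert torch.equal(out, torch.tensor([[0.0], [0.0], [3.0]]))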
def get_device(
device: Union[None, str, torch.device],
) -> torch.device:
"""Resolve the device, either specified as name, or device."""
if device is None:
device = 'cuda'
if isinstance(device, str):
device = torch.device(device=device)
assert isinstance(device, torch.device)
if not torch.cuda.is_available() and device.type == 'cuda':
logger.warning('Requested device %s, but CUDA is unavailable. Falling back to cpu.', device)
device = torch.device('cpu')
return device
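A minimal sketch (editor's addition):
assert get_device('cpu').type == 'cpu'
device = get_device(None)  # prefers CUDA; falls back to CPU with a warning if unavailable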
def split_tensor(
tensor: torch.Tensor,
ratios: Union[float, Sequence[float]],
shuffle: bool = True,
dim: int = 0,
seed: Optional[int] = 42,
) -> Sequence[torch.Tensor]:
"""
Split tensor into multiple partitions along a dimension.
The splits are performed consecutively: each individual split takes the given ratio of the elements remaining after the previous split.
:param tensor:
The tensor to split.
:param ratios:
A sequence of floats between [0, 1] specifying the ratio of the first partition of each split.
:param shuffle:
Whether to randomize order of data.
:param dim:
The dimension to split along.
:param seed:
The random seed to use for shuffling.
:return:
A sequence of disjoint subsets of the input tensor.
"""
if isinstance(ratios, float):
ratios = [ratios]
num_elements = tensor.shape[dim]
# shuffle
if shuffle:
# random seeding
if seed is not None:
generator = torch.manual_seed(seed=seed)
else:
generator = torch.random.default_generator
indices = torch.randperm(n=num_elements, generator=generator, device=tensor.device)
else:
indices = torch.arange(0, num_elements, device=tensor.device)
output = []
remainder = indices
for ratio in ratios:
size_first = integer_portion(number=remainder.shape[0], ratio=ratio)
this, remainder = remainder[:size_first], remainder[size_first:]
output.append(tensor.index_select(dim=dim, index=this))
output.append(tensor.index_select(dim=dim, index=remainder))
return output
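A hedged example (editor's addition): each ratio applies to the elements remaining after the previous split, and the final remainder is appended, so the output has len(ratios) + 1 parts.
import torch

parts = split_tensor(torch.arange(100), ratios=[0.5, 0.5], shuffle=False)
assert [p.shape[0] for p in parts] == [50, 25, 25]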
def _guess_num_nodes(
num_nodes: Optional[int],
source: Optional[NodeIDs] = None,
target: Optional[NodeIDs] = None,
) -> int:
"""Try to guess the number of nodes."""
if num_nodes is not None:
return num_nodes
if source is None and target is None:
raise ValueError('If no num_nodes are given, either source, or target must be given!')
# node IDs are zero-based, hence the count is the maximum ID plus one
return max(x.max().item() for x in (source, target) if x is not None) + 1
def get_optimizer_class_by_name(name: str) -> Type[optim.Optimizer]:
"""Return an optimizer class given its name."""
return get_subclass_by_name(base_class=optim.Optimizer, name=name, normalizer=str.lower)
def _is_oom_error(error: RuntimeError) -> bool:
"""Check whether a runtime error was caused by insufficient memory."""
message = error.args[0]
# CUDA out of memory
if 'CUDA out of memory.' in message:
return True
# CPU out of memory
if "[enforce fail at CPUAllocator.cpp:64] . DefaultCPUAllocator: can't allocate memory:" in message:
return True
return False
R = TypeVar('R')
def maximize_memory_utilization(
func: Callable[..., R],
parameter_name: str,
parameter_max_value: int,
*args,
**kwargs
) -> Tuple[R, int]: # noqa: D401
"""
Iteratively reduce parameter value until no RuntimeError is generated by CUDA.
:param func:
The callable.
:param parameter_name:
The name of the parameter to maximise.
:param parameter_max_value:
The maximum value to start with.
:param args:
Additional positional arguments for func. Does _not_ include parameter_name!
:param kwargs:
Additional keyword-based arguments for func. Does _not_ include parameter_name!
:return:
The result, as well as the maximum value which led to successful execution.
"""
result = None
direct_success = True
if not all((not torch.is_tensor(obj) or obj.device.type == 'cuda') for obj in itertools.chain(args, kwargs.values())):
logger.warning('Using maximize_memory_utilization on non-CUDA tensors. This may lead to undocumented crashes due to CPU OOM killer.')
while parameter_max_value > 0:
p_kwargs = {parameter_name: parameter_max_value}
try:
result = func(*args, **p_kwargs, **kwargs)
if not direct_success:
logger.info('Execution succeeded with %s=%d', parameter_name, parameter_max_value)
break
except RuntimeError as runtime_error:
# Failed at least once
direct_success = False
# clear cache
torch.cuda.empty_cache()
# check whether the error is an out-of-memory error
if not _is_oom_error(error=runtime_error):
raise runtime_error
logger.info('Execution failed with %s=%d', parameter_name, parameter_max_value)
parameter_max_value //= 2
if parameter_max_value == 0:
raise MemoryError(f'Execution did not even succeed with {parameter_name}=1.')
return result, parameter_max_value
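A hedged usage sketch (editor's addition; mean_in_batches is a hypothetical stand-in for an expensive batched computation):
import torch

def mean_in_batches(x: torch.Tensor, batch_size: int) -> torch.Tensor:
    return torch.stack([chunk.mean() for chunk in x.split(batch_size)]).mean()

result, used_batch_size = maximize_memory_utilization(
    mean_in_batches,
    parameter_name='batch_size',
    parameter_max_value=1024,
    x=torch.randn(4096),
)
assert used_batch_size == 1024  # on CPU this succeeds on the first attempt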
def construct_optimizer_from_config(model: nn.Module, optimizer_config: MutableMapping[str, Any]) -> optim.Optimizer:
"""
Create a pytorch optimizer for a model, given a config.
:param model:
The model.
:param optimizer_config:
The config: dict(
cls=<OPTIMIZER_CLASS_NAME>,
**kwargs,
)
where kwargs are passed down to the optimizer's constructor, and stripped before from unused arguments.
:return:
The optimizer instance.
"""
optim_name = optimizer_config.pop('cls')
opt_cls = get_optimizer_class_by_name(name=optim_name)
# reduce to parameter needed
optimizer_config = reduce_kwargs_for_method(opt_cls.__init__, kwargs=optimizer_config, raise_on_missing=False)
# instantiate optimizer
optimizer = opt_cls(params=(p for p in model.parameters() if p.requires_grad), **optimizer_config)
return optimizer
# pylint: disable=abstract-method
class ExtendedModule(nn.Module):
"""Extends nn.Module by a few utility methods."""
@property
def device(self) -> torch.device:
"""Return the model's device."""
devices = {
tensor.data.device
for tensor in itertools.chain(self.parameters(), self.buffers())
}
if len(devices) == 0:
raise ValueError('Could not infer device, since there are neither parameters nor buffers.')
elif len(devices) > 1:
device_info = dict(
parameters=dict(self.named_parameters()),
buffers=dict(self.named_buffers()),
)
raise ValueError(f'Ambiguous device! Found: {devices}\n\n{device_info}')
return next(iter(devices))
def reset_parameters(self):
"""Reset the model's parameters."""
# Make sure that all modules with parameters do have a reset_parameters method.
uninitialized_parameters = set(map(id, self.parameters()))
parents = defaultdict(list)
# Recursively visit all sub-modules
task_list = []
for name, module in self.named_modules():
# skip self
if module is self:
continue
# Track parents for blaming
for p in module.parameters():
parents[id(p)].append(module)
# call reset_parameters if possible
if hasattr(module, 'reset_parameters'):
task_list.append((name.count('.'), module))
# initialize from bottom to top
# This ensures that specialized initializations will take priority over the default ones of its components.
for module in map(itemgetter(1), sorted(task_list, reverse=True, key=itemgetter(0))):
module.reset_parameters()
uninitialized_parameters.difference_update(map(id, module.parameters()))
# emit warning if there were parameters which were not initialised by reset_parameters.
if len(uninitialized_parameters) > 0:
logger.warning('reset_parameters() not found for all modules containing parameters. %d parameters were likely not initialised.', len(uninitialized_parameters))
# Additional debug information
for i, p_id in enumerate(uninitialized_parameters, start=1):
logger.debug('[%3d] Parents to blame: %s', i, parents.get(p_id))
class SparseMatrix(ExtendedModule, ABC):
"""A matrix."""
#: The shape (n_rows, n_cols)
shape: Tuple[int, int]
def __init__(self, shape: Tuple[int, int]):
"""
Initialize matrix.
:param shape:
The shape, (n_rows, n_cols).
"""
super().__init__()
self.shape = shape
def __matmul__(self, other: torch.Tensor) -> torch.Tensor:
"""
Matrix-matrix multiplication.
:param other: shape: (n_cols, d)
The right-hand side matrix.
:return: shape: (n_rows, d)
The matrix product, out[i, :] = sum_j self[i, j] * other[j, :]
"""
if other.shape[0] != self.shape[1]:
raise ValueError(f'Shape mismatch: self.shape={self.shape}, other.shape={other.shape}. {self.shape[1]} != {other.shape[0]}.')
return self._real_matmul(other=other)
def _real_matmul(self, other: torch.Tensor) -> torch.Tensor:
"""Perform the matrix-matrix multiplication."""
raise NotImplementedError
def t(self) -> 'SparseMatrix':
"""Matrix transposition."""
raise NotImplementedError
def detach(self) -> 'SparseMatrix':
"""Detaches the values, i.e. breaks the gradient flow."""
raise NotImplementedError
def dense(self) -> torch.Tensor:
"""Return a dense version of the matrix."""
raise NotImplementedError
# pylint: disable=arguments-differ
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Return self @ x."""
return self @ x
class SparseCOOMatrix(SparseMatrix):
"""A sparse matrix in COO format."""
#: The underlying sparse tensor holding indices and values of the non-zero elements.
sparse_matrix: torch.sparse.Tensor
def __init__(
self,
matrix: torch.sparse.Tensor
):
"""
Initialize the matrix.
:param matrix:
The matrix.
"""
super().__init__(shape=matrix.shape)
assert len(matrix.shape) == 2
self.register_buffer(name='sparse_matrix', tensor=matrix.coalesce())
@staticmethod
def from_indices_values_pair(
indices: torch.LongTensor,
values: Optional[torch.Tensor] = None,
size: Optional[Union[int, Tuple[int, int]]] = None,
) -> 'SparseCOOMatrix':
"""
Instantiate the matrix using a pair of indices and optional values.
:param indices: shape: (2, nnz)
The indices.
:param values: shape: (nnz,)
The values.
:param size:
The size. If None, infer from indices.
:return:
The matrix.
"""
if size is None:
size = tuple((indices.max(dim=1).values + 1).tolist())
if isinstance(size, int):
size = (size, size)
for dim, (index_dim, size_dim) in enumerate(zip(indices, size)):
max_id_on_dim = index_dim.max().item()
if max_id_on_dim >= size_dim:
raise ValueError(f'Index out of range for dim={dim}: {max_id_on_dim} vs. {size_dim}')
if values is None:
values = indices.new_ones(indices.shape[1], dtype=torch.float32)
return SparseCOOMatrix(matrix=torch.sparse_coo_tensor(indices=indices, values=values, size=size))
@staticmethod
def from_edge_tensor(
edge_tensor: torch.LongTensor,
edge_weights: Optional[torch.Tensor] = None,
size: Optional[Union[int, Tuple[int, int]]] = None,
) -> 'SparseCOOMatrix':
"""
Construct a sparse adjacency matrix for a given edge_tensor.
:param edge_tensor: shape: (2, num_edges)
The edge tensor, elements: (source, target)
:param edge_weights: shape: (num_edges,)
Edge weights.
:param size: >0
The size, format num_nodes or (num_targets, num_sources).
:return:
The adjacency matrix.
"""
return SparseCOOMatrix.from_indices_values_pair(
indices=edge_tensor.flip(0),
values=edge_weights,
size=size,
)
@staticmethod
def from_dense(
dense: torch.Tensor,
) -> 'SparseCOOMatrix':
"""
Construct a sparse matrix from a given dense version.
:param dense: shape: (m, n)
The dense matrix. Should have some/many zero elements.
:return:
The sparse matrix containing only the non-zero elements.
"""
# convert to sparse matrix
indices = dense.nonzero(as_tuple=True)
values = dense[indices]
return SparseCOOMatrix.from_indices_values_pair(
indices=torch.stack(indices, dim=0),
values=values,
size=dense.shape,
)
@staticmethod
def eye(n: int, device: Union[torch.device, str, None] = None) -> 'SparseCOOMatrix':
"""
Construct a sparse identity matrix.
:param n:
The dimension.
:param device:
The device.
:return:
The identity matrix.
"""
return SparseCOOMatrix.from_indices_values_pair(
indices=torch.arange(n, device=device).unsqueeze(dim=0).repeat(2, 1),
size=n,
)
@property
def indices(self) -> torch.LongTensor:
"""Return the indices."""
return self.sparse_matrix.indices()
@property
def values(self) -> torch.FloatTensor:
"""Return the values."""
return self.sparse_matrix.values()
def sum(self, dim: int) -> torch.Tensor:
"""
Compute the sum along a dimension.
:param dim:
The dimension. From {0, 1}.
:return: shape: (self.shape[1 - dim],)
The sums, with one entry per row (dim=1) or per column (dim=0).
"""
return torch.sparse.sum(input=self.sparse_matrix, dim=dim).to_dense()
def normalize(
self,
dim: int = 1,
target_sum: Optional[float] = None,
) -> 'SparseCOOMatrix':
"""
Normalize the matrix row-wise / column-wise.
:param dim:
The dimension: dim=1 normalizes rows (each row sums to target_sum), dim=0 normalizes columns.
:param target_sum:
An optional target value for the row/column sum. Defaults to 1.
:return:
The normalized matrix.
"""
weights = self.sum(dim=dim).reciprocal()
if target_sum is not None:
weights = weights * target_sum
weights = self.scatter(x=weights, dim=1 - dim) * self.values
return self.with_weights(weights=weights)
@property
def source(self) -> torch.LongTensor:
"""Return the source indices for message passing."""
return self.indices[1]
@property
def target(self) -> torch.LongTensor:
"""Return the target indices for message passing."""
return self.indices[0]
def with_weights(self, weights: torch.Tensor) -> 'SparseCOOMatrix':
"""Return a matrix of the same structure, with adjusted weights."""
return SparseCOOMatrix.from_indices_values_pair(
indices=self.indices,
values=weights,
size=self.shape,
)
def without_weights(self) -> 'SparseCOOMatrix':
"""Return the matrix without weights."""
self.coalesce_()
return SparseCOOMatrix(
matrix=torch.sparse_coo_tensor(
indices=self.sparse_matrix.indices(),
values=torch.ones_like(self.sparse_matrix.values()),
size=self.shape,
),
)
def scatter(self, x: torch.Tensor, dim: int = 1) -> torch.Tensor:
"""
Scatter elements of x to the edges.
:param x: shape: (self.shape[dim], d1, ..., dk)
The values for each node.
:param dim: The dimension, from {0, 1}.
dim=0 -> from target
dim=1 -> from source
:return: shape: (nnz, d1, ..., dk)
The values broadcasted to each edge.
"""
if x.shape[0] != self.shape[dim]:
raise ValueError(x.shape, self.shape[dim])
return x.index_select(dim=0, index=self.indices[dim])
def gather(self, m: torch.Tensor, dim: int = 0) -> torch.Tensor:
"""
Gather elements of m from edges to nodes.
:param m: shape: (num_edges, d1, ..., dk)
The values for each edge.
:param dim: The dimension, from {0, 1}.
dim=0 -> to target
dim=1 -> to source
:return: shape: (num_nodes, d1, ..., dk)
The values aggregated (summed) per node.
"""
if m.shape[0] != self.indices.shape[1]:
raise ValueError(m.shape, self.indices.shape[1])
return m.new_zeros(self.shape[dim], *m.shape[1:]).index_add(dim=0, index=self.indices[dim], source=m)
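# A hedged illustration of the two primitives above (added for clarity; the
# indices are made up): for an adjacency with indices [[0, 0], [1, 2]] -- two
# edges with target node 0 and source nodes 1 and 2 -- scatter(x, dim=1)
# selects x[[1, 2]], one row per edge read from the source nodes, while
# gather(m, dim=0) sums the per-edge rows of m into their target nodes, here
# accumulating both messages into node 0.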
def t(self) -> 'SparseCOOMatrix':
"""Transposed matrix."""
return SparseCOOMatrix(matrix=self.sparse_matrix.t())
def _real_matmul(self, other: torch.Tensor) -> torch.Tensor: # noqa: D102
# torch.sparse.mm requires float values
if self.values.is_floating_point() and other.is_floating_point():
return torch.sparse.mm(mat1=self.sparse_matrix, mat2=other)
msg = self.scatter(x=other)
if self.values is not None:
msg = msg * self.values.view(msg.shape[0], 1)
return self.gather(m=msg)
def coalesce_(self) -> 'SparseCOOMatrix':
"""In-place index de-duplication."""
self.sparse_matrix = self.sparse_matrix.coalesce()
return self
def coalesce(self) -> 'SparseCOOMatrix':
"""
Collapse duplicate (row, col) entries in the indices.
Since the COO format permits duplicate (row, col) pairs, and some operations require unique indices, this operation
collapses them by summing the corresponding values. This operation is quite costly.
"""
return SparseCOOMatrix(matrix=self.sparse_matrix.coalesce())
def __add__(self, other: 'SparseCOOMatrix') -> 'SparseCOOMatrix': # noqa: D105
if not isinstance(other, SparseCOOMatrix):
raise NotImplementedError
return SparseCOOMatrix(matrix=self.sparse_matrix + other.sparse_matrix)
def detach(self) -> 'SparseCOOMatrix': # noqa: D102
return SparseCOOMatrix(matrix=self.sparse_matrix.detach())
def dense(self) -> torch.Tensor: # noqa: D102
assert len(self.shape) == 2
self.coalesce_()
result = self.values.new_zeros(size=self.shape)
result[self.indices[0], self.indices[1]] = self.values
return result
@property
def edge_tensor(self) -> torch.LongTensor:
"""Return the edge_tensor view of the adjacency matrix."""
return torch.stack([
self.source,
self.target,
], dim=0)
@property
def edge_weights(self) -> torch.FloatTensor:
"""Return the edge_weights view of the adjacency matrix."""
return self.values
@property
def nnz(self) -> int:
"""Return the number of occupied indices."""
return self.indices.shape[1]
def extra_repr(self) -> str:
"""Return a string with some basic information."""
return f'size={self.shape}, nnz={self.nnz}, sparsity={1. - (self.nnz / numpy.prod(self.shape)):.2%}'
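# A minimal usage sketch (added for illustration; `_sparse_coo_matrix_demo` is
# not part of the original module): build a small adjacency from an edge
# tensor, row-normalize it, and multiply it with a dense feature matrix.
def _sparse_coo_matrix_demo() -> torch.Tensor:
    """Demonstrate construction, normalization, and multiplication."""
    # two edges over 3 nodes: 0 -> 1 and 2 -> 1, i.e. node 1 has two in-edges
    edge_tensor = torch.as_tensor(data=[[0, 2], [1, 1]], dtype=torch.long)
    adjacency = SparseCOOMatrix.from_edge_tensor(edge_tensor=edge_tensor, size=3)
    # each row with at least one entry now sums to one
    adjacency = adjacency.normalize(dim=1)
    x = torch.rand(3, 8)
    # out[1] == 0.5 * x[0] + 0.5 * x[2]; rows without in-edges remain zero
    return adjacency @ x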
# File: rank-based-evaluation-main/src/kgm/utils/common.py
"""General utility methods."""
import hashlib
import inspect
import logging
import pickle
import random
import string
from collections import deque
from enum import Enum
from typing import Any, Callable, Collection, Dict, Iterable, List, Mapping, Optional, Sequence, Set, Type, TypeVar, Union
T = TypeVar('T')
logger = logging.getLogger(name=__name__)
def enum_values(enum_cls: Type[Enum]) -> List:
"""List enum values."""
return [v.value for v in enum_cls]
def value_to_enum(enum_cls: Type[Enum], value: T) -> Enum:
"""Lookup enum for a given value."""
pos = [v for v in enum_cls if v.value == value]
if len(pos) != 1:
raise AssertionError(f'Could not resolve {value} for enum {enum_cls}. Available are {list(v for v in enum_cls)}.')
return pos[0]
def identity(x: T) -> T:
"""Return the value itself."""
return x
def get_all_subclasses(base_class: Type[T]) -> Set[Type[T]]:
"""Get a collection of all (recursive) subclasses of a given base class."""
return set(base_class.__subclasses__()).union(s for c in base_class.__subclasses__() for s in get_all_subclasses(c))
def get_subclass_by_name(
base_class: Type[T],
name: str,
normalizer: Optional[Callable[[str], str]] = None,
exclude: Optional[Union[Collection[Type[T]], Type[T]]] = None,
) -> Type[T]:
"""Get a subclass of a base-class by name.
:param base_class:
The base class.
:param name:
The name.
:param normalizer:
An optional name normalizer, e.g. str.lower
:param exclude:
An optional collection of subclasses to exclude.
:return:
The subclass with matching name.
:raises ValueError:
If no such subclass can be determined.
"""
if normalizer is None:
normalizer = identity
if exclude is None:
exclude = set()
if isinstance(exclude, type):
exclude = {exclude}
norm_name = normalizer(name)
for subclass in get_all_subclasses(base_class=base_class).difference(exclude):
if normalizer(subclass.__name__) == norm_name:
return subclass
subclass_dict = {normalizer(c.__name__): c for c in get_all_subclasses(base_class=base_class)}
raise ValueError(f'{base_class} does not have a subclass named {norm_name}. Subclasses: {subclass_dict}.')
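# A hedged usage sketch for the lookup above (the classes `Base` and `FooBar`
# are made up for illustration and do not exist in this module):
#
#     class Base: ...
#     class FooBar(Base): ...
#
#     get_subclass_by_name(base_class=Base, name='FOOBAR', normalizer=str.lower)
#     # -> FooBar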
def argparse_bool(x):
"""Convert a command line arguments for a boolean value."""
return str(x).lower() in {'true', '1', 'yes'}
def kwargs_or_empty(kwargs: Optional[Mapping[str, Any]]) -> Mapping[str, Any]:
"""Return the dictionary, or an empty dictionary."""
if kwargs is None:
kwargs = {}
return kwargs
def reduce_kwargs_for_method(
method,
kwargs: Optional[Mapping[str, Any]] = None,
raise_on_missing: bool = True,
) -> Mapping[str, Any]:
"""Prepare keyword arguments for a method.
Drops excess parameters with a warning, and checks whether arguments are provided for all mandatory parameters.
"""
# Ensure kwargs is a dictionary
kwargs = kwargs_or_empty(kwargs=kwargs)
# compare keys with argument names
signature = inspect.signature(method)
parameters = set(signature.parameters.keys())
# Drop arguments which are unexpected
to_drop = set(kwargs.keys()).difference(parameters)
if len(to_drop) > 0:
dropped = {k: kwargs[k] for k in to_drop}
logger.warning('Dropping parameters: %s', dropped)
kwargs = {k: v for k, v in kwargs.items() if k not in to_drop}
# Check whether all necessary parameters are provided
missing = set()
for parameter in signature.parameters.values():
if (parameter.default is parameter.empty) and parameter.name not in kwargs.keys() and parameter.name != 'self' and parameter.kind != parameter.VAR_POSITIONAL and parameter.kind != parameter.VAR_KEYWORD:
missing.add(parameter.name)
# check whether missing parameters are provided via kwargs
missing = missing.difference(kwargs.get('kwargs', dict()).keys())
if len(missing) > 0 and raise_on_missing:
raise ValueError(f'Method {method.__name__} missing required parameters: {missing}')
return kwargs
def to_dot(
config: Dict[str, Any],
prefix: Optional[str] = None,
separator: str = '.',
function_to_name: bool = True,
) -> Dict[str, Any]:
"""Convert nested dictionary to flat dictionary.
:param config:
The potentially nested dictionary.
:param prefix:
An optional prefix.
:param separator:
The separator used to flatten the dictionary.
:param function_to_name:
Whether to convert functions to a string representation.
:return:
A flat dictionary where nested keys are joined by a separator.
"""
result = dict()
for k, v in config.items():
if prefix is not None:
k = f'{prefix}{separator}{k}'
if isinstance(v, dict):
v = to_dot(config=v, prefix=k, separator=separator)
elif hasattr(v, '__call__') and function_to_name:
v = {k: v.__name__ if hasattr(v, '__name__') else str(v)}
else:
v = {k: v}
result.update(v)
return result
def from_dot(
dictionary: Mapping[str, Any],
separator: str = '.',
) -> Dict[str, Any]:
"""Convert flat dictionary to a nested dictionary.
:param dictionary:
The flat dictionary.
:param separator:
The separator used to flatten the dictionary.
:return:
A nested dictionary where flat keys are split by a separator.
"""
result = {}
for k, v in dictionary.items():
key_sequence = k.split(sep=separator)
sub_result = result
for key in key_sequence[:-1]:
if key not in sub_result:
sub_result[key] = dict()
sub_result = sub_result[key]
sub_result[key_sequence[-1]] = v
return result
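# A minimal round-trip sketch for the two helpers above (added for
# illustration; the function and the literal values are made up):
def _dot_round_trip_demo() -> None:
    """Show that from_dot inverts to_dot for plain nested dictionaries."""
    nested = {'model': {'dim': 32, 'act': 'relu'}, 'lr': 1e-3}
    flat = to_dot(config=nested)
    # flat == {'model.dim': 32, 'model.act': 'relu', 'lr': 0.001}
    assert from_dot(dictionary=flat) == nested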
class NonFiniteLossError(RuntimeError):
"""A non-finite loss value."""
K = TypeVar('K')
V = TypeVar('V')
def invert_mapping(
mapping: Mapping[K, V]
) -> Mapping[V, K]:
"""
Invert a mapping. The mapping has to be a bijection, i.e. a one-to-one mapping.
:param mapping:
The mapping key -> value
:return:
The mapping value -> key
"""
if len(set(mapping.values())) < len(mapping):
raise ValueError('Mapping is not a bijection, since there are duplicate values!')
return {
v: k
for k, v in mapping.items()
}
def random_split_range(max_val: int, num: int) -> List[int]:
"""Randomly split a range into num parts."""""
if max_val <= 0:
raise ValueError(f'max_val must be strictly positive, but max_val{max_val}')
if num <= 0:
raise ValueError(f'num must be strictly positive, but num={num}')
if num > max_val:
raise ValueError(f'Cannot split {max_val} into {num} positive parts.')
breaks = [0] + sorted(random.sample(range(1, max_val), k=num - 1)) + [max_val]
return [(stop - start) for start, stop in zip(breaks, breaks[1:])]
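# Example (added for illustration): random_split_range(max_val=10, num=3) may
# return e.g. [2, 5, 3]; the parts are always strictly positive and sum to max_val.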
def get_value_from_nested_mapping(
dictionary: Mapping[str, Any],
keys: Sequence[str],
default: Any = 'raise',
) -> Any:
"""
Get a value from a nested dictionary addressed by a sequence of keys.
:param dictionary:
The (nested) dictionary.
:param keys:
A sequence of keys.
:param default:
The value to return for a missing key. If 'raise' (the default), a KeyError is raised instead.
:return:
The value.
"""
for key in keys:
if key not in dictionary:
if default == 'raise':
raise KeyError(key)
else:
return default
dictionary = dictionary[key]
return dictionary
def integer_portion(
number: int,
ratio: float = 1.,
multiple_of: int = 1,
) -> int:
"""
Multiply a number by a ratio and round the result.
Constraints:
1. The output is at least multiple_of
2. Besides, the output is the closest multiple.
:param number:
The original number.
:param ratio:
The relative factor.
:param multiple_of:
Use the closest multiple of this number.
:return:
"""
for name, value in dict(
number=number,
ratio=ratio,
multiple_of=multiple_of,
).items():
if value <= 0:
raise ValueError(f'{name} needs to be strictly positive, but is {value}.')
return max(int(round(number * ratio / multiple_of)), 1) * multiple_of
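# Worked examples (added for illustration):
#   integer_portion(number=10, ratio=0.25, multiple_of=4) == 4
#     round(10 * 0.25 / 4) = round(0.625) = 1 -> 1 * 4 = 4
#   integer_portion(number=3, ratio=0.1, multiple_of=2) == 2
#     round(3 * 0.1 / 2) = 0 -> clamped to 1 -> 1 * 2 = 2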
def last(iterable: Iterable[T]) -> T:
"""Return the last item of an iterable."""
return deque(iterable, maxlen=1).pop()
def random_sentence_list(
num_sentences: int = 1,
word_sep: str = ' ',
min_num_words: int = 1,
max_num_words: int = 1,
max_word_length: int = 10,
min_word_length: int = 2,
word_prefix: str = '',
sentence_prefix: str = '',
alphabet: Sequence[str] = string.ascii_letters,
) -> Sequence[str]:
"""Generate a list of random words."""
return [
sentence_prefix + word_sep.join(
word_prefix + ''.join(
random.sample(
alphabet,
random.randrange(min_word_length, max_word_length + 1)
)
)
for _ in range(random.randrange(min_num_words, max_num_words) if max_num_words > min_num_words else min_num_words)
)
for _ in range(num_sentences)
]
def multi_hash(*keys: Any, hash_function: str = "sha512") -> str:
"""Return a hash sum for a sequence of objects."""
return hashlib.new(name=hash_function, data=pickle.dumps(tuple(keys))).hexdigest()
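# Example (added for illustration): multi_hash('dbp15k', 0.3, 42) pickles the
# tuple ('dbp15k', 0.3, 42) and returns a fixed-length hex digest, stable
# across runs for pickle-stable inputs.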
# File: rank-based-evaluation-main/src/kgm/utils/__init__.py
# coding=utf-8
"""A collection of utility methods."""
# File: rank-based-evaluation-main/src/kgm/data/reduction.py
"""Reduction strategies from Knowledge Graph to (weighted) uni-relational graphs."""
import enum
import logging
from typing import Callable, Optional
import torch
from .knowledge_graph import KnowledgeGraph
from ..utils.torch_utils import ExtendedModule, SparseCOOMatrix
logger = logging.getLogger(name=__name__)
# pylint: disable=abstract-method
class KnowledgeGraphToGraphReduction(ExtendedModule):
r"""
Base class for methods reducing the full KG tensor to a single adjacency matrix.
A knowledge graph (KG) comprises a set of triples :math:`\mathcal{T} = \{(h, r, t)\}`, where
:math:`h, t \in \mathcal{E}` are entities, and :math:`r \in \mathcal{R}` is a relation.
The KG can also be represented by a three-dimensional binary tensor
:math:`\mathbf{T} \in \{0, 1\}^{E \times R \times E}`, where :math:`E := |\mathcal{E}|`, and :math:`R := |\mathcal{R}|`.
Often GCN-based models are only defined for uni-relational graphs. Thus, the KG adjacency tensor :math:`\mathbf{T}`
needs to be reduced to a (weighted) adjacency matrix :math:`\mathbf{A} \in \mathbb{R}^{E \times E}`.
"""
# pylint: disable=arguments-differ
def forward(self) -> SparseCOOMatrix:
"""Get the (weighted) uni-relational adjacency matrix."""
return self.get_adjacency()
def _get_raw_edge_tensor(knowledge_graph: KnowledgeGraph) -> torch.LongTensor:
"""Get the raw edge_tensor, i.e. {{(h,t) | (h,r,t) in T}}."""
return knowledge_graph.triples[:, [0, 2]].t()
class StaticKnowledgeGraphToGraphReduction(KnowledgeGraphToGraphReduction):
"""A base class for parameter-free reduction."""
def __init__(
self,
knowledge_graph: KnowledgeGraph,
normalization: Optional[Callable[[SparseCOOMatrix], SparseCOOMatrix]] = None,
):
"""
Initialize the reduction strategy.
:param knowledge_graph:
The knowledge graph to reduce.
:param normalization:
An optional normalization of the resulting adjacency matrix.
"""
super().__init__()
adjacency = self.get_static_adjacency(knowledge_graph=knowledge_graph)
if normalization is not None:
adjacency = normalization(adjacency)
self.adjacency = adjacency
def get_static_adjacency(self, knowledge_graph: KnowledgeGraph) -> SparseCOOMatrix:
"""Compute the adjacency matrix in advance."""
raise NotImplementedError
def forward(self) -> SparseCOOMatrix: # noqa: D102
return self.adjacency
# pylint: disable=abstract-method
class DropRelationInformationKnowledgeGraphToGraphReduction(StaticKnowledgeGraphToGraphReduction):
"""Drop the relation information, i.e. there is an edge if there is at least one triple."""
def __init__(
self,
knowledge_graph: KnowledgeGraph,
normalization: Optional[Callable[[SparseCOOMatrix], SparseCOOMatrix]] = None,
unique: bool = True,
add_self_loops: bool = False,
add_inverse: bool = False,
):
"""
Initialize the reduction strategy.
:param knowledge_graph:
The knowledge graph to reduce.
:param normalization:
An optional normalization of the resulting adjacency matrix.
:param unique:
Whether to drop duplicate edges.
:param add_self_loops:
Whether to add self-loops.
:param add_inverse:
Whether to add inverse edges, i.e. make the adjacency symmetric.
"""
self.unique = unique
self.add_self_loops = add_self_loops
self.add_inverse = add_inverse
super().__init__(knowledge_graph=knowledge_graph, normalization=normalization)
def get_static_adjacency(self, knowledge_graph: KnowledgeGraph) -> SparseCOOMatrix: # noqa: D102
edge_tensor = _get_raw_edge_tensor(knowledge_graph)
if self.add_inverse:
edge_tensor = torch.cat([edge_tensor, edge_tensor.flip(0)], dim=1)
if self.add_self_loops:
edge_tensor = torch.cat([edge_tensor, torch.arange(knowledge_graph.num_entities, device=edge_tensor.device).view(1, -1).repeat(2, 1)], dim=-1)
# Drop duplicates
if self.unique:
num_edges = edge_tensor.shape[1]
edge_tensor = torch.unique(edge_tensor, dim=1)
num_edges_reduced = edge_tensor.shape[1]
if num_edges_reduced < num_edges:
logger.info('Dropped %d/%d edges.', num_edges - num_edges_reduced, num_edges)
return SparseCOOMatrix.from_edge_tensor(
edge_tensor=edge_tensor,
edge_weights=None,
size=knowledge_graph.num_entities,
)
def _scale_edge_weights(
adjacency: SparseCOOMatrix,
edge_factor: torch.FloatTensor,
) -> SparseCOOMatrix:
"""
Multiply the edge weights by an edge-specific factor.
Handles special case where the original matrix is unweighted.
:param adjacency:
The adjacency.
:param edge_factor: shape: (num_edges,)
The edge-wise factor.
:return:
The scaled adjacency matrix.
"""
if adjacency.values is not None:
edge_factor = adjacency.values * edge_factor
return adjacency.with_weights(weights=edge_factor)
def target_normalization(adjacency: SparseCOOMatrix) -> SparseCOOMatrix:
r"""
Normalize an adjacency matrix row-wise.
.. math ::
\hat{A}_{ij} = A_{ij} / \sum_{k} A_{ik}
:param adjacency:
The adjacency matrix.
:return:
The normalized adjacency matrix.
"""
return adjacency.normalize(dim=1)
def source_normalization(adjacency: SparseCOOMatrix) -> SparseCOOMatrix:
r"""
Normalize an adjacency matrix column-wise.
.. math ::
\hat{A}_{ij} = A_{ij} / \sum_{k} A_{kj}
:param adjacency:
The adjacency matrix.
:return:
The normalized adjacency matrix.
"""
return adjacency.normalize(dim=0)
def symmetric_normalization(adjacency: SparseCOOMatrix) -> SparseCOOMatrix:
r"""
Normalize an adjacency matrix symmetrically.
.. math ::
\hat{A}_{ij} = A_{ij} / \sqrt{\left(\sum_{k} A_{ik} \right) \cdot \left(\sum_{k} A_{kj} \right)}
:param adjacency:
The adjacency matrix.
:return:
The normalized adjacency matrix.
"""
edge_factor = (adjacency.scatter(adjacency.sum(dim=1), dim=0) * adjacency.scatter(adjacency.sum(dim=0), dim=1)).sqrt().reciprocal()
# edge_factor = adjacency.scatter(adjacency.sum(dim=1).sqrt().reciprocal(), dim=0) * adjacency.scatter(adjacency.sum(dim=0).sqrt().reciprocal(), dim=1)
return _scale_edge_weights(
adjacency=adjacency,
edge_factor=edge_factor,
)
class EdgeWeightsEnum(str, enum.Enum):
"""Which edge weights to use."""
#: None
none = 'none'
#: Inverse in-degree -> sum of weights for incoming messages = 1
inverse_in_degree = 'inverse_in_degree'
#: Inverse out-degree -> sum of weights for outgoing messages = 1
inverse_out_degree = 'inverse_out_degree'
#: 1 / sqrt(in-degree * out-degree)
symmetric = 'symmetric'
def normalize_adjacency(
adjacency: SparseCOOMatrix,
mode: EdgeWeightsEnum,
) -> SparseCOOMatrix:
"""
Normalize adjacency according to normalization mode.
:param adjacency:
The adjacency matrix.
:param mode:
The mode.
:return:
The normalized adjacency.
"""
if mode == EdgeWeightsEnum.inverse_in_degree:
return target_normalization(adjacency=adjacency)
elif mode == EdgeWeightsEnum.inverse_out_degree:
return source_normalization(adjacency=adjacency)
elif mode == EdgeWeightsEnum.symmetric:
return symmetric_normalization(adjacency=adjacency)
return adjacency
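# A minimal sketch of the reduction pipeline (added for illustration; assumes a
# KnowledgeGraph instance `kg` is available):
#
#     reduction = DropRelationInformationKnowledgeGraphToGraphReduction(
#         knowledge_graph=kg,
#         normalization=lambda adj: normalize_adjacency(adj, mode=EdgeWeightsEnum.symmetric),
#         add_self_loops=True,
#         add_inverse=True,
#     )
#     adjacency = reduction()  # a symmetrically normalized SparseCOOMatrix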
# File: rank-based-evaluation-main/src/kgm/data/loaders.py
"""Data loading for Entity Alignment datasets."""
import abc
import io
import json
import logging
import lzma
import pathlib
import tarfile
import zipfile
from typing import Collection, Generic, Mapping, Optional, Tuple, Type, TypeVar, Union
import pandas
import requests
import torch
from .knowledge_graph import EntityAlignment, KnowledgeGraph, KnowledgeGraphAlignmentDataset, MatchSideEnum, SIDES
from ..utils.common import get_all_subclasses, get_subclass_by_name, multi_hash
from ..utils.data_utils import check_hashsums, resolve_cache_root, resolve_google_drive_file_url, save_response_content
from ..utils.torch_utils import split_tensor
from ..utils.types import IDAlignment, Triples
A = TypeVar('A', zipfile.ZipFile, tarfile.TarFile)
logger = logging.getLogger(name=__name__)
class Archive(Generic[A]):
"""A generic class for reading from archives."""
#: The archive file
archive_file: A
#: The default file extension:
default_file_extension: str
def __init__(self, archive_path: pathlib.Path):
"""
Initialize the archive.
:param archive_path:
The archive path.
"""
self.path = archive_path
def __enter__(self): # noqa: D105
self.archive_file = self._open_archive(path=self.path)
return self
def __exit__(self, exc_type, exc_val, exc_tb): # noqa: D105
self.archive_file.close()
# pylint: disable=unused-argument
def open_file(
self,
relative_path: Union[pathlib.Path, str],
encoding: Optional[str] = None,
) -> io.TextIOBase:
"""Open a file from the archive in read mode."""
return self.archive_file.open(name=str(relative_path))
def _open_archive(self, path: pathlib.Path) -> A:
"""Open the archive in read mode."""
raise NotImplementedError
class ZipArchive(Archive[zipfile.ZipFile]):
"""A zipfile archive."""
default_file_extension = 'zip'
def _open_archive(
self,
path: pathlib.Path,
) -> zipfile.ZipFile: # noqa: D102
return zipfile.ZipFile(file=path)
class TarArchive(Archive[tarfile.TarFile]):
"""A tarfile archive."""
default_file_extension = 'tar.gz'
def _open_archive(
self,
path: pathlib.Path,
) -> tarfile.TarFile: # noqa: D102
return tarfile.open(name=path)
def open_file(
self,
relative_path: Union[pathlib.Path, str],
encoding: Optional[str] = None,
) -> io.TextIOBase: # noqa: D102
return io.TextIOWrapper(self.archive_file.extractfile(member=str(relative_path)), encoding=encoding)
def apply_compaction(
triples: Triples,
compaction: Mapping[int, int],
columns: Union[int, Collection[int]],
dim: int = 0,
) -> Triples:
"""
Apply ID compaction to triples.
:param triples:
The triples
:param compaction:
The ID compaction, i.e. mapping old ID to new ID.
:param columns:
The columns on which to apply the compaction.
:param dim:
The dimension along which to apply the compaction.
:return:
The updated triples.
"""
if compaction is None:
return triples
if isinstance(columns, int):
columns = [columns]
if dim not in {0, 1}:
raise KeyError(dim)
triple_shape = triples.shape
if dim == 1:
triples = triples.t()
new_cols = []
for c in range(triples.shape[1]):
this_column = triples[:, c]
if c in columns:
new_cols.append(torch.tensor([compaction[int(e)] for e in this_column])) # pylint: disable=not-callable
else:
new_cols.append(this_column)
new_triples = torch.stack(new_cols, dim=1 - dim)
assert new_triples.shape == triple_shape
return new_triples
def compact_columns(
triples: Triples,
label_to_id_mapping: Mapping[str, int],
columns: Union[int, Collection[int]],
) -> Tuple[Triples, Optional[Mapping[str, int]], Optional[Mapping[int, int]]]:
"""
Calculate compaction of the columns of triples.
:param triples: shape: (num_triples, 3)
The original triples.
:param label_to_id_mapping:
The old label-to-ID mapping.
:param columns:
The columns on which to calculate the compaction.
:return:
A 3-tuple (new_triples, new_mapping, compaction) where
* new_triples: shape: (num_triples, 3)
The compacted triples.
* new_mapping:
The updated label to ID mapping.
* compaction:
A mapping old ID to new ID.
Note: compaction may be None if the old triples were already compact; in that case, new_mapping is the unchanged input mapping.
"""
ids = label_to_id_mapping.values()
num_ids = len(ids)
assert len(set(ids)) == len(ids)
max_id = max(ids)
if num_ids < max_id + 1:
compaction = dict((old, new) for new, old in enumerate(sorted(ids)))
assert set(compaction.keys()) == set(label_to_id_mapping.values())
assert set(compaction.values()) == set(range(num_ids))
new_triples = apply_compaction(triples, compaction, columns, dim=0)
new_mapping = {label: compaction[_id] for label, _id in label_to_id_mapping.items()}
logger.info('Compacted: %d -> %d', max_id, num_ids - 1)
else:
compaction = None
new_triples = triples
new_mapping = label_to_id_mapping
logger.debug('No compaction necessary.')
return new_triples, new_mapping, compaction
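# Example (added for illustration): for label_to_id_mapping {'a': 0, 'b': 2, 'c': 5}
# the IDs are not consecutive, so the computed compaction is {0: 0, 2: 1, 5: 2},
# the new mapping becomes {'a': 0, 'b': 1, 'c': 2}, and the given columns of the
# triples are re-indexed accordingly.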
def compact_graph(
graph: KnowledgeGraph,
no_duplicates: bool = True,
) -> Tuple[KnowledgeGraph, Optional[Mapping[int, int]], Optional[Mapping[int, int]]]:
"""
Compact a KG.
:param graph:
The KG.
:param no_duplicates:
Whether to drop duplicates.
:return:
The updated KG, and mappings from old ID to compact ID, or None if the KG is already compliant.
"""
if graph.inverse_triples:
raise NotImplementedError
triples0 = graph.triples
# Compact entities
triples1, compact_entity_label_to_id, entity_compaction = compact_columns(triples=triples0, label_to_id_mapping=graph.entity_label_to_id, columns=(0, 2))
# Compact relations
triples2, compact_relation_label_to_id, relation_compaction = compact_columns(triples=triples1, label_to_id_mapping=graph.relation_label_to_id, columns=(1,))
# Filter duplicates
if no_duplicates:
old_size = triples2.shape[0]
triples2 = torch.unique(triples2, dim=0)
new_size = triples2.shape[0]
if new_size < old_size:
logger.info('Aggregated edges: %d -> %d.', old_size, new_size)
# Compile to new knowledge graph
compact_graph_ = KnowledgeGraph(
triples=triples2,
entity_label_to_id=compact_entity_label_to_id,
relation_label_to_id=compact_relation_label_to_id,
lang_code=graph.lang_code,
dataset_name=graph.dataset_name,
subset_name=graph.subset_name
)
return compact_graph_, entity_compaction, relation_compaction
def compact_single_alignment(
single_alignment: IDAlignment,
left_compaction: Mapping[int, int],
right_compaction: Mapping[int, int],
) -> IDAlignment:
"""
Apply ID compaction to a single alignment.
:param single_alignment: shape: (2, num_alignments), dtype: long
The alignment.
:param left_compaction:
The compaction for the left side, i.e. a mapping old ID -> new ID for the left graph.
:param right_compaction:
The compaction for the right side, i.e. a mapping old ID -> new ID for the right graph.
:return: shape: (2, num_alignments)
The updated alignment.
"""
compact_single_alignment_ = single_alignment
for col, compaction in enumerate([left_compaction, right_compaction]):
compact_single_alignment_ = apply_compaction(triples=compact_single_alignment_, compaction=compaction, columns=col, dim=1)
return compact_single_alignment_
def compact_knowledge_graph_alignment(
alignment: EntityAlignment,
left_entity_compaction: Mapping[int, int],
right_entity_compaction: Mapping[int, int],
) -> EntityAlignment:
"""
Apply ID compaction to entity alignment.
:param alignment:
The entity alignment.
:param left_entity_compaction:
The compaction for the left side, i.e. a mapping old ID -> new ID for the left graph.
:param right_entity_compaction:
The compaction for the right side, i.e. a mapping old ID -> new ID for the right graph.
:return:
The updated entity alignment.
"""
# Entity compaction
compact_entity_alignment_train = compact_single_alignment(single_alignment=alignment.train, left_compaction=left_entity_compaction, right_compaction=right_entity_compaction)
compact_entity_alignment_test = compact_single_alignment(single_alignment=alignment.test, left_compaction=left_entity_compaction, right_compaction=right_entity_compaction)
if alignment.num_validation > 0:
compact_entity_alignment_val = compact_single_alignment(single_alignment=alignment.validation, left_compaction=left_entity_compaction, right_compaction=right_entity_compaction)
else:
compact_entity_alignment_val = None
return EntityAlignment(
train=compact_entity_alignment_train,
test=compact_entity_alignment_test,
_validation=compact_entity_alignment_val,
)
def compact_knowledge_graph_alignment_dataset(
left_graph: KnowledgeGraph,
right_graph: KnowledgeGraph,
alignment: EntityAlignment,
no_duplicates: bool = True,
) -> Tuple[KnowledgeGraph, KnowledgeGraph, EntityAlignment]:
"""
Compact a knowledge graph alignment dataset.
When loading a KG with pre-defined label-to-ID mappings, it might happen that the ID range is not consecutive, or does not start at 0.
Thus, a compaction is applied by mapping the IDs monotonically to {0, ..., num_labels - 1}.
:param left_graph:
The left KG.
:param right_graph:
The right KG.
:param alignment:
The entity alignment.
:param no_duplicates:
Whether to discard duplicate triples.
:return:
The updated left/right graph and alignment.
"""
left_compact_graph, left_entity_compaction = compact_graph(graph=left_graph, no_duplicates=no_duplicates)[:2]
right_compact_graph, right_entity_compaction = compact_graph(graph=right_graph, no_duplicates=no_duplicates)[:2]
compact_alignment = compact_knowledge_graph_alignment(
alignment=alignment,
left_entity_compaction=left_entity_compaction,
right_entity_compaction=right_entity_compaction,
)
return left_compact_graph, right_compact_graph, compact_alignment
def load_triples(
triples_file: io.TextIOBase,
delimiter: str = '\t',
encoding: str = 'utf8',
engine: str = 'c',
) -> Tuple[Triples, Mapping[str, int], Mapping[str, int]]:
"""
Load triples from a file-like object.
:param triples_file:
The opened file-like object.
:param delimiter:
The delimiter.
:param encoding:
The encoding.
:param engine:
The pandas engine.
:return:
A tuple (triples, entity_label_to_id, relation_label_to_id) where
* triples: shape: (num_triples, 3), dtype: long
* entity_label_to_id / relation_label_to_id: mapping from labels to IDs.
"""
# Load triples from tsv file
df = pandas.read_csv(
filepath_or_buffer=triples_file,
sep=delimiter,
encoding=encoding,
header=None,
names=['h', 'r', 't'],
engine=engine,
dtype=str,
)
df = df.applymap(str)
# Sorting ensures consistent results when the triples are permuted
entity_label_to_id = {
e: i for i, e in enumerate(sorted(set(df['h'].unique()).union(set(df['t'].unique()))))
}
relation_label_to_id = {
r: i for i, r in enumerate(sorted(df['r'].unique()))
}
# Label triples to ID
for col, mapping in zip('hrt', [entity_label_to_id, relation_label_to_id, entity_label_to_id]):
df[col] = df[col].apply(mapping.__getitem__)
triples = torch.as_tensor(data=df.values, dtype=torch.long).unique(dim=0)
# Log some info
logger.info(
'Loaded %d unique triples, with %d unique entities and %d unique relations.',
triples.shape[0],
len(entity_label_to_id),
len(relation_label_to_id)
)
return triples, entity_label_to_id, relation_label_to_id
def _load_label_to_id(
archive: Archive,
relative_path: pathlib.Path,
) -> Mapping[str, int]:
"""
Load entity label to ID file.
:param archive:
The opened archive file.
:param relative_path:
The relative path within the archive.
:return:
A mapping from entity labels to IDs.
"""
with archive.open_file(relative_path=relative_path) as text_file:
df = pandas.read_csv(filepath_or_buffer=text_file, names=['id', 'label'], header=None, sep='\t', encoding='utf8', engine='c')
return dict(zip(df['label'].values.tolist(), df['id'].values.tolist()))
def _load_entity_alignment(
archive: Archive,
relative_path: pathlib.Path,
left_graph: KnowledgeGraph,
right_graph: KnowledgeGraph,
sep: str = '\t',
) -> IDAlignment:
"""
Load entity alignment from an open archive.
:param archive:
The opened archive.
:param relative_path:
The relative path within the archive.
:param left_graph:
The left KG.
:param right_graph:
The right KG.
:return: shape: (2, num_alignments)
The entity alignment.
"""
# Load label alignment
with archive.open_file(relative_path=relative_path) as text_file:
entity_alignment = pandas.read_csv(
filepath_or_buffer=text_file,
names=['L', 'R'],
header=None,
sep=sep,
encoding='utf8',
engine='c' if len(sep) == 1 else 'python',
dtype=str,
)
return translate_alignment(labelled_entity_alignment=entity_alignment, left_graph=left_graph, right_graph=right_graph)
def translate_alignment(
labelled_entity_alignment: pandas.DataFrame,
left_graph: KnowledgeGraph,
right_graph: KnowledgeGraph,
) -> IDAlignment:
"""
Convert an alignment of labels to an alignment of IDs.
:param labelled_entity_alignment: columns: ['L', 'R']
The entity alignment, label-based.
:param left_graph:
The left KG.
:param right_graph:
The right KG.
:return: shape: (2, num_alignments)
The ID-based alignment.
"""
# Translate to ID alignment
alignment = torch.stack(
[
torch.as_tensor(
data=labelled_entity_alignment[col].apply(graph.entity_label_to_id.get, args=(-1,)),
dtype=torch.long,
)
for col, graph in zip('LR', [left_graph, right_graph])
],
dim=0,
)
# Drop invalid
invalid_mask = (alignment < 0).any(dim=0)
num_invalid = invalid_mask.sum()
if num_invalid > 0:
logger.warning('Dropping %d invalid rows.', num_invalid)
alignment = alignment[:, ~invalid_mask]
alignment = alignment.unique(dim=1)
logger.info('Loaded alignment of size %d.', alignment.shape[1])
return alignment
def _load_tensor_from_csv(
archive: Archive,
relative_path: pathlib.Path,
) -> torch.LongTensor:
"""
Load an integer tensor from a TSV file in an opened archive.
:param archive:
The opened archive.
:param relative_path:
The relative path within the archive.
:return: dtype: long
The tensor.
"""
with archive.open_file(relative_path=relative_path) as text_file:
return torch.tensor( # pylint: disable=not-callable
data=pandas.read_csv(filepath_or_buffer=text_file, header=None, sep='\t', encoding='utf8', engine='c').values,
dtype=torch.long,
)
class OnlineKnowledgeGraphAlignmentDatasetLoader:
"""Contains a lazy reference to a knowledge graph alignment data set."""
#: The URL where the data can be downloaded from
url: str
#: The subsets
subsets: Collection[str] = frozenset()
#: The pre-defined train-test splits
predefined_splits: Collection[float] = frozenset()
#: The archive file type
archive_type: Type[Archive] = TarArchive
#: The file name for the archive
archive_file_name: str
#: The directory where the datasets will be extracted to
cache_root: pathlib.Path
def __init__(
self,
subset: Optional[str] = None,
train_test_split: Optional[float] = None,
cache_root: Optional[Union[pathlib.Path, str]] = None,
compact: bool = True,
train_validation_split: Optional[float] = 0.8,
with_inverse_triples: bool = False,
with_self_loops: bool = False,
random_seed: int = 42,
) -> None:
"""
Initialize the data loader.
:param subset:
The name of the subset to use. Check subsets() for available subsets. If None, use the alphabetically
first one. This should *not* happen within a production environment.
:param train_test_split:
The train-test split ratio.
:param cache_root:
The cache root to use for caching downloaded files.
:param compact:
Whether to compact the label-to-ID mappings, i.e. ensure that the IDs are consecutive from
{0, ..., num_labels-1}
:param train_validation_split:
The train-validation split ratio.
:param with_inverse_triples:
Whether to add inverse triples.
:param with_self_loops:
Whether to add self-loops.
:param random_seed:
The random seed to use for splitting.
"""
self.cache_root = resolve_cache_root(cache_root, self.cache_sub_directory_name)
logger.info('Using cache_root=%s', self.cache_root)
if subset is None:
subset = sorted(self.subsets)[0]
logger.warning('No subset specified. This should not happen in production. Using "%s".', subset)
if subset not in self.subsets:
raise ValueError(f'Invalid subset={subset}. Allowed subsets: {self.subsets} (check '
f'{self.__class__.__name__}.subsets() for this list).')
self.subset = subset
if train_test_split is None:
train_test_split = 0.3
logger.warning('No train_test_split was given. Defaulting to 0.3.')
if train_test_split <= 0.0 or train_test_split >= 1.0:
raise ValueError(f'Split must be a float with 0 < train_test_split < 1, but train_test_split={train_test_split}.')
if train_test_split not in self.predefined_splits:
logger.warning('Using a custom train_test_split=%f, and none of the pre-defined: %s.', train_test_split, self.predefined_splits)
self.train_test_split = train_test_split
self.compact = compact
self.train_validation_split = train_validation_split
self.with_inverse_triples = with_inverse_triples
self.with_self_loops = with_self_loops
self.random_seed = random_seed
@property
def cache_sub_directory_name(self) -> str:
"""Return the name of the sub-directory within the cache root."""
return self.__class__.__name__.lower()
def _get_split_name(self) -> str:
"""Get a unique split name."""
return str(hash((self.train_validation_split, self.train_test_split, self.random_seed)))
def load(
self,
force_download: bool = False,
) -> KnowledgeGraphAlignmentDataset:
"""
Load the dataset.
:param force_download:
Whether to force downloading the file, even if is already exists.
:return:
The dataset.
"""
# Ensure directory exists
self.cache_root.mkdir(parents=True, exist_ok=True)
# Check if files already exist
archive_path = self.cache_root / f'{self.archive_file_name}.{self.archive_type.default_file_extension}' # pylint: disable=no-member
if archive_path.is_file() and not force_download:
logger.info('Checking hash sums for existing file %s.', str(archive_path))
check_sums_match = check_hashsums(destination=archive_path, **self.hash_digests())
if not check_sums_match:
logger.warning('Checksums do not match. Forcing download.')
force_download = not check_sums_match
else:
force_download = True
if force_download:
# create session
session = requests.Session()
if 'drive.google.com' in self.url:
_id = self.url.split('?id=')[1]
response = resolve_google_drive_file_url(id_=_id, session=session)
else:
logger.info('Requesting dataset from %s', self.url)
response = session.get(url=self.url, stream=True)
# Real download
save_response_content(response=response, destination=archive_path)
check_sums_match = check_hashsums(destination=archive_path, **self.hash_digests())
if not check_sums_match:
raise ValueError('Checksums do not match!')
else:
logger.info('Skipping download from %s due to existing files in %s.', self.url, self.cache_root)
# Try to load from artifact
artifact_root = self.cache_root / 'preprocessed' / self.__class__.__name__.lower() / self.subset
# graphs
graphs = dict()
compactions = dict()
for side in SIDES:
graph = compaction = "load-from-archive"
# try to load from artifact
graph_path = artifact_root / f"{side.value}_graph"
compaction_path = graph_path / "compaction.json.xz"
if graph_path.is_dir():
try:
graph = KnowledgeGraph.load(directory=graph_path)
logger.info(f"Loaded preprocessed graph from {graph_path}")
except FileNotFoundError as error:
logger.error(f"Error occurred by loading graph from {graph_path}: {error}")
if compaction_path.is_file():
with lzma.open(compaction_path, "rt") as json_file:
compaction = json.load(json_file)
# load from archive only if necessary
if graph == "load-from-archive" or compaction == "load-from-archive":
with self.archive_type(archive_path=archive_path) as archive:
graph = self._load_graph(archive=archive, side=side)
# compact
graph, compaction = compact_graph(graph=graph, no_duplicates=True)[:2]
# save
graph.save(directory=graph_path)
with lzma.open(compaction_path, "wt") as json_file:
json.dump(
compaction,
fp=json_file,
sort_keys=True,
indent=2,
)
logger.info(f"Saved preprocessed graph to {graph_path}")
assert graph is not None
graphs[side], compactions[side] = graph, compaction
left_graph, right_graph = [graphs[side] for side in SIDES]
# alignment
# key0 = () -- the full alignment does not depend on any split parameters
all_alignment_path = artifact_root / "alignment.pt"
# key1 = (train_test_split, random_seed)
train_test_key = multi_hash(self.train_test_split, self.random_seed, hash_function="md5")
test_indices_path = artifact_root / "splits" / f"test_{train_test_key}.pt"
test_indices_path.parent.mkdir(parents=True, exist_ok=True)
# key2 = (train_test_split, train_validation_split, random_seed)
train_test_validation_key = multi_hash(self.train_test_split, self.random_seed, self.train_validation_split, hash_function="md5")
train_indices_path = artifact_root / "splits" / f"train_{train_test_validation_key}.pt"
validation_indices_path = artifact_root / "splits" / f"validation_{train_test_validation_key}.pt"
if all_alignment_path.is_file():
all_alignment = torch.load(all_alignment_path)
num_alignments = all_alignment.shape[1]
logger.info(f"Loaded {num_alignments} preprocessed alignments from {all_alignment_path}")
train_validation_indices = None
if test_indices_path.is_file():
test_indices = torch.load(test_indices_path)
logger.info(f"Loaded {test_indices.numel()} preprocessed test indices from {test_indices_path}")
else:
# train-test split
train_validation_indices, test_indices = split_tensor(tensor=torch.randperm(num_alignments), ratios=self.train_test_split, seed=self.random_seed)
torch.save(test_indices, test_indices_path)
logger.info(f"Saved {test_indices.numel()} preprocessed test indices to {test_indices_path}")
validation_indices = None
if train_indices_path.is_file():
train_indices = torch.load(train_indices_path)
logger.info(f"Loaded {train_indices.numel()} preprocessed train indices from {train_indices_path}")
if self.train_validation_split is not None:
validation_indices = torch.load(validation_indices_path)
logger.info(f"Loaded {validation_indices.numel()} preprocessed validation indices from {validation_indices_path}")
else:
if train_validation_indices is None:
train_validation_indices = torch.as_tensor(data=sorted(set(range(num_alignments)).difference(test_indices.tolist())))
if self.train_validation_split is not None:
train_indices, validation_indices = split_tensor(tensor=train_validation_indices, ratios=self.train_validation_split)
torch.save(validation_indices, validation_indices_path)
logger.info(f"Saved {validation_indices.numel()} preprocessed validation indices to {validation_indices_path}")
else:
train_indices = train_validation_indices
torch.save(train_indices, train_indices_path)
logger.info(f"Saved {train_indices.numel()} preprocessed train indices to {train_indices_path}")
# Compose alignment
alignment = EntityAlignment(**{
part: all_alignment[:, indices]
for part, indices in dict(
train=train_indices,
test=test_indices,
_validation=validation_indices,
).items()
})
else:
# load from archive only if necessary
with self.archive_type(archive_path=archive_path) as archive:
alignment = self._load_alignment(archive=archive, left_graph=left_graph, right_graph=right_graph)
# compact
alignment = compact_knowledge_graph_alignment(
alignment=alignment,
left_entity_compaction=compactions[MatchSideEnum.left],
right_entity_compaction=compactions[MatchSideEnum.right],
)
# (re-)split if necessary
if self.train_validation_split is not None:
if round(self.train_validation_split * (alignment.num_train + alignment.num_validation)) == alignment.num_train:
logger.debug('Data was already split')
else:
if alignment.num_validation > 0:
logger.warning('Re-splitting data.')
alignment = alignment.validation_split(train_ratio=self.train_validation_split, seed=self.random_seed)
logger.info('Train-Validation-Split')
# better format for saving
a = torch.cat([alignment.train, alignment.test, alignment.validation], dim=1)
# lexicographic sort
i1 = a[1].argsort()
i2 = a[0, i1].argsort()
i: torch.Tensor = i1[i2]
a = a[:, i]
torch.save(a, all_alignment_path)
logger.info(f"Store preprocessed alignments to {all_alignment_path}")
# inverse
i = i.argsort()
i_train, i_test, i_validation = i.split(
split_size=[
alignment.num_train,
alignment.num_test,
alignment.num_validation,
])
for path, indices in (
(test_indices_path, i_test),
(train_indices_path, i_train),
(validation_indices_path, i_validation),
):
torch.save(indices, path)
logger.info(f"Store preprocessed split to {path}")
dataset = KnowledgeGraphAlignmentDataset(
left_graph=left_graph,
right_graph=right_graph,
alignment=alignment,
)
if self.with_inverse_triples:
dataset = dataset.with_inverse_triples()
logger.info('Created inverse triples')
if self.with_self_loops:
dataset = dataset.with_self_loops()
logger.info('Created self-loops')
return dataset
def hash_digests(self) -> Mapping[str, str]:
"""Return the hash digests for file integrity check."""
return dict()
def _load_graph(
self,
archive: Archive,
side: MatchSideEnum,
) -> KnowledgeGraph:
"""
Load one graph from an archive.
:param archive:
The opened archive.
:param side:
The side.
:return:
The knowledge graph for this side.
"""
raise NotImplementedError
def _load_alignment(
self,
archive: Archive,
left_graph: KnowledgeGraph,
right_graph: KnowledgeGraph,
) -> EntityAlignment:
"""
Load the entity alignment from an opened archive.
:param archive:
The opened archive.
:param left_graph:
The left graph.
:param right_graph:
The right graph.
:return:
The alignment.
"""
raise NotImplementedError
class _DBP15k(OnlineKnowledgeGraphAlignmentDatasetLoader, abc.ABC):
"""
Superclass for DBP15k variants.
The datasets were first described in https://iswc2017.semanticweb.org/wp-content/uploads/papers/MainProceedings/188.pdf
> We selected DBpedia (2016-04) to build three cross-lingual datasets. DBpedia is a large-scale multi-lingual KB
> including inter-language links (ILLs) from entities of English version to those in other languages. In our
> experiments, we extracted 15 thousand ILLs with popular entities from English to Chinese, Japanese and French
> respectively, and considered them as our reference alignment (i.e., gold standards). Our strategy to extract
> datasets is that we randomly selected an ILL pair s.t. the involved entities have at least 4
> relationship triples and then extracted relationship and attribute infobox triples for selected entities.
This implementation only considers the relationship triples, and NOT the attributes triples.
"""
subsets = frozenset({'zh_en', 'ja_en', 'fr_en', })
class DBP15kJAPE(_DBP15k):
"""Smaller variant of DBP15k from JAPE repository."""
url = 'https://github.com/nju-websoft/JAPE/raw/master/data/dbp15k.tar.gz'
predefined_splits = frozenset({0.1, 0.2, 0.3, 0.4, 0.5})
archive_file_name = 'dbp15k_jape'
@property
def root(self) -> pathlib.Path:
"""Return the relative path within the archive."""
return pathlib.Path('dbp15k', self.subset, f'0_{str(int(100 * self.train_test_split))[0]}')
def hash_digests(self) -> Mapping[str, str]: # noqa: D102
return dict(
sha512='a3bcee42dd0ecfd7188be36c57b9ec6d57b2995d0cf6a17e8fd6f302b4e70d2fc354282f7f7130040bcdcc6c7a55eab7a3af4c361fb1fd98c376bda1490e3f9d',
)
def _load_graph(
self,
archive: Archive,
side: MatchSideEnum,
) -> KnowledgeGraph: # noqa: D102
lang_codes = self.subset.split('_')
lang_code = lang_codes[0] if side == MatchSideEnum.left else lang_codes[1]
num = 1 if side == MatchSideEnum.left else 2
triples = _load_tensor_from_csv(archive=archive, relative_path=self.root / f'triples_{num}')
entity_label_to_id = _load_label_to_id(archive=archive, relative_path=self.root / f'ent_ids_{num}')
relation_label_to_id = _load_label_to_id(archive=archive, relative_path=self.root / f'rel_ids_{num}')
return KnowledgeGraph(
triples=triples,
entity_label_to_id=entity_label_to_id,
relation_label_to_id=relation_label_to_id,
lang_code=lang_code,
dataset_name='dbp15kjape',
subset_name=self.subset
)
def _load_alignment(
self,
archive: Archive,
left_graph: KnowledgeGraph,
right_graph: KnowledgeGraph,
) -> EntityAlignment: # noqa: D102
return EntityAlignment(
train=_load_tensor_from_csv(archive=archive, relative_path=self.root / 'sup_ent_ids').t(),
test=_load_tensor_from_csv(archive=archive, relative_path=self.root / 'ref_ent_ids').t(),
)
def dataset_name_normalization(name: str) -> str:
"""Normalize a dataset name."""
return name.lower().replace('_', '')
def available_datasets() -> Mapping[str, Collection[str]]:
"""List available datasets with their subsets."""
return {
dataset_name_normalization(cls.__name__): cls.subsets
for cls in get_all_subclasses(base_class=OnlineKnowledgeGraphAlignmentDatasetLoader)
if not cls.__name__.startswith('_')
}
def get_dataset_by_name(
dataset_name: str,
subset_name: Optional[str] = None,
train_test_split: Optional[float] = None,
cache_root: Optional[Union[pathlib.Path, str]] = None,
compact: bool = True,
train_validation_split: Optional[float] = 0.8,
inverse_triples: bool = False,
self_loops: bool = False,
random_seed: int = 42,
force_download: bool = False,
) -> KnowledgeGraphAlignmentDataset:
"""Load a dataset specified by name and subset name.
:param dataset_name:
The case-insensitive dataset name. One of ("DBP15k", )
:param subset_name:
An optional subset name
:param train_test_split: 0 < x < 1
A specification of the train-test split to use.
:param cache_root:
An optional cache directory for extracted downloads. If None is given, use /tmp/{dataset_name}
:param compact:
Whether to apply compaction, i.e. ensure consecutive relation and entity IDs.
:param train_validation_split: 0 < x < 1
An optional train-validation split ratio.
:param inverse_triples:
Whether to generate inverse triples (o, p_inv, s) for every triple (s, p, o).
:param self_loops:
Whether to generate self-loops (e, self_loop, e) for each entity e.
:param random_seed:
The seed to use for random splitting.
:param force_download:
Force downloading the files even if they already exist.
:return:
A dataset, a collection of two KG, and an entity alignment.
"""
dataset_loader = get_dataset_loader_by_name(
dataset_name=dataset_name,
subset_name=subset_name,
train_test_split=train_test_split,
cache_root=cache_root,
compact=compact,
train_validation_split=train_validation_split,
inverse_triples=inverse_triples,
self_loops=self_loops,
random_seed=random_seed,
)
# load dataset
dataset = dataset_loader.load(force_download=force_download)
logger.info('Created dataset: %s', dataset)
return dataset
def get_dataset_loader_by_name(
dataset_name: str,
subset_name: Optional[str] = None,
train_test_split: Optional[float] = None,
cache_root: Optional[Union[pathlib.Path, str]] = None,
compact: bool = True,
train_validation_split: Optional[float] = 0.8,
inverse_triples: bool = False,
self_loops: bool = False,
random_seed: int = 42,
):
"""Create a dataset loader for a dataset specified by name and subset name.
:param dataset_name:
The case-insensitive dataset name. One of ("DBP15k", )
:param subset_name:
An optional subset name
:param train_test_split: 0 < x < 1
A specification of the train-test split to use.
:param cache_root:
An optional cache directory for extracted downloads. If None is given, use /tmp/{dataset_name}
:param compact:
Whether to apply compaction, i.e. ensure consecutive relation and entity IDs.
:param train_validation_split: 0 < x < 1
An optional train-validation split ratio.
:param inverse_triples:
Whether to generate inverse triples (o, p_inv, s) for every triple (s, p, o).
:param self_loops:
Whether to generate self-loops (e, self_loop, e) for each entity e.
:param random_seed:
The seed to use for random splitting.
:return:
A dataset loader.
"""
# Normalize train-test-split
if train_test_split is None:
train_test_split = 0.3
if isinstance(train_test_split, str):
train_test_split = int(train_test_split) / 100.
assert isinstance(train_test_split, float)
# Resolve data set loader class
dataset_loader_cls = get_subclass_by_name(
base_class=OnlineKnowledgeGraphAlignmentDatasetLoader,
name=dataset_name,
normalizer=dataset_name_normalization,
)
# Instantiate dataset loader
return dataset_loader_cls(
subset=subset_name,
train_test_split=train_test_split,
cache_root=cache_root,
compact=compact,
train_validation_split=train_validation_split,
with_inverse_triples=inverse_triples,
with_self_loops=self_loops,
random_seed=random_seed,
)
# File: rank-based-evaluation-main/src/kgm/data/knowledge_graph.py
# coding=utf-8
"""Various knowledge graph related data structures."""
import enum
import json
import logging
import lzma
import pathlib
from dataclasses import dataclass
from typing import Mapping, Optional, Tuple, Union
import torch
from ..utils.torch_utils import split_tensor
from ..utils.types import EntityIDs, IDAlignment, Triples
logger = logging.getLogger(__name__)
__all__ = [
'EntityAlignment',
'KnowledgeGraph',
'KnowledgeGraphAlignmentDataset',
'MatchSideEnum',
'SIDES',
'exact_self_alignment',
'get_erdos_renyi',
'get_other_side',
'get_synthetic_math_graph',
'sub_graph_alignment',
'validation_split',
]
@enum.unique
class MatchSideEnum(str, enum.Enum):
"""The graph selection for a entity alignment dataset."""
#: The left side
left = 'left'
#: The right side
right = 'right'
# The canonical order of match sides
SIDES = (MatchSideEnum.left, MatchSideEnum.right)
def get_other_side(side: MatchSideEnum) -> MatchSideEnum:
"""Get the enum of the other side."""
return MatchSideEnum.left if side == MatchSideEnum.right else MatchSideEnum.right
def add_self_loops(
triples: Triples,
entity_label_to_id: Mapping[str, int],
relation_label_to_id: Mapping[str, int],
self_loop_relation_name: Optional[str] = None,
) -> Tuple[Triples, Mapping[str, int]]:
"""Add self loops with dummy relation.
For each entity e, add (e, self_loop, e).
:param triples: shape: (n, 3)
The triples.
:param entity_label_to_id:
The mapping from entity labels to ids.
:param relation_label_to_id:
The mapping from relation labels to ids.
:param self_loop_relation_name:
The name of the self-loop relation. Must not exist.
:return:
cat(triples, self_loop_triples)
updated mapping
"""
if self_loop_relation_name is None:
self_loop_relation_name = 'self_loop'
p = triples[:, 1]
# check if name clashes might occur
if self_loop_relation_name in relation_label_to_id.keys():
raise AssertionError(f'There exists a relation "{self_loop_relation_name}".')
# Append inverse relations to translation table
max_relation_id = max(relation_label_to_id.values())
updated_relation_label_to_id = {r_label: r_id for r_label, r_id in relation_label_to_id.items()}
self_loop_relation_id = max_relation_id + 1
updated_relation_label_to_id.update({self_loop_relation_name: self_loop_relation_id})
assert len(updated_relation_label_to_id) == len(relation_label_to_id) + 1
# create self-loops triples
assert (p <= max_relation_id).all()
e = torch.tensor(sorted(entity_label_to_id.values()), dtype=torch.long) # pylint: disable=not-callable
p_self_loop = torch.ones_like(e) * self_loop_relation_id
self_loop_triples = torch.stack([e, p_self_loop, e], dim=1)
all_triples = torch.cat([triples, self_loop_triples], dim=0)
return all_triples, updated_relation_label_to_id
def add_inverse_triples(
triples: Triples,
relation_label_to_id: Mapping[str, int],
inverse_relation_postfix: Optional[str] = None,
) -> Tuple[Triples, Mapping[str, int]]:
"""Create and append inverse triples.
For each triple (s, p, o), an inverse triple (o, p_inv, s) is added.
:param triples: shape: (n, 3)
The triples.
:param relation_label_to_id:
The mapping from relation labels to ids.
:param inverse_relation_postfix:
A postfix to use for creating labels for the inverse relations.
:return:
cat(triples, inverse_triples)
updated mapping
"""
if inverse_relation_postfix is None:
inverse_relation_postfix = '_inv'
assert len(inverse_relation_postfix) > 0
s, p, o = triples[:, 0], triples[:, 1], triples[:, 2]
# check if name clashes might occur
suspicious_relations = sorted(k for k in relation_label_to_id.keys() if k.endswith(inverse_relation_postfix))
if len(suspicious_relations) > 0:
raise AssertionError(
f'Some of the inverse relations did already exist! Suspicious relations: {suspicious_relations}')
# Append inverse relations to translation table
num_relations = len(relation_label_to_id)
updated_relation_label_to_id = {r_label: r_id for r_label, r_id in relation_label_to_id.items()}
updated_relation_label_to_id.update({r_label + inverse_relation_postfix: r_id + num_relations for r_label, r_id in relation_label_to_id.items()})
assert len(updated_relation_label_to_id) == 2 * num_relations
# create inverse triples
assert (p < num_relations).all()
p_inv = p + num_relations
inverse_triples = torch.stack([o, p_inv, s], dim=1)
all_triples = torch.cat([triples, inverse_triples], dim=0)
return all_triples, updated_relation_label_to_id
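# A minimal usage sketch (not part of the original module) for
# add_inverse_triples; the relation labels are illustrative only.
def _demo_add_inverse_triples() -> None:
    """Illustrate add_inverse_triples: one (o, p_inv, s) per (s, p, o)."""
    toy_triples = torch.as_tensor([[0, 0, 1], [1, 1, 2]], dtype=torch.long)
    enriched, rel2id = add_inverse_triples(
        triples=toy_triples,
        relation_label_to_id={'likes': 0, 'knows': 1},
    )
    # inverse relation IDs are offset by the number of original relations
    assert rel2id == {'likes': 0, 'knows': 1, 'likes_inv': 2, 'knows_inv': 3}
    assert enriched[2].tolist() == [1, 2, 0]  # inverse of (0, likes, 1)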
@dataclass
class KnowledgeGraph:
"""A knowledge graph, a multi-relational graph, represented by triples."""
#: The triples, shape: (n, 3)
triples: Triples
#: The mapping from entity labels to IDs
entity_label_to_id: Optional[Mapping[str, int]]
#: The mapping from relations labels to IDs
relation_label_to_id: Optional[Mapping[str, int]]
#: Language code of the knowledge graph (e.g. zh, en, ...)
lang_code: Optional[str] = None
#: Dataset name
dataset_name: Optional[str] = None
#: Dataset subset name
subset_name: Optional[str] = None
#: Whether inverse triples have been added
inverse_triples: bool = False
#: Whether self-loops have been added.
self_loops: bool = False
@property
def num_triples(self) -> int:
"""Return the number of triples."""
return self.triples.shape[0]
@property
def num_entities(self) -> int:
"""Return the number of entities."""
return len(set(self.entity_label_to_id.values()))
@property
def num_relations(self) -> int:
"""Return the number of relations."""
return len(set(self.relation_label_to_id.values()))
def with_inverse_triples(
self,
inverse_relation_postfix: Optional[str] = None,
) -> 'KnowledgeGraph':
"""Return a KG with added inverse triples, if not already contained. Otherwise return reference to self."""
assert not self.self_loops
if self.inverse_triples:
return self
else:
enriched_triples, enriched_relation_label_to_id = add_inverse_triples(
triples=self.triples,
relation_label_to_id=self.relation_label_to_id,
inverse_relation_postfix=inverse_relation_postfix,
)
return KnowledgeGraph(
triples=enriched_triples,
entity_label_to_id=self.entity_label_to_id,
relation_label_to_id=enriched_relation_label_to_id,
inverse_triples=True,
self_loops=False,
lang_code=self.lang_code,
dataset_name=self.dataset_name,
subset_name=self.subset_name
)
def with_self_loops(
self,
self_loop_relation_name: Optional[str] = None,
) -> 'KnowledgeGraph':
"""Return a KG with added self-loops, if not already contained. Otherwise return reference to self."""
if self.self_loops:
return self
else:
enriched_triples, enriched_relation_label_to_id = add_self_loops(
triples=self.triples,
entity_label_to_id=self.entity_label_to_id,
relation_label_to_id=self.relation_label_to_id,
self_loop_relation_name=self_loop_relation_name,
)
return KnowledgeGraph(
triples=enriched_triples,
entity_label_to_id=self.entity_label_to_id,
relation_label_to_id=enriched_relation_label_to_id,
inverse_triples=self.inverse_triples,
self_loops=True,
lang_code=self.lang_code,
dataset_name=self.dataset_name,
subset_name=self.subset_name
)
def __str__(self): # noqa: D105
return f'{self.__class__.__name__}(num_triples={self.num_triples}, num_entities={self.num_entities}, num_relations={self.num_relations}, inverse_triples={self.inverse_triples}, self_loops={self.self_loops})'
def get_relation_label_by_id(self, relation_id: int) -> Optional[str]:
"""Lookup a relation label for a given ID."""
matches = [label for (label, id_) in self.relation_label_to_id.items() if id_ == relation_id]
if len(matches) == 0:
return None
if len(matches) > 1:
raise ValueError(f'More than one relation with ID {relation_id}')
return matches[0]
def save(self, directory: pathlib.Path) -> None:
"""Save the KG to a directory."""
# ensure the directory exists
directory.mkdir(parents=True, exist_ok=True)
        # only plain graphs (without inverse triples or self-loops) can be saved
        assert not self.inverse_triples
        assert not self.self_loops
        # save triples
        torch.save(self.triples, directory / 'triples.pth')
# save label-to-id
with lzma.open(directory / 'metadata.json.xz', 'wt') as json_file:
json.dump(
obj=dict(
entity_label_to_id=self.entity_label_to_id,
relation_label_to_id=self.relation_label_to_id,
lang_code=self.lang_code,
dataset_name=self.dataset_name,
subset_name=self.subset_name,
),
fp=json_file,
sort_keys=True,
indent=2,
)
@staticmethod
def load(directory: pathlib.Path) -> 'KnowledgeGraph':
"""Load the KG from a directory."""
triples = torch.load(directory / 'triples.pth')
with lzma.open(directory / 'metadata.json.xz', 'r') as json_file:
meta = json.load(json_file)
return KnowledgeGraph(
triples=triples,
entity_label_to_id=meta['entity_label_to_id'],
relation_label_to_id=meta['relation_label_to_id'],
lang_code=meta['lang_code'],
dataset_name=meta['dataset_name'],
subset_name=meta['subset_name'],
)
@dataclass
class EntityAlignment:
"""An entity alignment between two knowledge graphs."""
#: The entity alignment used for training, shape: (2, num_train_alignments)
train: IDAlignment
#: The entity alignment used for testing, shape: (2, num_test_alignments)
test: IDAlignment
#: The entity alignment used for validation, shape: (2, num_validation_alignments)
_validation: Optional[IDAlignment] = None
@property
def validation(self) -> IDAlignment:
"""
Return the validation alignment.
:return: shape: (2, num_val_alignments), dtype=long
The validation alignment.
"""
if self._validation is None:
return torch.empty(2, 0, dtype=torch.long, device=self.train.device)
return self._validation
@property
def num_train(self) -> int:
"""Return the number of training alignment pairs."""
return self.train.shape[1]
@property
def num_validation(self) -> int:
"""Return the number of validation alignment pairs."""
return self.validation.shape[1]
@property
def num_test(self) -> int:
"""Return the number of test alignment pairs."""
return self.test.shape[1]
@property
def all(self) -> IDAlignment:
"""
Return the concatenation of all alignments parts.
:return: shape: (2, num_total_alignments), dtype=long
All alignments (train, validation, test)
"""
return torch.cat([self.train, self.validation, self.test], dim=1)
def to_dict(self) -> Mapping[str, IDAlignment]:
"""Convert the alignment to a dictionary with keys {'train', 'test'}, and optionally 'validation'."""
return {
key: value
for key, value in zip(
('train', 'test', 'validation'),
(self.train, self.test, self.validation)
)
if value.numel() > 0
}
def validation_split(self, train_ratio: float, seed: Optional[int] = None) -> 'EntityAlignment':
"""Return a new alignment object where the training alignments have been split to train, and validation."""
if train_ratio <= 0. or train_ratio >= 1.:
raise ValueError(f'ratio must be in (0, 1), but is {train_ratio}')
return validation_split(alignment=self, train_ratio=train_ratio, seed=seed)
def __str__(self): # noqa: D105
return f'{self.__class__.__name__}(num_train={self.num_train}, num_test={self.num_test}, num_val={self.num_validation})'
@staticmethod
def from_full_alignment(
alignment: IDAlignment,
train_test_split: Optional[float],
train_validation_split: Optional[float],
seed: Optional[int] = 42,
) -> 'EntityAlignment':
"""
Create an entity alignment by splitting a given alignment tensor.
        If requested, the alignment is first split into a train and a test part. Afterwards, if requested, the train
        part is further split into train and validation.
:param alignment: shape: (2, total_num_alignments)
The ID-based alignment.
:param train_test_split:
The train-test split ratio.
:param train_validation_split:
The train-validation split ratio.
:param seed:
The seed to be used for splitting.
:return:
An entity alignment.
"""
if train_test_split is None:
train_test_split = 1.
if train_validation_split is None:
train_validation_split = 1.
test_train_split = 1. - train_test_split
# pylint: disable=unbalanced-tuple-unpacking
test, train, validation = split_tensor(alignment, ratios=[test_train_split, train_validation_split], shuffle=True, dim=1, seed=seed)
return EntityAlignment(
train=train,
test=test,
_validation=validation,
)
def __getitem__(self, item: str) -> IDAlignment: # noqa: D105
if item == 'train':
return self.train
elif item == 'test':
return self.test
elif item == 'validation':
return self.validation
else:
raise KeyError(item)
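# A minimal sketch (toy data, not part of the original module) of constructing
# an EntityAlignment directly from ID tensors; the pairs are illustrative only.
def _demo_entity_alignment() -> None:
    """Illustrate direct construction and the derived properties."""
    train = torch.as_tensor([[0, 1], [0, 2]], dtype=torch.long)  # (2, num_train)
    test = torch.as_tensor([[2], [1]], dtype=torch.long)
    alignment = EntityAlignment(train=train, test=test)
    assert (alignment.num_train, alignment.num_validation, alignment.num_test) == (2, 0, 1)
    assert alignment.all.shape == (2, 3)  # train + (empty) validation + test
    assert alignment['train'] is train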
class KnowledgeGraphAlignmentDataset:
"""A knowledge graph alignment data set, comprising a pair of graphs, and a (partial) alignment of their entities."""
#: The first knowledge graph
left_graph: KnowledgeGraph
#: The second knowledge graph
right_graph: KnowledgeGraph
#: The alignment
alignment: EntityAlignment
def __init__(
self,
left_graph: KnowledgeGraph,
right_graph: KnowledgeGraph,
alignment: EntityAlignment,
):
"""
Initialize the alignment dataset.
:param left_graph:
The left graph.
:param right_graph:
The right graph.
:param alignment:
The alignment between the graphs.
"""
self.left_graph = left_graph
self.right_graph = right_graph
self.alignment = alignment
def validation_split(self, train_ratio: float, seed: Optional[int] = None) -> 'KnowledgeGraphAlignmentDataset':
"""Return the dataset, where the training alignment part has been split into train and validation part."""
return KnowledgeGraphAlignmentDataset(
left_graph=self.left_graph,
right_graph=self.right_graph,
alignment=self.alignment.validation_split(train_ratio=train_ratio, seed=seed),
)
@property
def triples(self) -> Mapping[MatchSideEnum, Triples]:
"""Return a dictionary of the side to the corresponding triples on this side."""
return {
MatchSideEnum.left: self.left_graph.triples,
MatchSideEnum.right: self.right_graph.triples,
}
@property
def graphs(self) -> Mapping[MatchSideEnum, KnowledgeGraph]:
"""Return a dictionary of the side to KG on this side."""
return {
MatchSideEnum.left: self.left_graph,
MatchSideEnum.right: self.right_graph,
}
@property
def num_nodes(self) -> Mapping[MatchSideEnum, int]:
"""Return a dictionary of side to number of entities."""
return {
MatchSideEnum.left: self.left_graph.num_entities,
MatchSideEnum.right: self.right_graph.num_entities,
}
@property
def num_exclusives(self) -> Mapping[MatchSideEnum, int]:
"""Return a dictionary of side to number of exclusive nodes."""
return {
side: self.num_nodes[side] - len(set(aligned_on_side.tolist()))
for side, aligned_on_side in zip(SIDES, self.alignment.all)
}
@property
def exclusives(self) -> Mapping[MatchSideEnum, EntityIDs]:
"""Return a dictionary of side to ID of exclusive entities."""
return {
side: torch.as_tensor(
data=sorted(set(range(self.graphs[side].num_entities)).difference(aligned_on_side.tolist())),
dtype=torch.long,
)
for side, aligned_on_side in zip(
                SIDES,
self.alignment.all,
)
}
@property
def dataset_name(self) -> str:
"""Return the name of the dataset."""
return self.left_graph.dataset_name
@property
def subset_name(self) -> str:
"""Return the name of the subset."""
return self.left_graph.subset_name
def __str__(self): # noqa: D105
return f'{self.__class__.__name__}(left={self.left_graph}, right={self.right_graph}, align={self.alignment})'
def to_dict(self) -> Mapping[str, Union[KnowledgeGraph, EntityAlignment]]:
"""Return a dictionary view of the dataset."""
return dict(
left_graph=self.left_graph,
right_graph=self.right_graph,
alignment=self.alignment,
)
def with_inverse_triples(self) -> 'KnowledgeGraphAlignmentDataset':
"""Return the dataset where both sides have been extended by inverse triples."""
return KnowledgeGraphAlignmentDataset(
left_graph=self.left_graph.with_inverse_triples(),
right_graph=self.right_graph.with_inverse_triples(),
alignment=self.alignment,
)
def with_self_loops(self) -> 'KnowledgeGraphAlignmentDataset':
"""Return the dataset where both sides have been extended by self-loops."""
return KnowledgeGraphAlignmentDataset(
left_graph=self.left_graph.with_self_loops(),
right_graph=self.right_graph.with_self_loops(),
alignment=self.alignment,
)
def validation_split(
alignment: EntityAlignment,
train_ratio: float = 0.8,
seed: int = 42,
) -> EntityAlignment:
"""
Split the train part of an entity alignment into train and validation.
:param alignment:
The alignment.
:param train_ratio: 0 < x < 1
The ratio of alignments to use for the train part.
:param seed:
The seed to use for randomisation.
:return:
An entity alignment with the updated train and validation part.
"""
# Check input
if not (0. < train_ratio < 1.):
raise ValueError(f'train_ratio must be between 0 and 1, but is {train_ratio}')
# re-combine train and validation, if already split
num_total = alignment.num_train
pool = alignment.train
if alignment.num_validation > 0:
num_total += alignment.num_validation
pool = torch.cat([pool, alignment.validation], dim=1)
# Delegate to tensor-based split.
# pylint: disable=unbalanced-tuple-unpacking
train_alignments, validation_alignments = split_tensor(tensor=pool, ratios=train_ratio, dim=1, seed=seed)
# Construct new alignment object.
return EntityAlignment(
train=train_alignments,
_validation=validation_alignments,
test=alignment.test,
)
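# A minimal sketch (toy data, not part of the original module) of re-splitting
# an alignment; it assumes split_tensor partitions the pool along dim=1 so the
# total number of pairs is preserved.
def _demo_validation_split() -> None:
    """Illustrate validation_split on a toy alignment without a test part."""
    pool = torch.arange(10, dtype=torch.long).unsqueeze(dim=0).repeat(2, 1)
    alignment = EntityAlignment(train=pool, test=pool[:, :0])
    split = validation_split(alignment=alignment, train_ratio=0.8, seed=0)
    assert split.num_train + split.num_validation == 10
    assert split.num_test == 0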
def exact_self_alignment(
graph: KnowledgeGraph,
train_percentage: float = 0.5,
) -> KnowledgeGraphAlignmentDataset:
"""
    Create an alignment between a graph and a randomly permuted version of it.
:param graph: The graph.
:param train_percentage: The percentage of training alignments.
:return: A knowledge graph alignment dataset.
"""
# Create a random permutation as alignment
full_alignment = torch.stack([
torch.arange(graph.num_entities, dtype=torch.long),
torch.randperm(graph.num_entities)
], dim=0)
# shuffle
full_alignment = full_alignment[:, torch.randperm(graph.num_entities)]
# create mapping
mapping = {int(a): int(b) for a, b in full_alignment.t()}
# translate triples
h, r, t = graph.triples.t()
h_new, t_new = [torch.tensor([mapping[int(e)] for e in es], dtype=torch.long) for es in (h, t)] # pylint: disable=not-callable
r_new = r.detach().clone()
new_triples = torch.stack([h_new, r_new, t_new], dim=-1)
# compose second KG
second_graph = KnowledgeGraph(
triples=new_triples,
entity_label_to_id={k: mapping[v] for k, v in graph.entity_label_to_id.items()},
relation_label_to_id=graph.relation_label_to_id.copy(),
inverse_triples=False,
self_loops=False,
)
second_graph.inverse_triples = graph.inverse_triples
second_graph.self_loops = graph.self_loops
# split alignment
split_id = int(train_percentage * graph.num_entities)
alignment = EntityAlignment(
train=full_alignment[:, :split_id],
test=full_alignment[:, split_id:],
)
return KnowledgeGraphAlignmentDataset(
left_graph=graph,
right_graph=second_graph,
alignment=alignment,
)
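# A minimal sketch (not part of the original module) combining the synthetic
# graph generator defined below with exact_self_alignment; the sizes are
# illustrative only.
def _demo_exact_self_alignment() -> None:
    """Illustrate exact_self_alignment on a small synthetic graph."""
    graph = get_synthetic_math_graph(num_entities=10)
    dataset = exact_self_alignment(graph=graph, train_percentage=0.5)
    # every entity is aligned exactly once, split into train and test
    assert dataset.alignment.num_train + dataset.alignment.num_test == 10
    # the permuted graph has exactly the same triples, up to renaming
    assert dataset.left_graph.num_triples == dataset.right_graph.num_triples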
def sub_graph_alignment(
graph: KnowledgeGraph,
overlap: float = 0.5,
ratio: float = 0.7,
train_test_split: float = 0.5,
train_validation_split: Optional[float] = 0.8,
) -> KnowledgeGraphAlignmentDataset:
"""
Create a synthetic entity alignment dataset, where both sides are random subgraphs from a larger one.
:param graph:
The source KG.
    :param overlap:
        The fraction of overlapping (shared) entities.
    :param ratio:
        The fraction of entities exclusive to the left KG.
    :param train_test_split:
        The ratio for train-test splitting the aligned entities.
    :param train_validation_split:
        The ratio for further splitting the train alignments into train and validation parts.
    :return:
        An entity alignment dataset.
"""
# split entities
entities = torch.arange(graph.num_entities)
# pylint: disable=unbalanced-tuple-unpacking
common, left, right = split_tensor(tensor=entities, ratios=[overlap, ratio])
left = torch.cat([common, left])
right = torch.cat([common, right])
# create alignment
alignment = EntityAlignment.from_full_alignment(
alignment=torch.arange(common.shape[0]).unsqueeze(dim=0).repeat(2, 1),
train_test_split=train_test_split,
train_validation_split=train_validation_split,
)
# induced subgraph
graphs = []
    for ent in [left, right]:
        # preserve order (common entities first) so that the i-th shared entity
        # receives ID i on both sides, matching the alignment constructed above
        ent = ent.tolist()
        entity_label_to_id = {
            str(old_id): new_id
            for new_id, old_id in enumerate(ent)
        }
        ent = set(ent)
triples = torch.as_tensor(data=[
(entity_label_to_id[str(h)], r, entity_label_to_id[str(t)])
for h, r, t in graph.triples.tolist()
if (h in ent and t in ent)
], dtype=torch.long)
graphs.append(KnowledgeGraph(
triples=triples,
entity_label_to_id=entity_label_to_id,
relation_label_to_id=graph.relation_label_to_id,
))
return KnowledgeGraphAlignmentDataset(
left_graph=graphs[0],
right_graph=graphs[1],
alignment=alignment,
)
def get_erdos_renyi(
num_entities: int,
num_relations: int,
num_triples: int,
) -> KnowledgeGraph:
"""
    Generate a synthetic KG by sampling random triples (an Erdos-Renyi-style random graph with random edge typing).
    :param num_entities: >0
        The number of entities.
    :param num_relations: >0
        The number of relations.
    :param num_triples: >0
        The number of triples to sample.
:return:
A KG.
"""
triples = torch.stack([
torch.randint(max_id, size=(num_triples,))
for max_id in (num_entities, num_relations, num_entities)
], dim=-1)
return KnowledgeGraph(
triples=triples,
entity_label_to_id={str(i): i for i in range(num_entities)},
relation_label_to_id={str(i): i for i in range(num_relations)},
dataset_name='erdos_renyi',
        subset_name=f'{num_entities}-{num_relations}-{num_triples}',
)
def get_synthetic_math_graph(
num_entities: int,
) -> KnowledgeGraph:
"""
Generate a synthetic KG of positive integers, linked by modulo relations.
:param num_entities:
The number of entities.
:return:
A KG.
"""
entities = list(range(num_entities))
relations = list(range(num_entities))
triples = [(e, r, (e + r) % num_entities) for r in relations for e in entities]
return KnowledgeGraph(
triples=torch.as_tensor(triples, dtype=torch.long),
entity_label_to_id={str(e): e for e in entities},
relation_label_to_id={'+' + str(r): r for r in relations},
)
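# A minimal sketch (not part of the original module) of the modulo graph
# defined above; num_entities=5 is illustrative only.
def _demo_synthetic_math_graph() -> None:
    """Illustrate the triple structure of get_synthetic_math_graph."""
    graph = get_synthetic_math_graph(num_entities=5)
    assert graph.num_entities == 5 and graph.num_relations == 5
    assert graph.num_triples == 25  # one triple per (entity, relation) pair
    assert [4, 2, 1] in graph.triples.tolist()  # relation '+2': (4 + 2) % 5 == 1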
| 25,646
| 33.287433
| 215
|
py
|
rank-based-evaluation
|
rank-based-evaluation-main/src/kgm/data/__init__.py
|
# coding=utf-8
"""Data loading and representation."""
from .knowledge_graph import (
EntityAlignment,
KnowledgeGraph,
KnowledgeGraphAlignmentDataset,
MatchSideEnum,
SIDES,
exact_self_alignment,
get_erdos_renyi,
get_other_side,
get_synthetic_math_graph,
sub_graph_alignment,
validation_split,
)
from .loaders import available_datasets, get_dataset_by_name, get_dataset_loader_by_name
__all__ = [
'EntityAlignment',
'KnowledgeGraph',
'KnowledgeGraphAlignmentDataset',
'MatchSideEnum',
'SIDES',
'available_datasets',
'exact_self_alignment',
'get_dataset_by_name',
'get_dataset_loader_by_name',
'get_erdos_renyi',
'get_other_side',
'get_synthetic_math_graph',
'sub_graph_alignment',
'validation_split',
]
| 802
| 22.617647
| 88
|
py
|
rank-based-evaluation
|
rank-based-evaluation-main/executables/adjusted_ranking_experiments.py
|
# coding=utf-8
"""Evaluation of different training and test sizes."""
import argparse
import logging
import random
import mlflow
import numpy
import torch
import tqdm
from kgm.data import get_dataset_by_name
from kgm.eval.matching import evaluate_matching_model
from kgm.models import GCNAlign
from kgm.modules import MarginLoss, SampledMatchingLoss, get_similarity
from kgm.training.matching import AlignmentModelTrainer
from kgm.utils.mlflow_utils import log_metrics_to_mlflow, log_params_to_mlflow
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='dbp15k_jape')
parser.add_argument('--subset', type=str, default='zh_en')
parser.add_argument('--num_epochs', type=int, default=2_000)
parser.add_argument('--iterations', type=int, default=5)
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--tracking_uri', type=str, default='http://localhost:5000')
args = parser.parse_args()
# Mlflow settings
logging.info(f'Logging to MLFlow @ {args.tracking_uri}')
mlflow.set_tracking_uri(uri=args.tracking_uri)
mlflow.set_experiment('adjusted_ranking_experiments')
# Determine device
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
logging.info(f"Using device={device}")
# load dataset
dataset = get_dataset_by_name(
dataset_name=args.dataset,
subset_name=args.subset,
inverse_triples=True, # GCNAlign default
self_loops=True, # GCNAlign default
)
for num_train in [
0,
10,
20,
50,
100,
200,
500,
1000,
2000,
3000,
5000,
7500,
]:
ea_full = dataset.alignment.all
i_all = ea_full.shape[1]
i_train = num_train
# store optimal evaluation batch size for different sizes
for iteration in tqdm.trange(args.iterations, unit='run', unit_scale=True):
# fix random seed
torch.manual_seed(iteration)
numpy.random.seed(iteration)
random.seed(iteration)
# train-test split
assert ea_full.shape[0] == 2
ea_full = ea_full[:, torch.randperm(i_all)]
ea_train, ea_test = ea_full[:, :i_train], ea_full[:, i_train:]
# instantiate model
model = GCNAlign(
dataset=dataset,
embedding_dim=200,
n_layers=2,
use_conv_weights=False,
).to(device=device)
# instantiate similarity
similarity = get_similarity(
similarity="l1",
transformation="negative",
)
if i_train > 0:
# instantiate loss
loss = SampledMatchingLoss(
similarity=similarity,
base_loss=MarginLoss(margin=3.),
num_negatives=50,
)
# instantiate trainer
trainer = AlignmentModelTrainer(
model=model,
similarity=similarity,
dataset=dataset,
loss=loss,
optimizer_cls="adam",
optimizer_kwargs=dict(
lr=1.0,
),
)
# train
trainer.train(num_epochs=args.num_epochs)
# evaluate with different test set sizes
total_num_test_alignments = ea_test.shape[1]
test_sizes = list(range(1_000, total_num_test_alignments, 1_000))
results = dict(evaluate_matching_model(
model=model,
alignments={
k: ea_test[:, :k]
for k in test_sizes
},
similarity=similarity,
)[0])
# store results
for size, result in results.items():
# start experiment
with mlflow.start_run():
log_params_to_mlflow(config=dict(
dataset=args.dataset,
subset=args.subset,
num_epochs=args.num_epochs,
num_train_alignments=i_train,
num_test_alignments=ea_test[:, :size].shape[1],
seed=iteration,
))
log_metrics_to_mlflow(metrics=result)
if __name__ == '__main__':
main()
| 4,618
| 30.855172
| 87
|
py
|
rank-based-evaluation
|
rank-based-evaluation-main/executables/summarize.py
|
"""Script to generate the plot #test alignments vs. #train alignments."""
import argparse
import logging
import mlflow
import pandas
import seaborn
from matplotlib import pyplot as plt
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--tracking_uri', type=str, default='http://localhost:5000')
args = parser.parse_args()
mlflow.set_tracking_uri(uri=args.tracking_uri)
experiment_name = "adjusted_ranking_experiments"
experiment = mlflow.get_experiment_by_name(name=experiment_name)
if experiment is None:
raise ValueError(f"Could not find experiment {experiment_name} at {args.tracking_uri}")
df = mlflow.search_runs(
experiment_ids=[experiment.experiment_id],
)
# select relevant columns
rename = {
"params.num_train_alignments": "#train",
"params.num_test_alignments": "#test",
"metrics.mean_rank": "MR",
"metrics.hits_at_1": "H@1",
"metrics.adjusted_mean_rank_index": "AMRI",
}
df = df.rename(columns=rename)
order = [
"MR",
"H@1",
"AMRI",
]
# convert to int
for col in ["#train", "#test"]:
df[col] = pandas.to_numeric(df[col])
plt.rc('font', size=14)
fig, axes = plt.subplots(nrows=3, figsize=(8, 10), sharex=True)
for y, ax in zip(order, axes):
seaborn.lineplot(
data=df,
x="#test",
y=y,
hue="#train",
legend=None if y != "H@1" else 'full', # 'full',
palette="viridis",
ci=100,
ax=ax,
)
ax.grid()
if y == "MR":
ax.set_ylim(1, None)
ax.set_yscale('log')
else:
ax.set_ylim(0, 1)
plt.xlim(df["#test"].min(), df["#test"].max())
plt.subplots_adjust(wspace=None, hspace=None, left=.09, right=.95, bottom=0.07, top=.99)
plt.savefig('eval.pdf')
if __name__ == '__main__':
main()
| 2,006
| 26.875
| 95
|
py
|
rank-based-evaluation
|
rank-based-evaluation-main/executables/degree_investigation.py
|
"""Script to generate the evaluation for degree inductive bias."""
import numpy
import torch
from matplotlib import pyplot as plt
from scipy.stats import pearsonr, spearmanr
from kgm.data import SIDES, get_dataset_by_name
from kgm.models import GCNAlign, PureEmbeddingModel
def degree_vs_norm(
    dataset
):
    """Correlate node degree with embedding norm for untrained models."""
# calculate degree
degrees = dict()
for i, side in enumerate(SIDES):
graph = dataset.graphs[side]
degree = torch.ones(graph.num_entities, dtype=torch.long) # self-loops
for col in [0, 2]:
idx, cnt = torch.unique(graph.triples[:, col], return_counts=True)
degree[idx] += cnt
degrees[side] = degree
# just random vectors
pure_model = PureEmbeddingModel(
dataset=dataset,
embedding_dim=200,
)
# untrained gcn model on random vectors
gcn_model = GCNAlign(
dataset=dataset,
embedding_dim=200,
n_layers=2,
use_conv_weights=False,
)
for label, model in dict(
gcn=gcn_model,
pure=pure_model,
).items():
norm = {
side: vectors.norm(dim=-1).detach().numpy()
for side, vectors in model().items()
}
x, y = [], []
for side, deg in degrees.items():
x.append(deg)
y.append(norm[side])
x = numpy.concatenate(x)
y = numpy.concatenate(y)
print(label, spearmanr(y, x))
def degree_correlation(dataset):
    """Plot and quantify the degree correlation between aligned nodes."""
# compute degree for all aligned nodes
degree = torch.empty_like(dataset.alignment.all)
for i, side in enumerate(SIDES):
graph = dataset.graphs[side]
deg = torch.ones(graph.num_entities, dtype=torch.long) # self-loops
for col in [0, 2]:
idx, cnt = torch.unique(graph.triples[:, col], return_counts=True)
deg[idx] += cnt
degree[i] = deg[dataset.alignment.all[i]]
# compute correlation
rho_p, p_p = pearsonr(*degree.numpy())
rho_s, p_s = spearmanr(*degree.numpy())
# plot
plt.clf()
plt.figure(figsize=(6, 6))
plt.scatter(*degree.numpy(), marker=".", color="black")
plt.yscale("log")
plt.xscale("log")
plt.axis("equal")
plt.xlabel("degree " + dataset.graphs[SIDES[0]].lang_code + " [log]")
plt.ylabel("degree " + dataset.graphs[SIDES[1]].lang_code + " [log]")
plt.title(rf"Pearson $\rho$={rho_p:2.2%} (p={p_p}); Spearman $\rho$={rho_s:2.2%} (p={p_s})")
plt.tight_layout()
plt.savefig("degree_correlation.pdf")
return degree
def main():
# get dataset
dataset = get_dataset_by_name(
dataset_name='dbp15k_jape',
subset_name='zh_en',
)
# degree correlation of aligned nodes
degree_correlation(dataset=dataset)
# degree vs. embedding norm
degree_vs_norm(dataset=dataset)
if __name__ == '__main__':
main()
| 2,854
| 27.55
| 96
|
py
|
pydot
|
pydot-master/setup.py
|
#!/usr/bin/env python
"""Installation script."""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import ast
import codecs
import os
import re
CURRENT_DIR = os.path.dirname(__file__)
def get_long_description():
readme_path = os.path.join(CURRENT_DIR, "README.md")
with codecs.open(readme_path, encoding="utf8") as ld_file:
return ld_file.read()
def get_version():
pydot_py = os.path.join(CURRENT_DIR, "src", "pydot", "__init__.py")
_version_re = re.compile(r"__version__\s+=\s+(?P<version>.*)")
with codecs.open(pydot_py, "r", encoding="utf8") as f:
match = _version_re.search(f.read())
version = match.group("version") if match is not None else '"unknown"'
return str(ast.literal_eval(version))
setup(
name="pydot",
version=get_version(),
package_dir={"": "src"},
packages=["pydot"],
description="Python interface to Graphviz's Dot",
author="Ero Carrera",
author_email="ero.carrera@gmail.com",
maintainer="Peter Nowee",
maintainer_email="peter@peternowee.com",
url="https://github.com/pydot/pydot",
project_urls={
"Changelog": "https://github.com/pydot/pydot/blob/master/ChangeLog",
"Bug Tracker": "https://github.com/pydot/pydot/issues",
},
license="MIT",
keywords="graphviz dot graphs visualization",
platforms=["any"],
python_requires=">=3.5",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Software Development :: Libraries :: Python Modules",
],
long_description=get_long_description(),
long_description_content_type="text/markdown",
install_requires=["pyparsing>=2.1.4"],
extras_require={
"dev": [
"chardet",
"black==21.5b2; python_version > '3.5'",
],
},
tests_require=["chardet"],
)
| 2,466
| 30.628205
| 78
|
py
|
pydot
|
pydot-master/src/pydot/core.py
|
"""An interface to GraphViz."""
import copy
import io
import errno
import os
import re
import subprocess
import sys
import tempfile
import warnings
import pydot
try:
from pydot import dot_parser
except Exception as e:
warnings.warn(
"`pydot` could not import `dot_parser`, "
"so `pydot` will be unable to parse DOT files. "
"The error was: {e}".format(e=e)
)
# fmt: off
GRAPH_ATTRIBUTES = {
"Damping", "K", "URL", "aspect", "bb", "bgcolor",
"center", "charset", "clusterrank", "colorscheme", "comment", "compound",
"concentrate", "defaultdist", "dim", "dimen", "diredgeconstraints",
"dpi", "epsilon", "esep", "fontcolor", "fontname", "fontnames",
"fontpath", "fontsize", "id", "label", "labeljust", "labelloc",
"landscape", "layers", "layersep", "layout", "levels", "levelsgap",
"lheight", "lp", "lwidth", "margin", "maxiter", "mclimit", "mindist",
"mode", "model", "mosek", "nodesep", "nojustify", "normalize", "nslimit",
"nslimit1", "ordering", "orientation", "outputorder", "overlap",
"overlap_scaling", "pack", "packmode", "pad", "page", "pagedir",
"quadtree", "quantum", "rankdir", "ranksep", "ratio", "remincross",
"repulsiveforce", "resolution", "root", "rotate", "searchsize", "sep",
"showboxes", "size", "smoothing", "sortv", "splines", "start",
"stylesheet", "target", "truecolor", "viewport", "voro_margin",
# for subgraphs
"rank"
}
EDGE_ATTRIBUTES = {
"URL", "arrowhead", "arrowsize", "arrowtail",
"color", "colorscheme", "comment", "constraint", "decorate", "dir",
"edgeURL", "edgehref", "edgetarget", "edgetooltip", "fontcolor",
"fontname", "fontsize", "headURL", "headclip", "headhref", "headlabel",
"headport", "headtarget", "headtooltip", "href", "id", "label",
"labelURL", "labelangle", "labeldistance", "labelfloat", "labelfontcolor",
"labelfontname", "labelfontsize", "labelhref", "labeltarget",
"labeltooltip", "layer", "len", "lhead", "lp", "ltail", "minlen",
"nojustify", "penwidth", "pos", "samehead", "sametail", "showboxes",
"style", "tailURL", "tailclip", "tailhref", "taillabel", "tailport",
"tailtarget", "tailtooltip", "target", "tooltip", "weight",
"rank"
}
NODE_ATTRIBUTES = {
"URL", "color", "colorscheme", "comment",
"distortion", "fillcolor", "fixedsize", "fontcolor", "fontname",
"fontsize", "group", "height", "id", "image", "imagescale", "label",
"labelloc", "layer", "margin", "nojustify", "orientation", "penwidth",
"peripheries", "pin", "pos", "rects", "regular", "root", "samplepoints",
"shape", "shapefile", "showboxes", "sides", "skew", "sortv", "style",
"target", "tooltip", "vertices", "width", "z",
# The following are attributes dot2tex
"texlbl", "texmode"
}
CLUSTER_ATTRIBUTES = {
"K", "URL", "bgcolor", "color", "colorscheme",
"fillcolor", "fontcolor", "fontname", "fontsize", "label", "labeljust",
"labelloc", "lheight", "lp", "lwidth", "nojustify", "pencolor",
"penwidth", "peripheries", "sortv", "style", "target", "tooltip"
}
# fmt: on
DEFAULT_PROGRAMS = {
"dot",
"twopi",
"neato",
"circo",
"fdp",
"sfdp",
}
def is_windows():
# type: () -> bool
return os.name == "nt"
def is_anaconda():
# type: () -> bool
import glob
conda_pattern = os.path.join(sys.prefix, "conda-meta\\graphviz*.json")
return glob.glob(conda_pattern) != []
def get_executable_extension():
# type: () -> str
if is_windows():
return ".bat" if is_anaconda() else ".exe"
else:
return ""
def call_graphviz(program, arguments, working_dir, **kwargs):
# explicitly inherit `$PATH`, on Windows too,
# with `shell=False`
if program in DEFAULT_PROGRAMS:
extension = get_executable_extension()
program += extension
if arguments is None:
arguments = []
env = {
"PATH": os.environ.get("PATH", ""),
"LD_LIBRARY_PATH": os.environ.get("LD_LIBRARY_PATH", ""),
"SYSTEMROOT": os.environ.get("SYSTEMROOT", ""),
}
program_with_args = [program] + arguments
process = subprocess.Popen(
program_with_args,
env=env,
cwd=working_dir,
shell=False,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
**kwargs,
)
stdout_data, stderr_data = process.communicate()
return stdout_data, stderr_data, process
#
# Extended version of ASPN's Python Cookbook Recipe:
# Frozen dictionaries.
# https://code.activestate.com/recipes/414283/
#
# This version freezes dictionaries used as values within dictionaries.
#
class frozendict(dict):
def _blocked_attribute(obj):
raise AttributeError("A frozendict cannot be modified.")
_blocked_attribute = property(_blocked_attribute)
__delitem__ = __setitem__ = clear = _blocked_attribute
pop = popitem = setdefault = update = _blocked_attribute
def __new__(cls, *args, **kw):
new = dict.__new__(cls)
args_ = []
for arg in args:
if isinstance(arg, dict):
arg = copy.copy(arg)
for k in arg:
v = arg[k]
if isinstance(v, frozendict):
arg[k] = v
elif isinstance(v, dict):
arg[k] = frozendict(v)
elif isinstance(v, list):
v_ = list()
for elm in v:
if isinstance(elm, dict):
v_.append(frozendict(elm))
else:
v_.append(elm)
arg[k] = tuple(v_)
args_.append(arg)
else:
args_.append(arg)
dict.__init__(new, *args_, **kw)
return new
def __init__(self, *args, **kw):
pass
def __hash__(self):
try:
return self._cached_hash
except AttributeError:
h = self._cached_hash = hash(tuple(sorted(self.items())))
return h
def __repr__(self):
return "frozendict(%s)" % dict.__repr__(self)
dot_keywords = ["graph", "subgraph", "digraph", "node", "edge", "strict"]
id_re_alpha_nums = re.compile("^[_a-zA-Z][a-zA-Z0-9_,]*$", re.UNICODE)
id_re_alpha_nums_with_ports = re.compile(
'^[_a-zA-Z][a-zA-Z0-9_,:"]*[a-zA-Z0-9_,"]+$', re.UNICODE
)
id_re_num = re.compile("^[0-9,]+$", re.UNICODE)
id_re_with_port = re.compile("^([^:]*):([^:]*)$", re.UNICODE)
id_re_dbl_quoted = re.compile('^".*"$', re.S | re.UNICODE)
id_re_html = re.compile("^<.*>$", re.S | re.UNICODE)
def needs_quotes(s):
"""Checks whether a string is a dot language ID.
    It will check whether the string is solely composed
    of the characters allowed in an ID or not.
If the string is one of the reserved keywords it will
need quotes too but the user will need to add them
manually.
"""
# If the name is a reserved keyword it will need quotes but pydot
# can't tell when it's being used as a keyword or when it's simply
# a name. Hence the user needs to supply the quotes when an element
# would use a reserved keyword as name. This function will return
# false indicating that a keyword string, if provided as-is, won't
# need quotes.
if s in dot_keywords:
return False
chars = [ord(c) for c in s if ord(c) > 0x7F or ord(c) == 0]
if chars and not id_re_dbl_quoted.match(s) and not id_re_html.match(s):
return True
for test_re in [
id_re_alpha_nums,
id_re_num,
id_re_dbl_quoted,
id_re_html,
id_re_alpha_nums_with_ports,
]:
if test_re.match(s):
return False
m = id_re_with_port.match(s)
if m:
return needs_quotes(m.group(1)) or needs_quotes(m.group(2))
return True
def quote_if_necessary(s):
"""Enclose attribute value in quotes, if needed."""
if isinstance(s, bool):
if s is True:
return "True"
return "False"
if not isinstance(s, str):
return s
if not s:
return s
if needs_quotes(s):
replace = {
'"': r"\"",
"\n": r"\n",
"\r": r"\r",
}
for (a, b) in replace.items():
s = s.replace(a, b)
return '"' + s + '"'
return s
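# A minimal sketch (not from the original source) of the quoting helpers
# defined above; the sample strings are illustrative only.
def _demo_quoting():
    assert needs_quotes("node a") is True  # whitespace requires quoting
    assert needs_quotes("node_1") is False  # plain ID characters
    assert needs_quotes("graph") is False  # keywords: the user must add quotes
    assert quote_if_necessary("node a") == '"node a"'
    assert quote_if_necessary(3.14) == 3.14  # non-strings pass through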
def graph_from_dot_data(s):
"""Load graphs from DOT description in string `s`.
@param s: string in [DOT language](
https://en.wikipedia.org/wiki/DOT_(graph_description_language))
@return: Graphs that result from parsing.
@rtype: `list` of `pydot.Dot`
"""
return dot_parser.parse_dot_data(s)
def graph_from_dot_file(path, encoding=None):
"""Load graphs from DOT file at `path`.
@param path: to DOT file
@param encoding: as passed to `io.open`.
For example, `'utf-8'`.
@return: Graphs that result from parsing.
@rtype: `list` of `pydot.Dot`
"""
with io.open(path, "rt", encoding=encoding) as f:
s = f.read()
graphs = graph_from_dot_data(s)
return graphs
def graph_from_edges(edge_list, node_prefix="", directed=False):
"""Creates a basic graph out of an edge list.
The edge list has to be a list of tuples representing
the nodes connected by the edge.
The values can be anything: bool, int, float, str.
    If `directed` is False, the resulting graph is
    undirected, so an edge and its reverse coincide.
"""
if directed:
graph = Dot(graph_type="digraph")
else:
graph = Dot(graph_type="graph")
for edge in edge_list:
if isinstance(edge[0], str):
src = node_prefix + edge[0]
else:
src = node_prefix + str(edge[0])
if isinstance(edge[1], str):
dst = node_prefix + edge[1]
else:
dst = node_prefix + str(edge[1])
e = Edge(src, dst)
graph.add_edge(e)
return graph
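# A minimal sketch (not from the original source) of graph_from_edges; the
# edge list is illustrative only (Dot is defined further below in this module).
def _demo_graph_from_edges():
    graph = graph_from_edges([(1, 2), (2, 3)], node_prefix="n", directed=True)
    edges = [(e.get_source(), e.get_destination()) for e in graph.get_edges()]
    assert edges == [("n1", "n2"), ("n2", "n3")]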
def graph_from_adjacency_matrix(matrix, node_prefix="", directed=False):
"""Creates a basic graph out of an adjacency matrix.
The matrix has to be a list of rows of values
representing an adjacency matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
"""
node_orig = 1
if directed:
graph = Dot(graph_type="digraph")
else:
graph = Dot(graph_type="graph")
for row in matrix:
if not directed:
skip = matrix.index(row)
r = row[skip:]
else:
skip = 0
r = row
node_dest = skip + 1
for e in r:
if e:
graph.add_edge(
Edge(
"%s%s" % (node_prefix, node_orig),
"%s%s" % (node_prefix, node_dest),
)
)
node_dest += 1
node_orig += 1
return graph
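# A minimal sketch (not from the original source) of
# graph_from_adjacency_matrix; the 2x2 matrix is illustrative only.
def _demo_graph_from_adjacency_matrix():
    # undirected: each row is read starting at the diagonal, so the
    # symmetric lower half does not produce duplicate edges
    graph = graph_from_adjacency_matrix([[0, 1], [1, 0]], directed=False)
    edges = [(e.get_source(), e.get_destination()) for e in graph.get_edges()]
    assert edges == [("1", "2")]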
def graph_from_incidence_matrix(matrix, node_prefix="", directed=False):
"""Creates a basic graph out of an incidence matrix.
The matrix has to be a list of rows of values
representing an incidence matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
"""
if directed:
graph = Dot(graph_type="digraph")
else:
graph = Dot(graph_type="graph")
for row in matrix:
nodes = []
c = 1
for node in row:
if node:
nodes.append(c * node)
c += 1
nodes.sort()
if len(nodes) == 2:
graph.add_edge(
Edge(
"%s%s" % (node_prefix, abs(nodes[0])),
"%s%s" % (node_prefix, nodes[1]),
)
)
if not directed:
graph.set_simplify(True)
return graph
class Common(object):
"""Common information to several classes.
    Should not be used directly; several classes are derived from
    this one.
"""
def __getstate__(self):
dict = copy.copy(self.obj_dict)
return dict
def __setstate__(self, state):
self.obj_dict = state
def __get_attribute__(self, attr):
"""Look for default attributes for this node"""
attr_val = self.obj_dict["attributes"].get(attr, None)
if attr_val is None:
# get the defaults for nodes/edges
default_node_name = self.obj_dict["type"]
# The defaults for graphs are set on a node named 'graph'
if default_node_name in ("subgraph", "digraph", "cluster"):
default_node_name = "graph"
g = self.get_parent_graph()
if g is not None:
defaults = g.get_node(default_node_name)
else:
return None
# Multiple defaults could be set by having repeated 'graph [...]'
# 'node [...]', 'edge [...]' statements. In such case, if the
# same attribute is set in different statements, only the first
# will be returned. In order to get all, one would call the
# get_*_defaults() methods and handle those. Or go node by node
# (of the ones specifying defaults) and modify the attributes
# individually.
#
if not isinstance(defaults, (list, tuple)):
defaults = [defaults]
for default in defaults:
attr_val = default.obj_dict["attributes"].get(attr, None)
if attr_val:
return attr_val
else:
return attr_val
return None
def set_parent_graph(self, parent_graph):
self.obj_dict["parent_graph"] = parent_graph
def get_parent_graph(self):
return self.obj_dict.get("parent_graph", None)
def set(self, name, value):
"""Set an attribute value by name.
Given an attribute 'name' it will set its value to 'value'.
There's always the possibility of using the methods:
set_'name'(value)
which are defined for all the existing attributes.
"""
self.obj_dict["attributes"][name] = value
def get(self, name):
"""Get an attribute value by name.
Given an attribute 'name' it will get its value.
There's always the possibility of using the methods:
get_'name'()
which are defined for all the existing attributes.
"""
return self.obj_dict["attributes"].get(name, None)
def get_attributes(self):
"""Get attributes of the object"""
return self.obj_dict["attributes"]
def set_sequence(self, seq):
"""Set sequence"""
self.obj_dict["sequence"] = seq
def get_sequence(self):
"""Get sequence"""
return self.obj_dict["sequence"]
def create_attribute_methods(self, obj_attributes):
for attr in obj_attributes:
# Generate all the Setter methods.
#
self.__setattr__(
"set_" + attr,
lambda x, a=attr: self.obj_dict["attributes"].__setitem__(
a, x
),
)
# Generate all the Getter methods.
#
self.__setattr__(
"get_" + attr, lambda a=attr: self.__get_attribute__(a)
)
class Node(Common):
"""A graph node.
This class represents a graph's node with all its attributes.
node(name, attribute=value, ...)
name: node's name
All the attributes defined in the Graphviz dot language should
be supported.
"""
def __init__(self, name="", obj_dict=None, **attrs):
#
# Nodes will take attributes of
# all other types because the defaults
# for any GraphViz object are dealt with
# as if they were Node definitions
#
if obj_dict is not None:
self.obj_dict = obj_dict
else:
self.obj_dict = dict()
# Copy the attributes
#
self.obj_dict["attributes"] = dict(attrs)
self.obj_dict["type"] = "node"
self.obj_dict["parent_graph"] = None
self.obj_dict["parent_node_list"] = None
self.obj_dict["sequence"] = None
# Remove the compass point
#
port = None
if isinstance(name, str) and not name.startswith('"'):
idx = name.find(":")
if idx > 0 and idx + 1 < len(name):
name, port = name[:idx], name[idx:]
if isinstance(name, int):
name = str(name)
self.obj_dict["name"] = quote_if_necessary(name)
self.obj_dict["port"] = port
self.create_attribute_methods(NODE_ATTRIBUTES)
def __str__(self):
return self.to_string()
def set_name(self, node_name):
"""Set the node's name."""
self.obj_dict["name"] = node_name
def get_name(self):
"""Get the node's name."""
return self.obj_dict["name"]
def get_port(self):
"""Get the node's port."""
return self.obj_dict["port"]
def add_style(self, style):
styles = self.obj_dict["attributes"].get("style", None)
if not styles and style:
styles = [style]
else:
styles = styles.split(",")
styles.append(style)
self.obj_dict["attributes"]["style"] = ",".join(styles)
def to_string(self):
"""Return string representation of node in DOT language."""
# RMF: special case defaults for node, edge and graph properties.
#
node = quote_if_necessary(self.obj_dict["name"])
node_attr = list()
for attr in sorted(self.obj_dict["attributes"]):
value = self.obj_dict["attributes"][attr]
if value == "":
value = '""'
if value is not None:
node_attr.append("%s=%s" % (attr, quote_if_necessary(value)))
else:
node_attr.append(attr)
        # No point in having nodes setting any defaults if they don't set
        # any attributes...
#
if node in ("graph", "node", "edge") and len(node_attr) == 0:
return ""
node_attr = ", ".join(node_attr)
if node_attr:
node += " [" + node_attr + "]"
return node + ";"
class Edge(Common):
"""A graph edge.
This class represents a graph's edge with all its attributes.
edge(src, dst, attribute=value, ...)
src: source node, subgraph or cluster
dst: destination node, subgraph or cluster
`src` and `dst` can be specified as a `Node`, `Subgraph` or
`Cluster` object, or as the name string of such a component.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_label, set_fontname
or directly by using the instance's special dictionary:
Edge.obj_dict['attributes'][attribute name], i.e.
edge_instance.obj_dict['attributes']['label']
edge_instance.obj_dict['attributes']['fontname']
"""
def __init__(self, src="", dst="", obj_dict=None, **attrs):
self.obj_dict = dict()
if isinstance(src, (Node, Subgraph, Cluster)):
src = src.get_name()
if isinstance(dst, (Node, Subgraph, Cluster)):
dst = dst.get_name()
points = (quote_if_necessary(src), quote_if_necessary(dst))
self.obj_dict["points"] = points
if obj_dict is None:
# Copy the attributes
self.obj_dict["attributes"] = dict(attrs)
self.obj_dict["type"] = "edge"
self.obj_dict["parent_graph"] = None
self.obj_dict["parent_edge_list"] = None
self.obj_dict["sequence"] = None
else:
self.obj_dict = obj_dict
self.create_attribute_methods(EDGE_ATTRIBUTES)
def __str__(self):
return self.to_string()
def get_source(self):
"""Get the edges source node name."""
return self.obj_dict["points"][0]
def get_destination(self):
"""Get the edge's destination node name."""
return self.obj_dict["points"][1]
def __hash__(self):
return hash(hash(self.get_source()) + hash(self.get_destination()))
def __eq__(self, edge):
"""Compare two edges.
        If the parent graph is directed, only arcs linking the
        same source to the same destination are considered
        equal: A->B != B->A.
        If the parent graph is undirected, any edge
        connecting two nodes is equal to any other
        edge connecting the same nodes: A--B == B--A.
"""
if not isinstance(edge, Edge):
raise pydot.Error("Can not compare an edge to a non-edge object.")
if self.get_parent_graph().get_top_graph_type() == "graph":
# If the graph is undirected, the edge has neither
# source nor destination.
#
if (
self.get_source() == edge.get_source()
and self.get_destination() == edge.get_destination()
) or (
edge.get_source() == self.get_destination()
and edge.get_destination() == self.get_source()
):
return True
else:
if (
self.get_source() == edge.get_source()
and self.get_destination() == edge.get_destination()
):
return True
return False
def parse_node_ref(self, node_str):
if not isinstance(node_str, str):
return node_str
if node_str.startswith('"') and node_str.endswith('"'):
return node_str
node_port_idx = node_str.rfind(":")
if (
node_port_idx > 0
and node_str[0] == '"'
and node_str[node_port_idx - 1] == '"'
):
return node_str
if node_port_idx > 0:
a = node_str[:node_port_idx]
b = node_str[node_port_idx + 1 :]
node = quote_if_necessary(a)
node += ":" + quote_if_necessary(b)
return node
return node_str
def to_string(self):
"""Return string representation of edge in DOT language."""
src = self.parse_node_ref(self.get_source())
dst = self.parse_node_ref(self.get_destination())
if isinstance(src, frozendict):
edge = [Subgraph(obj_dict=src).to_string()]
elif isinstance(src, int):
edge = [str(src)]
else:
edge = [src]
if (
self.get_parent_graph()
and self.get_parent_graph().get_top_graph_type()
and self.get_parent_graph().get_top_graph_type() == "digraph"
):
edge.append("->")
else:
edge.append("--")
if isinstance(dst, frozendict):
edge.append(Subgraph(obj_dict=dst).to_string())
elif isinstance(dst, int):
edge.append(str(dst))
else:
edge.append(dst)
edge_attr = list()
for attr in sorted(self.obj_dict["attributes"]):
value = self.obj_dict["attributes"][attr]
if value == "":
value = '""'
if value is not None:
edge_attr.append("%s=%s" % (attr, quote_if_necessary(value)))
else:
edge_attr.append(attr)
edge_attr = ", ".join(edge_attr)
if edge_attr:
edge.append(" [" + edge_attr + "]")
return " ".join(edge) + ";"
class Graph(Common):
"""Class representing a graph in Graphviz's dot language.
This class implements the methods to work on a representation
of a graph in Graphviz's dot language.
graph( graph_name='G', graph_type='digraph',
strict=False, suppress_disconnected=False, attribute=value, ...)
graph_name:
the graph's name
graph_type:
can be 'graph' or 'digraph'
    suppress_disconnected:
        defaults to False. If set to True, disconnected
        nodes are removed from the graph.
    simplify:
        if True it will avoid displaying equal edges, i.e.
        only one edge between two nodes, removing the
        duplicated ones.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_size, set_fontname
or using the instance's attributes:
Graph.obj_dict['attributes'][attribute name], i.e.
graph_instance.obj_dict['attributes']['label']
graph_instance.obj_dict['attributes']['fontname']
"""
def __init__(
self,
graph_name="G",
obj_dict=None,
graph_type="digraph",
strict=False,
suppress_disconnected=False,
simplify=False,
**attrs
):
if obj_dict is not None:
self.obj_dict = obj_dict
else:
self.obj_dict = dict()
self.obj_dict["attributes"] = dict(attrs)
if graph_type not in ["graph", "digraph"]:
raise pydot.Error(
(
'Invalid type "{t}". '
"Accepted graph types are: "
"graph, digraph"
).format(t=graph_type)
)
self.obj_dict["name"] = quote_if_necessary(graph_name)
self.obj_dict["type"] = graph_type
self.obj_dict["strict"] = strict
self.obj_dict["suppress_disconnected"] = suppress_disconnected
self.obj_dict["simplify"] = simplify
self.obj_dict["current_child_sequence"] = 1
self.obj_dict["nodes"] = dict()
self.obj_dict["edges"] = dict()
self.obj_dict["subgraphs"] = dict()
self.set_parent_graph(self)
self.create_attribute_methods(GRAPH_ATTRIBUTES)
def __str__(self):
return self.to_string()
def get_graph_type(self):
return self.obj_dict["type"]
def get_top_graph_type(self):
parent = self
while True:
parent_ = parent.get_parent_graph()
if parent_ == parent:
break
parent = parent_
return parent.obj_dict["type"]
def set_graph_defaults(self, **attrs):
self.add_node(Node("graph", **attrs))
def get_graph_defaults(self, **attrs):
graph_nodes = self.get_node("graph")
if isinstance(graph_nodes, (list, tuple)):
return [node.get_attributes() for node in graph_nodes]
return graph_nodes.get_attributes()
def set_node_defaults(self, **attrs):
"""Define default node attributes.
These attributes only apply to nodes added to the graph after
calling this method.
"""
self.add_node(Node("node", **attrs))
def get_node_defaults(self, **attrs):
graph_nodes = self.get_node("node")
if isinstance(graph_nodes, (list, tuple)):
return [node.get_attributes() for node in graph_nodes]
return graph_nodes.get_attributes()
def set_edge_defaults(self, **attrs):
self.add_node(Node("edge", **attrs))
def get_edge_defaults(self, **attrs):
graph_nodes = self.get_node("edge")
if isinstance(graph_nodes, (list, tuple)):
return [node.get_attributes() for node in graph_nodes]
return graph_nodes.get_attributes()
def set_simplify(self, simplify):
"""Set whether to simplify or not.
        If True it will avoid displaying equal edges, i.e.
        only one edge between two nodes, removing the
        duplicated ones.
"""
self.obj_dict["simplify"] = simplify
def get_simplify(self):
"""Get whether to simplify or not.
Refer to set_simplify for more information.
"""
return self.obj_dict["simplify"]
def set_type(self, graph_type):
"""Set the graph's type, 'graph' or 'digraph'."""
self.obj_dict["type"] = graph_type
def get_type(self):
"""Get the graph's type, 'graph' or 'digraph'."""
return self.obj_dict["type"]
def set_name(self, graph_name):
"""Set the graph's name."""
self.obj_dict["name"] = graph_name
def get_name(self):
"""Get the graph's name."""
return self.obj_dict["name"]
def set_strict(self, val):
"""Set graph to 'strict' mode.
This option is only valid for top level graphs.
"""
self.obj_dict["strict"] = val
def get_strict(self, val):
"""Get graph's 'strict' mode (True, False).
This option is only valid for top level graphs.
"""
return self.obj_dict["strict"]
def set_suppress_disconnected(self, val):
"""Suppress disconnected nodes in the output graph.
This option will skip nodes in
the graph with no incoming or outgoing
edges. This option works also
for subgraphs and has effect only in the
current graph/subgraph.
"""
self.obj_dict["suppress_disconnected"] = val
def get_suppress_disconnected(self, val):
"""Get if suppress disconnected is set.
Refer to set_suppress_disconnected for more information.
"""
return self.obj_dict["suppress_disconnected"]
def get_next_sequence_number(self):
seq = self.obj_dict["current_child_sequence"]
self.obj_dict["current_child_sequence"] += 1
return seq
def add_node(self, graph_node):
"""Adds a node object to the graph.
It takes a node object as its only argument and returns
None.
"""
if not isinstance(graph_node, Node):
raise TypeError(
"add_node() received "
+ "a non node class object: "
+ str(graph_node)
)
node = self.get_node(graph_node.get_name())
if not node:
self.obj_dict["nodes"][graph_node.get_name()] = [
graph_node.obj_dict
]
graph_node.set_parent_graph(self.get_parent_graph())
else:
self.obj_dict["nodes"][graph_node.get_name()].append(
graph_node.obj_dict
)
graph_node.set_sequence(self.get_next_sequence_number())
def del_node(self, name, index=None):
"""Delete a node from the graph.
Given a node's name all node(s) with that same name
will be deleted if 'index' is not specified or set
to None.
If there are several nodes with that same name and
'index' is given, only the node in that position
will be deleted.
'index' should be an integer specifying the position
of the node to delete. If index is larger than the
number of nodes with that name, no action is taken.
If nodes are deleted it returns True. If no action
is taken it returns False.
"""
if isinstance(name, Node):
name = name.get_name()
if name in self.obj_dict["nodes"]:
if index is not None and index < len(self.obj_dict["nodes"][name]):
del self.obj_dict["nodes"][name][index]
return True
else:
del self.obj_dict["nodes"][name]
return True
return False
def get_node(self, name):
"""Retrieve a node from the graph.
Given a node's name the corresponding Node
instance will be returned.
If one or more nodes exist with that name a list of
Node instances is returned.
An empty list is returned otherwise.
"""
match = list()
if name in self.obj_dict["nodes"]:
match.extend(
[
Node(obj_dict=obj_dict)
for obj_dict in self.obj_dict["nodes"][name]
]
)
return match
def get_nodes(self):
"""Get the list of Node instances."""
return self.get_node_list()
def get_node_list(self):
"""Get the list of Node instances.
This method returns the list of Node instances
composing the graph.
"""
node_objs = list()
for node in self.obj_dict["nodes"]:
obj_dict_list = self.obj_dict["nodes"][node]
node_objs.extend([Node(obj_dict=obj_d) for obj_d in obj_dict_list])
return node_objs
def add_edge(self, graph_edge):
"""Adds an edge object to the graph.
        It takes an edge object as its only argument and returns
None.
"""
if not isinstance(graph_edge, Edge):
raise TypeError(
"add_edge() received a non edge class object: "
+ str(graph_edge)
)
edge_points = (graph_edge.get_source(), graph_edge.get_destination())
if edge_points in self.obj_dict["edges"]:
edge_list = self.obj_dict["edges"][edge_points]
edge_list.append(graph_edge.obj_dict)
else:
self.obj_dict["edges"][edge_points] = [graph_edge.obj_dict]
graph_edge.set_sequence(self.get_next_sequence_number())
graph_edge.set_parent_graph(self.get_parent_graph())
def del_edge(self, src_or_list, dst=None, index=None):
"""Delete an edge from the graph.
Given an edge's (source, destination) node names all
matching edges(s) will be deleted if 'index' is not
specified or set to None.
If there are several matching edges and 'index' is
given, only the edge in that position will be deleted.
'index' should be an integer specifying the position
of the edge to delete. If index is larger than the
number of matching edges, no action is taken.
If edges are deleted it returns True. If no action
is taken it returns False.
"""
if isinstance(src_or_list, (list, tuple)):
if dst is not None and isinstance(dst, int):
index = dst
src, dst = src_or_list
else:
src, dst = src_or_list, dst
if isinstance(src, Node):
src = src.get_name()
if isinstance(dst, Node):
dst = dst.get_name()
if (src, dst) in self.obj_dict["edges"]:
if index is not None and index < len(
self.obj_dict["edges"][(src, dst)]
):
del self.obj_dict["edges"][(src, dst)][index]
return True
else:
del self.obj_dict["edges"][(src, dst)]
return True
return False
def get_edge(self, src_or_list, dst=None):
"""Retrieved an edge from the graph.
Given an edge's source and destination the corresponding
Edge instance(s) will be returned.
If one or more edges exist with that source and destination
a list of Edge instances is returned.
An empty list is returned otherwise.
"""
if isinstance(src_or_list, (list, tuple)) and dst is None:
edge_points = tuple(src_or_list)
edge_points_reverse = (edge_points[1], edge_points[0])
else:
edge_points = (src_or_list, dst)
edge_points_reverse = (dst, src_or_list)
match = list()
if edge_points in self.obj_dict["edges"] or (
self.get_top_graph_type() == "graph"
and edge_points_reverse in self.obj_dict["edges"]
):
edges_obj_dict = self.obj_dict["edges"].get(
edge_points,
self.obj_dict["edges"].get(edge_points_reverse, None),
)
for edge_obj_dict in edges_obj_dict:
match.append(
Edge(
edge_points[0], edge_points[1], obj_dict=edge_obj_dict
)
)
return match
def get_edges(self):
return self.get_edge_list()
def get_edge_list(self):
"""Get the list of Edge instances.
This method returns the list of Edge instances
composing the graph.
"""
edge_objs = list()
for edge in self.obj_dict["edges"]:
obj_dict_list = self.obj_dict["edges"][edge]
edge_objs.extend([Edge(obj_dict=obj_d) for obj_d in obj_dict_list])
return edge_objs
def add_subgraph(self, sgraph):
"""Adds an subgraph object to the graph.
It takes a subgraph object as its only argument and returns
None.
"""
if not isinstance(sgraph, Subgraph) and not isinstance(
sgraph, Cluster
):
raise TypeError(
"add_subgraph() received a non subgraph class object:"
+ str(sgraph)
)
if sgraph.get_name() in self.obj_dict["subgraphs"]:
sgraph_list = self.obj_dict["subgraphs"][sgraph.get_name()]
sgraph_list.append(sgraph.obj_dict)
else:
self.obj_dict["subgraphs"][sgraph.get_name()] = [sgraph.obj_dict]
sgraph.set_sequence(self.get_next_sequence_number())
sgraph.set_parent_graph(self.get_parent_graph())
def get_subgraph(self, name):
"""Retrieved a subgraph from the graph.
Given a subgraph's name the corresponding
Subgraph instance will be returned.
If one or more subgraphs exist with the same name, a list of
Subgraph instances is returned.
An empty list is returned otherwise.
"""
match = list()
if name in self.obj_dict["subgraphs"]:
sgraphs_obj_dict = self.obj_dict["subgraphs"].get(name)
for obj_dict_list in sgraphs_obj_dict:
match.append(Subgraph(obj_dict=obj_dict_list))
return match
def get_subgraphs(self):
return self.get_subgraph_list()
def get_subgraph_list(self):
"""Get the list of Subgraph instances.
This method returns the list of Subgraph instances
in the graph.
"""
sgraph_objs = list()
for sgraph in self.obj_dict["subgraphs"]:
obj_dict_list = self.obj_dict["subgraphs"][sgraph]
sgraph_objs.extend(
[Subgraph(obj_dict=obj_d) for obj_d in obj_dict_list]
)
return sgraph_objs
def set_parent_graph(self, parent_graph):
self.obj_dict["parent_graph"] = parent_graph
for k in self.obj_dict["nodes"]:
obj_list = self.obj_dict["nodes"][k]
for obj in obj_list:
obj["parent_graph"] = parent_graph
for k in self.obj_dict["edges"]:
obj_list = self.obj_dict["edges"][k]
for obj in obj_list:
obj["parent_graph"] = parent_graph
for k in self.obj_dict["subgraphs"]:
obj_list = self.obj_dict["subgraphs"][k]
for obj in obj_list:
Graph(obj_dict=obj).set_parent_graph(parent_graph)
def to_string(self):
"""Return string representation of graph in DOT language.
@return: graph and subelements
@rtype: `str`
"""
graph = list()
if self.obj_dict.get("strict", None) is not None:
if self == self.get_parent_graph() and self.obj_dict["strict"]:
graph.append("strict ")
graph_type = self.obj_dict["type"]
if graph_type == "subgraph" and not self.obj_dict.get(
"show_keyword", True
):
graph_type = ""
s = "{type} {name} {{\n".format(
type=graph_type, name=self.obj_dict["name"]
)
graph.append(s)
for attr in sorted(self.obj_dict["attributes"]):
if self.obj_dict["attributes"].get(attr, None) is not None:
val = self.obj_dict["attributes"].get(attr)
if val == "":
val = '""'
if val is not None:
graph.append("%s=%s" % (attr, quote_if_necessary(val)))
else:
graph.append(attr)
graph.append(";\n")
edges_done = set()
edge_obj_dicts = list()
for k in self.obj_dict["edges"]:
edge_obj_dicts.extend(self.obj_dict["edges"][k])
if edge_obj_dicts:
edge_src_set, edge_dst_set = list(
zip(*[obj["points"] for obj in edge_obj_dicts])
)
edge_src_set, edge_dst_set = set(edge_src_set), set(edge_dst_set)
else:
edge_src_set, edge_dst_set = set(), set()
node_obj_dicts = list()
for k in self.obj_dict["nodes"]:
node_obj_dicts.extend(self.obj_dict["nodes"][k])
sgraph_obj_dicts = list()
for k in self.obj_dict["subgraphs"]:
sgraph_obj_dicts.extend(self.obj_dict["subgraphs"][k])
obj_list = [
(obj["sequence"], obj)
for obj in (edge_obj_dicts + node_obj_dicts + sgraph_obj_dicts)
]
obj_list.sort(key=lambda x: x[0])
for idx, obj in obj_list:
if obj["type"] == "node":
node = Node(obj_dict=obj)
if self.obj_dict.get("suppress_disconnected", False):
if (
node.get_name() not in edge_src_set
and node.get_name() not in edge_dst_set
):
continue
graph.append(node.to_string() + "\n")
elif obj["type"] == "edge":
edge = Edge(obj_dict=obj)
if self.obj_dict.get("simplify", False) and edge in edges_done:
continue
graph.append(edge.to_string() + "\n")
edges_done.add(edge)
else:
sgraph = Subgraph(obj_dict=obj)
graph.append(sgraph.to_string() + "\n")
graph.append("}\n")
return "".join(graph)
class Subgraph(Graph):
"""Class representing a subgraph in Graphviz's dot language.
This class implements the methods to work on a representation
of a subgraph in Graphviz's dot language.
subgraph(graph_name='subG',
suppress_disconnected=False,
attribute=value,
...)
graph_name:
the subgraph's name
suppress_disconnected:
defaults to False; if set to True, any disconnected
nodes will be removed from the subgraph.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_size, set_fontname
or using the instance's attributes:
Subgraph.obj_dict['attributes'][attribute name], i.e.
subgraph_instance.obj_dict['attributes']['label']
subgraph_instance.obj_dict['attributes']['fontname']
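Example (a minimal sketch):
s = pydot.Subgraph("s0", label="my subgraph")
s.set_fontname("Courier")  # dynamically generated setter
s.obj_dict['attributes']['label']  # -> 'my subgraph'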
"""
# RMF: subgraph should have all the
# attributes of graph so it can be passed
# as a graph to all methods
#
def __init__(
self,
graph_name="",
obj_dict=None,
suppress_disconnected=False,
simplify=False,
**attrs
):
Graph.__init__(
self,
graph_name=graph_name,
obj_dict=obj_dict,
suppress_disconnected=suppress_disconnected,
simplify=simplify,
**attrs,
)
if obj_dict is None:
self.obj_dict["type"] = "subgraph"
class Cluster(Graph):
"""Class representing a cluster in Graphviz's dot language.
This class implements the methods to work on a representation
of a cluster in Graphviz's dot language.
cluster(graph_name='subG',
suppress_disconnected=False,
attribute=value,
...)
graph_name:
the cluster's name
(the string 'cluster' will be always prepended)
suppress_disconnected:
defaults to False; if set to True, any disconnected
nodes will be removed from the cluster.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_color, set_fontname
or using the instance's attributes:
Cluster.obj_dict['attributes'][attribute name], i.e.
cluster_instance.obj_dict['attributes']['label']
cluster_instance.obj_dict['attributes']['fontname']
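Example (a minimal sketch; note the automatic prefix):
c = pydot.Cluster("0", label="my cluster")
c.get_name()  # -> 'cluster_0'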
"""
def __init__(
self,
graph_name="subG",
obj_dict=None,
suppress_disconnected=False,
simplify=False,
**attrs
):
Graph.__init__(
self,
graph_name=graph_name,
obj_dict=obj_dict,
suppress_disconnected=suppress_disconnected,
simplify=simplify,
**attrs,
)
if obj_dict is None:
self.obj_dict["type"] = "subgraph"
self.obj_dict["name"] = quote_if_necessary("cluster_" + graph_name)
self.create_attribute_methods(CLUSTER_ATTRIBUTES)
class Dot(Graph):
"""A container for handling a dot language file.
This class implements methods to write and process
a dot language file. It is a derived class of
the base class 'Graph'.
"""
def __init__(self, *argsl, **argsd):
Graph.__init__(self, *argsl, **argsd)
self.shape_files = list()
self.formats = [
"canon",
"cmap",
"cmapx",
"cmapx_np",
"dia",
"dot",
"fig",
"gd",
"gd2",
"gif",
"hpgl",
"imap",
"imap_np",
"ismap",
"jpe",
"jpeg",
"jpg",
"mif",
"mp",
"pcl",
"pdf",
"pic",
"plain",
"plain-ext",
"png",
"ps",
"ps2",
"svg",
"svgz",
"vml",
"vmlz",
"vrml",
"vtx",
"wbmp",
"xdot",
"xlib",
]
self.prog = "dot"
# Automatically creates all
# the methods enabling the creation
# of output in any of the supported formats.
for frmt in self.formats:
def new_method(f=frmt, prog=self.prog, encoding=None):
"""Refer to docstring of method `create`."""
return self.create(format=f, prog=prog, encoding=encoding)
name = "create_{fmt}".format(fmt=frmt)
self.__setattr__(name, new_method)
for frmt in self.formats + ["raw"]:
def new_method(path, f=frmt, prog=self.prog, encoding=None):
"""Refer to docstring of method `write.`"""
self.write(path, format=f, prog=prog, encoding=encoding)
name = "write_{fmt}".format(fmt=frmt)
self.__setattr__(name, new_method)
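# The two loops above make calls such as self.create_png() and
# self.write_svg("g.svg") available ("g.svg" being a hypothetical
# output path).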
def __getstate__(self):
state = copy.copy(self.obj_dict)
return state
def __setstate__(self, state):
self.obj_dict = state
def set_shape_files(self, file_paths):
"""Add the paths of the required image files.
If the graph needs graphic objects to be used as shapes,
those need to be in the same folder as the one the graph
is going to be rendered from. Alternatively, the absolute
path to the files can be specified when including the
graphics in the graph.
The files in the location(s) pointed to by the path(s)
passed as arguments to this method will be copied to the
same temporary location where the graph is going to be
rendered.
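Example (hypothetical image paths):
g.set_shape_files(["./shape1.png", "./shape2.png"])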
"""
if isinstance(file_paths, str):
self.shape_files.append(file_paths)
if isinstance(file_paths, (list, tuple)):
self.shape_files.extend(file_paths)
def set_prog(self, prog):
"""Sets the default program.
Sets the default program in charge of processing
the dot file into a graph.
"""
self.prog = prog
def write(self, path, prog=None, format="raw", encoding=None):
"""Writes a graph to a file.
Given a filename 'path' it will open/create and truncate
such file and write on it a representation of the graph
defined by the dot object in the format specified by
'format' and using the encoding specified by `encoding` for text.
The format 'raw' is used to dump the string representation
of the Dot object, without further processing.
The output can be processed by any of the GraphViz tools,
specified in 'prog', which defaults to 'dot'.
Returns True or False according to the success of the write
operation.
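Example (a minimal sketch; "out.dot"/"out.png" are hypothetical
paths):
g.write("out.dot")  # raw DOT source
g.write("out.png", prog="dot", format="png")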
There's also the preferred possibility of using:
write_'format'(path, prog='program')
which are automatically defined for all the supported formats.
[write_ps(), write_gif(), write_dia(), ...]
The encoding is passed to `open` [1].
[1] https://docs.python.org/3/library/functions.html#open
"""
if prog is None:
prog = self.prog
if format == "raw":
s = self.to_string()
with io.open(path, mode="wt", encoding=encoding) as f:
f.write(s)
else:
s = self.create(prog, format, encoding=encoding)
with io.open(path, mode="wb") as f:
f.write(s)
return True
def create(self, prog=None, format="ps", encoding=None):
"""Creates and returns a binary image for the graph.
create will write the graph to a temporary dot file in the
encoding specified by `encoding` and process it with the
program given by 'prog' (which defaults to 'dot'), returning
the resulting binary image output as `bytes`.
There's also the preferred possibility of using:
create_'format'(prog='program')
which are automatically defined for all the supported formats,
for example:
- `create_ps()`
- `create_gif()`
- `create_dia()`
If 'prog' is a list, instead of a string,
then the first item is expected to be the program name,
followed by any optional command-line arguments for it:
[ 'twopi', '-Tdot', '-s10' ]
@param prog: either:
- name of GraphViz executable that
can be found in the `$PATH`, or
- absolute path to GraphViz executable.
If you have added GraphViz to the `$PATH` and
use its executables as installed
(without renaming any of them)
then their names are:
- `'dot'`
- `'twopi'`
- `'neato'`
- `'circo'`
- `'fdp'`
- `'sfdp'`
On Windows, these have the notorious ".exe" extension that,
only for the above strings, will be added automatically.
The `$PATH` is inherited from `os.environ['PATH']` and
passed to `subprocess.Popen` using the `env` argument.
If you haven't added GraphViz to your `$PATH` on Windows,
then you may want to give the absolute path to the
executable (for example, to `dot.exe`) in `prog`.
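Example (a minimal sketch; assumes GraphViz's `dot` is on the
`$PATH`):
g = pydot.Dot("g", graph_type="digraph")
g.add_edge(pydot.Edge("a", "b"))
png_bytes = g.create(prog="dot", format="png")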
"""
if prog is None:
prog = self.prog
assert prog is not None
if isinstance(prog, (list, tuple)):
prog, args = prog[0], prog[1:]
else:
args = []
# temp file
tmp_fd, tmp_name = tempfile.mkstemp()
os.close(tmp_fd)
self.write(tmp_name, encoding=encoding)
tmp_dir = os.path.dirname(tmp_name)
# For each of the image files...
for img in self.shape_files:
# Get its data
f = open(img, "rb")
f_data = f.read()
f.close()
# And copy it under a file with the same name in
# the temporary directory
f = open(os.path.join(tmp_dir, os.path.basename(img)), "wb")
f.write(f_data)
f.close()
arguments = ["-T{}".format(format)] + args + [tmp_name]
try:
stdout_data, stderr_data, process = call_graphviz(
program=prog,
arguments=arguments,
working_dir=tmp_dir,
)
except OSError as e:
if e.errno == errno.ENOENT:
args = list(e.args)
args[1] = '"{prog}" not found in path.'.format(prog=prog)
raise OSError(*args)
else:
raise
# clean file litter
for img in self.shape_files:
os.unlink(os.path.join(tmp_dir, os.path.basename(img)))
os.unlink(tmp_name)
if process.returncode != 0:
message = (
'"{prog}" with args {arguments} returned code: {code}\n\n'
"stdout, stderr:\n {out}\n{err}\n"
).format(
prog=prog,
arguments=arguments,
code=process.returncode,
out=stdout_data,
err=stderr_data,
)
print(message)
assert (
process.returncode == 0
), '"{prog}" with args {arguments} returned code: {code}'.format(
prog=prog,
arguments=arguments,
code=process.returncode,
)
return stdout_data
| 54,064
| 28.837196
| 79
|
py
|
pydot
|
pydot-master/src/pydot/exceptions.py
|
"""Exception classes for pydot."""
class PydotException(Exception):
"""Base class for exceptions in Pydot.
This base class will not be raised directly.
Catch this base class to catch all derived exceptions, though be
aware that pydot may raise Python built-in exceptions or pyparsing
exceptions as well.
"""
class Error(PydotException):
"""General error handling class."""
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
| 516
| 21.478261
| 70
|
py
|
pydot
|
pydot-master/src/pydot/__init__.py
|
"""An interface to GraphViz."""
__author__ = "Ero Carrera"
__version__ = "2.0.0.dev0"
__license__ = "MIT"
from pydot.exceptions import *
from pydot.core import *
| 164
| 17.333333
| 31
|
py
|
pydot
|
pydot-master/src/pydot/dot_parser.py
|
"""Graphviz's dot language parser.
The dotparser parses GraphViz files in the DOT language
and transforms them into a class representation defined
by `pydot`.
Author: Michael Krause <michael@krause-software.de>
Fixes by: Ero Carrera <ero.carrera@gmail.com>
"""
from pyparsing import (
nestedExpr,
Literal,
CaselessLiteral,
Word,
OneOrMore,
Forward,
Group,
Optional,
Combine,
restOfLine,
cStyleComment,
nums,
alphanums,
printables,
ParseException,
ParseResults,
CharsNotIn,
QuotedString,
)
import pydot
__author__ = ["Michael Krause", "Ero Carrera"]
__license__ = "MIT"
class P_AttrList(object):
def __init__(self, toks):
self.attrs = {}
i = 0
while i < len(toks):
attrname = toks[i]
if i + 2 < len(toks) and toks[i + 1] == "=":
attrvalue = toks[i + 2]
i += 3
else:
attrvalue = None
i += 1
self.attrs[attrname] = attrvalue
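# Worked example (hypothetical tokens): the token stream
# ["color", "=", "red", "decorate"] yields
# {"color": "red", "decorate": None}.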
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.attrs)
class DefaultStatement(P_AttrList):
def __init__(self, default_type, attrs):
self.default_type = default_type
self.attrs = attrs
def __repr__(self):
return "%s(%s, %r)" % (
self.__class__.__name__,
self.default_type,
self.attrs,
)
top_graphs = list()
def push_top_graph_stmt(s, loc, toks):
attrs = {}
g = None
for element in toks:
if (
isinstance(element, (ParseResults, tuple, list))
and len(element) == 1
and isinstance(element[0], str)
):
element = element[0]
if element == "strict":
attrs["strict"] = True
elif element in ["graph", "digraph"]:
attrs = {}
g = pydot.Dot(graph_type=element, **attrs)
attrs["type"] = element
top_graphs.append(g)
elif isinstance(element, str):
g.set_name(element)
elif isinstance(element, pydot.Subgraph):
g.obj_dict["attributes"].update(element.obj_dict["attributes"])
g.obj_dict["edges"].update(element.obj_dict["edges"])
g.obj_dict["nodes"].update(element.obj_dict["nodes"])
g.obj_dict["subgraphs"].update(element.obj_dict["subgraphs"])
g.set_parent_graph(g)
elif isinstance(element, P_AttrList):
attrs.update(element.attrs)
elif isinstance(element, (ParseResults, list)):
add_elements(g, element)
else:
raise ValueError(
"Unknown element statement: {s}".format(s=element)
)
for g in top_graphs:
update_parent_graph_hierarchy(g)
if len(top_graphs) == 1:
return top_graphs[0]
return top_graphs
def update_parent_graph_hierarchy(g, parent_graph=None, level=0):
if parent_graph is None:
parent_graph = g
for key_name in ("edges",):
if isinstance(g, pydot.frozendict):
item_dict = g
else:
item_dict = g.obj_dict
if key_name not in item_dict:
continue
for key, objs in item_dict[key_name].items():
for obj in objs:
if (
"parent_graph" in obj
and obj["parent_graph"].get_parent_graph() == g
):
if obj["parent_graph"] is g:
pass
else:
obj["parent_graph"].set_parent_graph(parent_graph)
if key_name == "edges" and len(key) == 2:
for idx, vertex in enumerate(obj["points"]):
if isinstance(
vertex,
(pydot.Graph, pydot.Subgraph, pydot.Cluster),
):
vertex.set_parent_graph(parent_graph)
if isinstance(vertex, pydot.frozendict):
if vertex["parent_graph"] is g:
pass
else:
vertex["parent_graph"].set_parent_graph(
parent_graph
)
def add_defaults(element, defaults):
d = element.__dict__
for key, value in defaults.items():
if not d.get(key):
d[key] = value
def add_elements(
g, toks, defaults_graph=None, defaults_node=None, defaults_edge=None
):
if defaults_graph is None:
defaults_graph = {}
if defaults_node is None:
defaults_node = {}
if defaults_edge is None:
defaults_edge = {}
for elm_idx, element in enumerate(toks):
if isinstance(element, (pydot.Subgraph, pydot.Cluster)):
add_defaults(element, defaults_graph)
g.add_subgraph(element)
elif isinstance(element, pydot.Node):
add_defaults(element, defaults_node)
g.add_node(element)
elif isinstance(element, pydot.Edge):
add_defaults(element, defaults_edge)
g.add_edge(element)
elif isinstance(element, ParseResults):
for e in element:
add_elements(
g, [e], defaults_graph, defaults_node, defaults_edge
)
elif isinstance(element, DefaultStatement):
if element.default_type == "graph":
default_graph_attrs = pydot.Node("graph", **element.attrs)
g.add_node(default_graph_attrs)
elif element.default_type == "node":
default_node_attrs = pydot.Node("node", **element.attrs)
g.add_node(default_node_attrs)
elif element.default_type == "edge":
default_edge_attrs = pydot.Node("edge", **element.attrs)
g.add_node(default_edge_attrs)
defaults_edge.update(element.attrs)
else:
raise ValueError(
"Unknown DefaultStatement: {s}".format(
s=element.default_type
)
)
elif isinstance(element, P_AttrList):
g.obj_dict["attributes"].update(element.attrs)
else:
raise ValueError(
"Unknown element statement: {s}".format(s=element)
)
def push_graph_stmt(s, loc, toks):
g = pydot.Subgraph("")
add_elements(g, toks)
return g
def push_subgraph_stmt(s, loc, toks):
g = pydot.Subgraph("")
for e in toks:
if len(e) == 3:
e[2].set_name(e[1])
if e[0] == "subgraph":
e[2].obj_dict["show_keyword"] = True
return e[2]
else:
if e[0] == "subgraph":
e[1].obj_dict["show_keyword"] = True
return e[1]
return g
def push_default_stmt(s, loc, toks):
# The pydot class instances should be marked as
# default statements to be inherited by actual
# graphs, nodes and edges.
#
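# Worked example: the DOT statement `node [shape=box];` arrives
# here as toks and returns DefaultStatement("node", {"shape": "box"}).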
default_type = toks[0][0]
if len(toks) > 1:
attrs = toks[1].attrs
else:
attrs = {}
if default_type in ["graph", "node", "edge"]:
return DefaultStatement(default_type, attrs)
else:
raise ValueError("Unknown default statement: {s}".format(s=toks))
def push_attr_list(s, loc, toks):
p = P_AttrList(toks)
return p
def get_port(node):
if len(node) > 1:
if isinstance(node[1], ParseResults):
if len(node[1][0]) == 2:
if node[1][0][0] == ":":
return node[1][0][1]
return None
def do_node_ports(node):
node_port = ""
if len(node) > 1:
node_port = "".join([str(a) + str(b) for a, b in node[1]])
return node_port
def push_edge_stmt(s, loc, toks):
tok_attrs = [a for a in toks if isinstance(a, P_AttrList)]
attrs = {}
for a in tok_attrs:
attrs.update(a.attrs)
e = []
if isinstance(toks[0][0], pydot.Graph):
n_prev = pydot.frozendict(toks[0][0].obj_dict)
else:
n_prev = toks[0][0] + do_node_ports(toks[0])
if isinstance(toks[2][0], ParseResults):
n_next_list = [[n.get_name()] for n in toks[2][0]]
for n_next in n_next_list:
n_next_port = do_node_ports(n_next)
e.append(pydot.Edge(n_prev, n_next[0] + n_next_port, **attrs))
elif isinstance(toks[2][0], pydot.Graph):
e.append(
pydot.Edge(n_prev, pydot.frozendict(toks[2][0].obj_dict), **attrs)
)
elif isinstance(toks[2][0], pydot.Node):
node = toks[2][0]
if node.get_port() is not None:
name_port = node.get_name() + ":" + node.get_port()
else:
name_port = node.get_name()
e.append(pydot.Edge(n_prev, name_port, **attrs))
# if the target of this edge is the name of a node
elif isinstance(toks[2][0], str):
for n_next in [n for n in tuple(toks)[2::2]]:
if isinstance(n_next, P_AttrList) or not isinstance(
n_next[0], str
):
continue
n_next_port = do_node_ports(n_next)
e.append(pydot.Edge(n_prev, n_next[0] + n_next_port, **attrs))
n_prev = n_next[0] + n_next_port
else:
raise Exception(
"Edge target {r} with type {s} unsupported.".format(
r=toks[2][0], s=type(toks[2][0])
)
)
return e
def push_node_stmt(s, loc, toks):
if len(toks) == 2:
attrs = toks[1].attrs
else:
attrs = {}
node_name = toks[0]
if isinstance(node_name, (list, tuple)):
if len(node_name) > 0:
node_name = node_name[0]
n = pydot.Node(str(node_name), **attrs)
return n
graphparser = None
def graph_definition():
global graphparser
if not graphparser:
# punctuation
colon = Literal(":")
lbrace = Literal("{")
rbrace = Literal("}")
lbrack = Literal("[")
rbrack = Literal("]")
lparen = Literal("(")
rparen = Literal(")")
equals = Literal("=")
comma = Literal(",")
dot = Literal(".")
slash = Literal("/")
bslash = Literal("\\")
star = Literal("*")
semi = Literal(";")
at = Literal("@")
minus = Literal("-")
# keywords
strict_ = CaselessLiteral("strict")
graph_ = CaselessLiteral("graph")
digraph_ = CaselessLiteral("digraph")
subgraph_ = CaselessLiteral("subgraph")
node_ = CaselessLiteral("node")
edge_ = CaselessLiteral("edge")
# token definitions
identifier = Word(alphanums + "_.").setName("identifier")
double_quoted_string = QuotedString(
'"', multiline=True, unquoteResults=False, escChar="\\"
)
noncomma = "".join([c for c in printables if c != ","])
alphastring_ = OneOrMore(CharsNotIn(noncomma + " "))
def parse_html(s, loc, toks):
return "<%s>" % "".join(toks[0])
opener = "<"
closer = ">"
html_text = (
nestedExpr(opener, closer, (CharsNotIn(opener + closer)))
.setParseAction(parse_html)
.leaveWhitespace()
)
ID = (
identifier | html_text | double_quoted_string | alphastring_
).setName("ID")
float_number = Combine(
Optional(minus) + OneOrMore(Word(nums + "."))
).setName("float_number")
righthand_id = (float_number | ID).setName("righthand_id")
port_angle = (at + ID).setName("port_angle")
port_location = (
OneOrMore(Group(colon + ID))
| Group(colon + lparen + ID + comma + ID + rparen)
).setName("port_location")
port = (
Group(port_location + Optional(port_angle))
| Group(port_angle + Optional(port_location))
).setName("port")
node_id = ID + Optional(port)
a_list = OneOrMore(
ID + Optional(equals + righthand_id) + Optional(comma.suppress())
).setName("a_list")
attr_list = OneOrMore(
lbrack.suppress() + Optional(a_list) + rbrack.suppress()
).setName("attr_list")
attr_stmt = (Group(graph_ | node_ | edge_) + attr_list).setName(
"attr_stmt"
)
edgeop = (Literal("--") | Literal("->")).setName("edgeop")
stmt_list = Forward()
graph_stmt = Group(
lbrace.suppress()
+ Optional(stmt_list)
+ rbrace.suppress()
+ Optional(semi.suppress())
).setName("graph_stmt")
edge_point = Forward()
edgeRHS = OneOrMore(edgeop + edge_point)
edge_stmt = edge_point + edgeRHS + Optional(attr_list)
subgraph = Group(subgraph_ + Optional(ID) + graph_stmt).setName(
"subgraph"
)
edge_point << Group(subgraph | graph_stmt | node_id).setName(
"edge_point"
)
node_stmt = (
node_id + Optional(attr_list) + Optional(semi.suppress())
).setName("node_stmt")
assignment = (ID + equals + righthand_id).setName("assignment")
stmt = (
assignment
| edge_stmt
| attr_stmt
| subgraph
| graph_stmt
| node_stmt
).setName("stmt")
stmt_list << OneOrMore(stmt + Optional(semi.suppress()))
graphparser = OneOrMore(
(
Optional(strict_)
+ Group((graph_ | digraph_))
+ Optional(ID)
+ graph_stmt
).setResultsName("graph")
)
singleLineComment = Group("//" + restOfLine) | Group("#" + restOfLine)
# actions
graphparser.ignore(singleLineComment)
graphparser.ignore(cStyleComment)
assignment.setParseAction(push_attr_list)
a_list.setParseAction(push_attr_list)
edge_stmt.setParseAction(push_edge_stmt)
node_stmt.setParseAction(push_node_stmt)
attr_stmt.setParseAction(push_default_stmt)
subgraph.setParseAction(push_subgraph_stmt)
graph_stmt.setParseAction(push_graph_stmt)
graphparser.setParseAction(push_top_graph_stmt)
return graphparser
def parse_dot_data(s):
"""Parse DOT description in (unicode) string `s`.
@return: Graphs that result from parsing.
@rtype: `list` of `pydot.Dot`
"""
global top_graphs
top_graphs = list()
try:
graphparser = graph_definition()
graphparser.parseWithTabs()
tokens = graphparser.parseString(s)
return list(tokens)
except ParseException as err:
print(err.line)
print(" " * (err.column - 1) + "^")
print(err)
return None
| 15,074
| 25.871658
| 78
|
py
|
pydot
|
pydot-master/test/pydot_unittest.py
|
# coding=utf-8
"""Unit testing of `pydot`."""
# TODO:
# -test graph generation APIs (from adjacency, etc..)
# -test del_node, del_edge methods
# -test Common.set method
import argparse
from hashlib import sha256
import io
import os
import pickle
import string
import sys
import warnings
import chardet
import pydot
import unittest
TEST_PROGRAM = "dot"
TESTS_DIR_1 = "my_tests"
TESTS_DIR_2 = "graphs"
class TestGraphAPI(unittest.TestCase):
def setUp(self):
self._reset_graphs()
def _reset_graphs(self):
self.graph_directed = pydot.Graph("testgraph", graph_type="digraph")
def test_keep_graph_type(self):
g = pydot.Dot(graph_name="Test", graph_type="graph")
self.assertEqual(g.get_type(), "graph")
g = pydot.Dot(graph_name="Test", graph_type="digraph")
self.assertEqual(g.get_type(), "digraph")
def test_add_style(self):
g = pydot.Dot(graph_name="Test", graph_type="graph")
node = pydot.Node("mynode")
node.add_style("abc")
self.assertEqual(node.get_style(), "abc")
node.add_style("def")
self.assertEqual(node.get_style(), "abc,def")
node.add_style("ghi")
self.assertEqual(node.get_style(), "abc,def,ghi")
def test_create_simple_graph_with_node(self):
g = pydot.Dot()
g.set_type("digraph")
node = pydot.Node("legend")
node.set("shape", "box")
g.add_node(node)
node.set("label", "mine")
s = g.to_string()
expected = "digraph G {\nlegend [label=mine, shape=box];\n}\n"
assert s == expected
def test_attribute_with_implicit_value(self):
d = 'digraph {\na -> b[label="hi", decorate];\n}'
graphs = pydot.graph_from_dot_data(d)
(g,) = graphs
attrs = g.get_edges()[0].get_attributes()
self.assertEqual("decorate" in attrs, True)
def test_subgraphs(self):
g = pydot.Graph()
s = pydot.Subgraph("foo")
self.assertEqual(g.get_subgraphs(), [])
self.assertEqual(g.get_subgraph_list(), [])
g.add_subgraph(s)
self.assertEqual(g.get_subgraphs()[0].get_name(), s.get_name())
self.assertEqual(g.get_subgraph_list()[0].get_name(), s.get_name())
def test_graph_pickling(self):
g = pydot.Graph()
s = pydot.Subgraph("foo")
g.add_subgraph(s)
g.add_edge(pydot.Edge("A", "B"))
g.add_edge(pydot.Edge("A", "C"))
g.add_edge(pydot.Edge(("D", "E")))
g.add_node(pydot.Node("node!"))
pickle.dumps(g)
def test_unicode_ids(self):
node1 = '"aánñoöüé€"'
node2 = '"îôø®çßΩ"'
g = pydot.Dot()
g.set_charset("latin1")
g.add_node(pydot.Node(node1))
g.add_node(pydot.Node(node2))
g.add_edge(pydot.Edge(node1, node2))
self.assertEqual(g.get_node(node1)[0].get_name(), node1)
self.assertEqual(g.get_node(node2)[0].get_name(), node2)
self.assertEqual(g.get_edges()[0].get_source(), node1)
self.assertEqual(g.get_edges()[0].get_destination(), node2)
graphs = pydot.graph_from_dot_data(g.to_string())
(g2,) = graphs
self.assertEqual(g2.get_node(node1)[0].get_name(), node1)
self.assertEqual(g2.get_node(node2)[0].get_name(), node2)
self.assertEqual(g2.get_edges()[0].get_source(), node1)
self.assertEqual(g2.get_edges()[0].get_destination(), node2)
def test_graph_simplify(self):
# Fail example: pydot 1.0.2. GH pydot/pydot#92 OP patch 1.
g = pydot.Graph()
g.add_edge(pydot.Edge("a", "b"))
g.add_edge(pydot.Edge("a", "b"))
g.add_edge(pydot.Edge("b", "a"))
g.add_edge(pydot.Edge("b", "a"))
test_combinations = [
(
"graph",
False,
"graph G { a -- b; a -- b; b -- a; b -- a; }",
),
(
"graph",
True,
"graph G { a -- b; }",
),
(
"digraph",
False,
"digraph G { a -> b; a -> b; b -> a; b -> a; }",
),
(
"digraph",
True,
"digraph G { a -> b; b -> a; }",
),
]
expected_concat = observed_concat = ""
for (graph_type, simplify, expected) in test_combinations:
expected_concat += "graph_type %s, simplify %s: %s\n" % (
graph_type,
simplify,
expected,
)
g.set_type(graph_type)
g.set_simplify(simplify)
try:
observed = " ".join(g.to_string().split())
except (NameError, TypeError) as e:
observed = "%s: %s" % (type(e).__name__, e)
observed_concat += "graph_type %s, simplify %s: %s\n" % (
graph_type,
simplify,
observed,
)
self.maxDiff = None
self.assertMultiLineEqual(expected_concat, observed_concat)
def test_graph_with_shapefiles(self):
shapefile_dir = os.path.join(test_dir, "from-past-to-future")
# image files are omitted from sdist
if not os.path.isdir(shapefile_dir):
warnings.warn(
"Skipping tests that involve images, "
"they can be found in the `git` repository."
)
return
dot_file = os.path.join(shapefile_dir, "from-past-to-future.dot")
pngs = [
os.path.join(shapefile_dir, fname)
for fname in os.listdir(shapefile_dir)
if fname.endswith(".png")
]
f = open(dot_file, "rt")
graph_data = f.read()
f.close()
graphs = pydot.graph_from_dot_data(graph_data)
(g,) = graphs
g.set_shape_files(pngs)
jpe_data = g.create(format="jpe")
hexdigest = sha256(jpe_data).hexdigest()
hexdigest_original = self._render_with_graphviz(
dot_file, encoding="ascii"
)
self.assertEqual(hexdigest, hexdigest_original)
def test_multiple_graphs(self):
graph_data = "graph A { a->b };\ngraph B {c->d}"
graphs = pydot.graph_from_dot_data(graph_data)
n = len(graphs)
assert n == 2, n
names = [g.get_name() for g in graphs]
assert names == ["A", "B"], names
def _render_with_graphviz(self, filename, encoding):
with io.open(filename, "rt", encoding=encoding) as stdin:
stdout_data, stderr_data, process = pydot.call_graphviz(
program=TEST_PROGRAM,
arguments=["-Tjpe"],
working_dir=os.path.dirname(filename),
stdin=stdin,
)
assert process.returncode == 0, stderr_data
return sha256(stdout_data).hexdigest()
def _render_with_pydot(self, filename, encoding):
c = pydot.graph_from_dot_file(filename, encoding=encoding)
jpe_data = bytearray()
for g in c:
jpe_data.extend(
g.create(prog=TEST_PROGRAM, format="jpe", encoding=encoding)
)
return sha256(jpe_data).hexdigest()
def test_my_regression_tests(self):
path = os.path.join(test_dir, TESTS_DIR_1)
self._render_and_compare_dot_files(path)
def test_graphviz_regression_tests(self):
path = os.path.join(test_dir, TESTS_DIR_2)
self._render_and_compare_dot_files(path)
def _render_and_compare_dot_files(self, directory):
# files that confuse `chardet`
encodings = {"Latin1.dot": "latin-1"}
dot_files = [
fname for fname in os.listdir(directory) if fname.endswith(".dot")
]
for fname in dot_files:
fpath = os.path.join(directory, fname)
with open(fpath, "rb") as f:
s = f.read()
estimate = chardet.detect(s)
encoding = encodings.get(fname, estimate["encoding"])
os.sys.stdout.write("#")
os.sys.stdout.flush()
pydot_sha = self._render_with_pydot(fpath, encoding)
graphviz_sha = self._render_with_graphviz(fpath, encoding)
assert pydot_sha == graphviz_sha, (pydot_sha, graphviz_sha)
def test_numeric_node_id(self):
self._reset_graphs()
self.graph_directed.add_node(pydot.Node(1))
self.assertEqual(self.graph_directed.get_nodes()[0].get_name(), "1")
def test_quoted_node_id(self):
self._reset_graphs()
self.graph_directed.add_node(pydot.Node('"node"'))
self.assertEqual(
self.graph_directed.get_nodes()[0].get_name(), '"node"'
)
def test_quoted_node_id_to_string_no_attributes(self):
self._reset_graphs()
self.graph_directed.add_node(pydot.Node('"node"'))
self.assertEqual(
self.graph_directed.get_nodes()[0].to_string(), '"node";'
)
def test_keyword_node_id(self):
self._reset_graphs()
self.graph_directed.add_node(pydot.Node("node"))
self.assertEqual(self.graph_directed.get_nodes()[0].get_name(), "node")
def test_keyword_node_id_to_string_no_attributes(self):
self._reset_graphs()
self.graph_directed.add_node(pydot.Node("node"))
self.assertEqual(self.graph_directed.get_nodes()[0].to_string(), "")
def test_keyword_node_id_to_string_with_attributes(self):
self._reset_graphs()
self.graph_directed.add_node(pydot.Node("node", shape="box"))
self.assertEqual(
self.graph_directed.get_nodes()[0].to_string(), "node [shape=box];"
)
def test_names_of_a_thousand_nodes(self):
self._reset_graphs()
names = {"node_%05d" % i for i in range(10 ** 3)}
for name in names:
self.graph_directed.add_node(pydot.Node(name, label=name))
self.assertEqual(
{n.get_name() for n in self.graph_directed.get_nodes()}, names
)
def test_executable_not_found_exception(self):
graph = pydot.Dot("graphname", graph_type="digraph")
self.assertRaises(Exception, graph.create, prog="dothehe")
def test_graph_add_node_argument_type(self):
self._reset_graphs()
self.assertRaises(TypeError, self.graph_directed.add_node, 1)
self.assertRaises(TypeError, self.graph_directed.add_node, "a")
def test_graph_add_edge_argument_type(self):
self._reset_graphs()
self.assertRaises(TypeError, self.graph_directed.add_edge, 1)
self.assertRaises(TypeError, self.graph_directed.add_edge, "a")
def test_graph_add_subgraph_argument_type(self):
self._reset_graphs()
self.assertRaises(TypeError, self.graph_directed.add_subgraph, 1)
self.assertRaises(TypeError, self.graph_directed.add_subgraph, "a")
def test_quoting(self):
g = pydot.Dot()
g.add_node(pydot.Node("test", label=string.printable))
data = g.create(format="jpe")
self.assertEqual(len(data) > 0, True)
def test_dot_args(self):
g = pydot.Dot()
u = pydot.Node("a")
g.add_node(u)
g.write_svg("test.svg", prog=["twopi", "-Goverlap=scale"])
def test_edge_equality_basics_3_same_points_not_not_equal(self):
# Fail example: pydot 1.4.1 on Python 2.
g = pydot.Graph()
e1 = pydot.Edge("a", "b")
e2 = pydot.Edge("a", "b")
g.add_edge(e1)
g.add_edge(e2)
self.assertFalse(e1 != e2)
def test_edge_point_namestr(self):
self._reset_graphs()
self.graph_directed.add_edge(pydot.Edge("a", "b"))
self.assertEqual(
self.graph_directed.get_edges()[0].to_string(), "a -> b;"
)
def test_edge_point_object_node(self):
self._reset_graphs()
self.graph_directed.add_edge(
pydot.Edge(pydot.Node("a"), pydot.Node("b"))
)
self.assertEqual(
self.graph_directed.get_edges()[0].to_string(), "a -> b;"
)
def test_edge_point_object_subgraph(self):
self._reset_graphs()
self.graph_directed.add_edge(
pydot.Edge(pydot.Subgraph("a"), pydot.Subgraph("b"))
)
self.assertEqual(
self.graph_directed.get_edges()[0].to_string(), "a -> b;"
)
def test_edge_point_object_cluster(self):
self._reset_graphs()
self.graph_directed.add_edge(
pydot.Edge(pydot.Cluster("a"), pydot.Cluster("b"))
)
self.assertEqual(
self.graph_directed.get_edges()[0].to_string(),
"cluster_a -> cluster_b;",
)
def test_graph_from_adjacency_matrix(self):
g = pydot.graph_from_adjacency_matrix(
[[0, 1, 0], [1, 0, 0], [0, 1, 1]], directed=True
)
s = " ".join(g.to_string().split())
self.assertEqual(s, "digraph G { 1 -> 2; 2 -> 1; 3 -> 2; 3 -> 3; }")
g = pydot.graph_from_adjacency_matrix(
[[0, 1, 0], [1, 0, 0], [0, 0, 1]], directed=False
)
s = " ".join(g.to_string().split())
self.assertEqual(s, "graph G { 1 -- 2; 3 -- 3; }")
def test_graph_from_incidence_matrix(self):
g = pydot.graph_from_incidence_matrix(
[[-1, 1, 0], [1, -1, 0], [0, 1, -1]], directed=True
)
s = " ".join(g.to_string().split())
self.assertEqual(s, "digraph G { 1 -> 2; 2 -> 1; 3 -> 2; }")
g = pydot.graph_from_incidence_matrix(
[[1, 1, 0], [0, 1, 1]], directed=False
)
s = " ".join(g.to_string().split())
self.assertEqual(s, "graph G { 1 -- 2; 2 -- 3; }")
def test_version(self):
self.assertIsInstance(pydot.__version__, str)
def parse_args():
"""Parse arguments. Deprecated since pydot 2.0."""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--no-check", action="store_true")
args, unknown = parser.parse_known_args()
if args.no_check:
print(
"WARNING: The --no-check option became redundant with pydot 2.0 "
"and will be removed in a future major release of pydot.\n",
file=sys.stderr,
)
# avoid confusing `unittest`
sys.argv = [sys.argv[0]] + unknown
if __name__ == "__main__":
parse_args()
test_dir = os.path.dirname(sys.argv[0])
print("The tests are using `pydot` from: {pd}".format(pd=pydot))
unittest.main(verbosity=2)
| 14,517
| 33.484561
| 79
|
py
|
OPM
|
OPM-master/napari_opm_pymmcoreplus.py
|
'''
Initial work on napari interface to OPM using pymmcore-plus, magic-gui, and magic-class
Relevant hardware:
NI DAQ
Hamamatsu Fusion-BT
Coherent OBIS LaserBoxxx
This will work with any setup that can set up the camera as master, where lasers can be driven ON/OFF during camera readout time using digital input,
galvo can be moved during camera readout time, and DAQ can update digital/analog lines based on trigger from camera during camera readout time.
For different hardware, the specific calls will have to modified to get the hardware triggering working.
D. Shepherd - 12/2021
'''
from pymmcore_plus import RemoteMMCore
from magicclass import magicclass, set_design
from magicgui import magicgui
import napari
from pathlib import Path
import numpy as np
import PyDAQmx as daq
import ctypes as ct
from reconstruction.image_post_processing import deskew
from napari.qt.threading import thread_worker
import time
# OPM control UI element
@magicclass(labels=False)
@set_design(text="ASU Snouty-OPM control")
class OpmControl:
# initialize
def __init__(self):
self.active_channel = "Off"
self.channel_powers = np.zeros(5,dtype=np.int8)
self.channel_states=[False,False,False,False,False]
self.exposure_ms = 10.0 # unit: ms
self.scan_axis_step_um = 0.4 # unit: um
self.scan_axis_calibration = 0.043 # unit: V / um
self.galvo_neutral_volt = -.27 # unit: V
self.scan_axis_range_um = 50.0 # unit: um
self.camera_pixel_size_um = .115 # unit: um
self.opm_tilt = 30 # unit: degrees
self.ROI_uleft_corner_x = int(200) # unit: camera pixels
self.ROI_uleft_corner_y = int(896) # unit: camera pixels
self.ROI_width_x = int(1900) # unit: camera pixels
self.ROI_width_y = int(512) # unit: camera pixels
self.path_to_mm_config = Path('C:/Program Files/Micro-Manager-2.0gamma/temp_HamDCAM.cfg')
self.channel_labels = ["405", "488", "561", "635", "730"]
self.do_ind = [0, 1, 2, 3, 4] # digital output line corresponding to each channel
self.debug=False
self.powers_changed = True
self.channels_changed = True
self.ROI_changed = True
self.exposure_changed = True
self.footprint_changed = True
self.galvo_step_changed = True
self.DAQ_running = False
# start pymmcore-plus
def __post_init__(self):
self.mmc = RemoteMMCore()
self.mmc.loadSystemConfiguration(str(self.path_to_mm_config))
# set 2D acquisition thread worker
def _set_worker_2d(self,worker_2d):
self.worker_2d = worker_2d
self.worker_2d_started = False
self.worker_2d_running = False
# set 3D acquisition thread worker
def _set_worker_3d(self,worker_3d):
self.worker_3d = worker_3d
self.worker_3d_started = False
self.worker_3d_running = False
# set viewer
def _set_viewer(self,viewer):
self.viewer = viewer
# update viewer layers
def _update_layers(self,new_image):
channel_names = ['405nm','488nm','561nm','635nm','730nm']
colormaps = ['bop purple','bop blue','bop orange','red','grey']
for c in self.active_channel_indices:
channel_name = channel_names[c]
try:
self.viewer.layers[channel_name].data = new_image[c,:]
except KeyError:
self.viewer.add_image(new_image[c,:], name=channel_name, blending='additive', colormap=colormaps[c],contrast_limits=[100,.9*np.max(new_image[c,:])])
@thread_worker
def _acquire_2d_data(self):
while True:
# parse which channels are active
self.active_channel_indices = [ind for ind, st in zip(self.do_ind, self.channel_states) if st]
self.n_active_channels = len(self.active_channel_indices)
if self.n_active_channels == 0:
yield None
if self.debug:
print("%d active channels: " % self.n_active_channels, end="")
for ind in self.active_channel_indices:
print("%s " % self.channel_labels[ind], end="")
print("")
if self.powers_changed:
self._set_mmc_laser_power()
self.powers_changed = False
with RemoteMMCore() as mmc_2d:
if self.ROI_changed:
current_ROI = mmc_2d.getROI()
if not(current_ROI[2]==2304) or not(current_ROI[3]==2304):
mmc_2d.clearROI()
mmc_2d.waitForDevice('OrcaFusionBT')
mmc_2d.setROI(int(self.ROI_uleft_corner_x),int(self.ROI_uleft_corner_y),int(self.ROI_width_x),int(self.ROI_width_y))
mmc_2d.waitForDevice('OrcaFusionBT')
self.ROI_changed = False
# set exposure time
if self.exposure_changed:
mmc_2d.setExposure(self.exposure_ms)
self.exposure_changed = False
# get raw image size
raw_image_2d = np.zeros([len(self.do_ind),self.ROI_width_y,self.ROI_width_x],dtype=np.uint16)  # one image slice per possible channel
for c in self.active_channel_indices:
mmc_2d.snapImage()
raw_image_2d[c,:] = mmc_2d.getImage()
time.sleep(.01)
yield raw_image_2d
@thread_worker
def _acquire_3d_data(self):
while True:
with RemoteMMCore() as mmc_3d:
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------Begin setup of scan parameters--------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
# parse which channels are active
self.active_channel_indices = [ind for ind, st in zip(self.do_ind, self.channel_states) if st]
self.n_active_channels = len(self.active_channel_indices)
if self.debug:
print("%d active channels: " % self.n_active_channels, end="")
for ind in self.active_channel_indices:
print("%s " % self.channel_labels[ind], end="")
print("")
n_timepoints = 1
if self.ROI_changed:
current_ROI = mmc_3d.getROI()
if not(current_ROI[2]==2304) or not(current_ROI[3]==2304):
mmc_3d.clearROI()
mmc_3d.waitForDevice('OrcaFusionBT')
mmc_3d.setROI(int(self.ROI_uleft_corner_x),int(self.ROI_uleft_corner_y),int(self.ROI_width_x),int(self.ROI_width_y))
mmc_3d.waitForDevice('OrcaFusionBT')
self.ROI_changed = False
# set exposure time
if self.exposure_changed:
mmc_3d.setExposure(self.exposure_ms)
self.exposure_changed = False
if self.powers_changed:
self._set_mmc_laser_power()
self.powers_changed = False
if self.footprint_changed:
# determine sweep footprint
self.min_volt = -(self.scan_axis_range_um * self.scan_axis_calibration / 2.) + self.galvo_neutral_volt # unit: volts
self.scan_axis_step_volts = self.scan_axis_step_um * self.scan_axis_calibration # unit: V
self.scan_axis_range_volts = self.scan_axis_range_um * self.scan_axis_calibration # unit: V
self.scan_steps = np.rint(self.scan_axis_range_volts / self.scan_axis_step_volts).astype(np.int16) # galvo steps
if self.channels_changed or self.footprint_changed or not(self.DAQ_running):
if self.DAQ_running:
self._stop_DAQ()
self._create_DAQ_arrays()
self._start_DAQ()
self.raw_image_stack = np.zeros([len(self.do_ind),self.scan_steps,self.ROI_width_y,self.ROI_width_x]).astype(np.uint16)
# change step size from physical space (um) to camera space (pixels)
pixel_step = self.scan_axis_step_um/self.camera_pixel_size_um # (pixels)
# calculate the number of pixels scanned during stage scan
scan_end = self.scan_steps * pixel_step # (pixels)
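# Worked example with the defaults above: 0.4 um / 0.115 um
# ~= 3.48 pixels per galvo step; 125 steps -> scan_end ~= 435 pixels.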
# calculate properties for final image
final_ny = np.int64(np.ceil(scan_end+self.ROI_width_y*np.cos(self.opm_tilt*np.pi/180))) # (pixels)
final_nz = np.int64(np.ceil(self.ROI_width_y*np.sin(self.opm_tilt*np.pi/180))) # (pixels)
final_nx = np.int64(self.ROI_width_x) # (pixels)
deskewed_image = np.zeros([len(self.do_ind),final_nz,final_ny,final_nx]).astype(np.uint16)
self.channels_changed = False
self.footprint_changed = False
if self.debug:
# output experiment info
print("Scan axis range: %.1f um = %0.3fV, Scan axis step: %.1f nm = %0.3fV , Number of galvo positions: %d" %
(self.scan_axis_range_um, self.scan_axis_range_volts, self.scan_axis_step_um * 1000, self.scan_axis_step_volts, self.scan_steps))
print('Galvo neutral (Volt): ' + str(self.galvo_neutral_volt)+', Min voltage (volt): '+str(self.min_volt))
print('Time points: ' + str(n_timepoints))
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------End setup of scan parameters----------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------Start acquisition and deskew----------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
# run hardware triggered acquisition
mmc_3d.startSequenceAcquisition(int(self.n_active_channels*self.scan_steps),0,True)
for z in range(self.scan_steps):
for c in self.active_channel_indices:
while mmc_3d.getRemainingImageCount()==0:
pass
self.raw_image_stack[c,z,:] = mmc_3d.popNextImage()
mmc_3d.stopSequenceAcquisition()
# deskew parameters
deskew_parameters = np.empty([3])
deskew_parameters[0] = self.opm_tilt # (degrees)
deskew_parameters[1] = self.scan_axis_step_um*100 # (nm)
deskew_parameters[2] = self.camera_pixel_size_um*100 # (nm)
for c in self.active_channel_indices:
deskewed_image[c,:] = deskew(np.flipud(self.raw_image_stack[c,:]),*deskew_parameters).astype(np.uint16)
yield deskewed_image
#------------------------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------End acquisition and deskew-----------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
# laser to hardware control
def _lasers_to_hardware(self):
with RemoteMMCore() as mmc_lasers_hardware:
# turn all lasers off
mmc_lasers_hardware.setConfig('Laser','Off')
mmc_lasers_hardware.waitForConfig('Laser','Off')
# set all laser to external triggering
mmc_lasers_hardware.setConfig('Modulation-405','External-Digital')
mmc_lasers_hardware.waitForConfig('Modulation-405','External-Digital')
mmc_lasers_hardware.setConfig('Modulation-488','External-Digital')
mmc_lasers_hardware.waitForConfig('Modulation-488','External-Digital')
mmc_lasers_hardware.setConfig('Modulation-561','External-Digital')
mmc_lasers_hardware.waitForConfig('Modulation-561','External-Digital')
mmc_lasers_hardware.setConfig('Modulation-637','External-Digital')
mmc_lasers_hardware.waitForConfig('Modulation-637','External-Digital')
mmc_lasers_hardware.setConfig('Modulation-730','External-Digital')
mmc_lasers_hardware.waitForConfig('Modulation-730','External-Digital')
# turn all lasers on
mmc_lasers_hardware.setConfig('Laser','AllOn')
mmc_lasers_hardware.waitForConfig('Laser','AllOn')
# lasers to software control
def _lasers_to_software(self):
with RemoteMMCore() as mmc_lasers_software:
# turn all lasers off
mmc_lasers_software.setConfig('Laser','Off')
mmc_lasers_software.waitForConfig('Laser','Off')
# set all lasers back to software control
mmc_lasers_software.setConfig('Modulation-405','CW (constant power)')
mmc_lasers_software.waitForConfig('Modulation-405','CW (constant power)')
mmc_lasers_software.setConfig('Modulation-488','CW (constant power)')
mmc_lasers_software.waitForConfig('Modulation-488','CW (constant power)')
mmc_lasers_software.setConfig('Modulation-561','CW (constant power)')
mmc_lasers_software.waitForConfig('Modulation-561','CW (constant power)')
mmc_lasers_software.setConfig('Modulation-637','CW (constant power)')
mmc_lasers_software.waitForConfig('Modulation-637','CW (constant power)')
mmc_lasers_software.setConfig('Modulation-730','CW (constant power)')
mmc_lasers_software.waitForConfig('Modulation-730','CW (constant power)')
# reset galvo controller back to neutral voltage
def _reset_galvo(self):
# put the galvo back to neutral
# first, set the galvo to the initial point if it is not already
taskAO_last = daq.Task()
taskAO_last.CreateAOVoltageChan("/Dev1/ao0","",-6.0,6.0,daq.DAQmx_Val_Volts,None)
taskAO_last.WriteAnalogScalarF64(True, -1, self.galvo_neutral_volt, None)
taskAO_last.StopTask()
taskAO_last.ClearTask()
def _set_mmc_laser_power(self):
with RemoteMMCore() as mmc_laser_power:
mmc_laser_power.setProperty(r'Coherent-Scientific Remote',r'Laser 405-100C - PowerSetpoint (%)',float(self.channel_powers[0]))
mmc_laser_power.setProperty(r'Coherent-Scientific Remote',r'Laser 488-150C - PowerSetpoint (%)',float(self.channel_powers[1]))
mmc_laser_power.setProperty(r'Coherent-Scientific Remote',r'Laser OBIS LS 561-150 - PowerSetpoint (%)',float(self.channel_powers[2]))
mmc_laser_power.setProperty(r'Coherent-Scientific Remote',r'Laser 637-140C - PowerSetpoint (%)',float(self.channel_powers[3]))
mmc_laser_power.setProperty(r'Coherent-Scientific Remote',r'Laser 730-30C - PowerSetpoint (%)',float(self.channel_powers[4]))
def _create_DAQ_arrays(self):
# setup DAQ
nvoltage_steps = self.scan_steps
# two DAQ samples per frame (one fewer for the first frame), plus one final sample to reset the galvo voltage
#samples_per_ch = (nvoltage_steps * 2 - 1) + 1
self.samples_per_ch = (nvoltage_steps * 2 * self.n_active_channels - 1) + 1
self.DAQ_sample_rate_Hz = 10000
#retriggerable = True
num_DI_channels = 8
# Generate values for DO
self.dataDO = np.zeros((self.samples_per_ch, num_DI_channels), dtype=np.uint8)
for ii, ind in enumerate(self.active_channel_indices):
self.dataDO[2*ii::2*self.n_active_channels, ind] = 1
self.dataDO[-1, :] = 0
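# Worked example: with active channels 0 and 1, channel 0 fires on
# samples 0, 4, 8, ... and channel 1 on samples 2, 6, 10, ...;
# all other samples (and the final reset sample) stay low.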
# generate voltage steps
max_volt = self.min_volt + self.scan_axis_range_volts # 2
voltage_values = np.linspace(self.min_volt, max_volt, nvoltage_steps)
# Generate values for AO
waveform = np.zeros(self.samples_per_ch)
# one less voltage value for first frame
waveform[0:2*self.n_active_channels - 1] = voltage_values[0]
if len(voltage_values) > 1:
# (2 * # active channels) voltage values for all other frames
waveform[2*self.n_active_channels - 1:-1] = np.kron(voltage_values[1:], np.ones(2 * self.n_active_channels))
# set back to initial value at end
waveform[-1] = voltage_values[0]
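# Worked example: 3 galvo steps and 2 active channels give
# samples_per_ch = 3*2*2 = 12 and a waveform of
# [v0]*3 + [v1]*4 + [v2]*4 + [v0].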
self.waveform = waveform
def _start_DAQ(self):
try:
# ----- DIGITAL input -------
self.taskDI = daq.Task()
self.taskDI.CreateDIChan("/Dev1/PFI0", "", daq.DAQmx_Val_ChanForAllLines)
## Configure change detection timing (from wave generator)
self.taskDI.CfgInputBuffer(0) # must be enforced for change-detection timing, i.e. no buffer
self.taskDI.CfgChangeDetectionTiming("/Dev1/PFI0", "/Dev1/PFI0", daq.DAQmx_Val_ContSamps, 0)
## Set the starting trigger
self.taskDI.CfgDigEdgeStartTrig("/Dev1/PFI0", daq.DAQmx_Val_Rising)
## Export DI signal to unused PFI pins, for clock and start
self.taskDI.ExportSignal(daq.DAQmx_Val_ChangeDetectionEvent, "/Dev1/PFI2")
self.taskDI.ExportSignal(daq.DAQmx_Val_StartTrigger, "/Dev1/PFI1")
# ----- DIGITAL output ------
self.taskDO = daq.Task()
# TO DO: Write each laser line separately!
self.taskDO.CreateDOChan("/Dev1/port0/line0:7", "", daq.DAQmx_Val_ChanForAllLines)
## Configure timing (from DI task)
self.taskDO.CfgSampClkTiming("/Dev1/PFI2", self.DAQ_sample_rate_Hz, daq.DAQmx_Val_Rising, daq.DAQmx_Val_ContSamps, self.samples_per_ch)
## Write the output waveform
samples_per_ch_ct_digital = ct.c_int32()
self.taskDO.WriteDigitalLines(self.samples_per_ch, False, 10.0, daq.DAQmx_Val_GroupByChannel, self.dataDO, ct.byref(samples_per_ch_ct_digital), None)
# ------- ANALOG output -----------
# first, set the galvo to the initial point if it is not already
self.taskAO_first = daq.Task()
self.taskAO_first.CreateAOVoltageChan("/Dev1/ao0", "", -6.0, 6.0, daq.DAQmx_Val_Volts, None)
self.taskAO_first.WriteAnalogScalarF64(True, -1, self.waveform[0], None)
self.taskAO_first.StopTask()
self.taskAO_first.ClearTask()
# now set up the task to ramp the galvo
self.taskAO = daq.Task()
self.taskAO.CreateAOVoltageChan("/Dev1/ao0", "", -6.0, 6.0, daq.DAQmx_Val_Volts, None)
## Configure timing (from DI task)
self.taskAO.CfgSampClkTiming("/Dev1/PFI2", self.DAQ_sample_rate_Hz, daq.DAQmx_Val_Rising, daq.DAQmx_Val_ContSamps, self.samples_per_ch)
## Write the output waveform
samples_per_ch_ct = ct.c_int32()
self.taskAO.WriteAnalogF64(self.samples_per_ch, False, 10.0, daq.DAQmx_Val_GroupByScanNumber, self.waveform, ct.byref(samples_per_ch_ct), None)
## ------ Start both tasks ----------
self.taskAO.StartTask()
self.taskDO.StartTask()
self.taskDI.StartTask()
self.DAQ_running = True
except daq.DAQError as err:
print("DAQmx Error %s"%err)
def _stop_DAQ(self):
# stop DAQ
try:
## Stop and clear both tasks
self.taskDI.StopTask()
self.taskDO.StopTask()
self.taskAO.StopTask()
self.taskDI.ClearTask()
self.taskAO.ClearTask()
self.taskDO.ClearTask()
self.DAQ_running = False
except daq.DAQError as err:
print("DAQmx Error %s"%err)
def _setup_camera(self):
with RemoteMMCore() as mmc_camera_setup:
# give camera time to change modes if necessary
mmc_camera_setup.setConfig('Camera-Setup','ScanMode3')
mmc_camera_setup.waitForConfig('Camera-Setup','ScanMode3')
# set camera to internal trigger
mmc_camera_setup.setConfig('Camera-TriggerSource','INTERNAL')
mmc_camera_setup.waitForConfig('Camera-TriggerSource','INTERNAL')
# configure camera output triggers to report exposure timing
mmc_camera_setup.setProperty('OrcaFusionBT',r'OUTPUT TRIGGER KIND[0]','EXPOSURE')
mmc_camera_setup.setProperty('OrcaFusionBT',r'OUTPUT TRIGGER KIND[1]','EXPOSURE')
mmc_camera_setup.setProperty('OrcaFusionBT',r'OUTPUT TRIGGER KIND[2]','EXPOSURE')
mmc_camera_setup.setProperty('OrcaFusionBT',r'OUTPUT TRIGGER POLARITY[0]','POSITIVE')
mmc_camera_setup.setProperty('OrcaFusionBT',r'OUTPUT TRIGGER POLARITY[1]','POSITIVE')
mmc_camera_setup.setProperty('OrcaFusionBT',r'OUTPUT TRIGGER POLARITY[2]','POSITIVE')
# startup instrument
def _startup(self):
self._set_mmc_laser_power()
self._lasers_to_hardware()
self._reset_galvo()
self._setup_camera()
# shutdown instrument
def _shutdown(self):
self._set_mmc_laser_power()
self._lasers_to_software()
if self.DAQ_running:
self._stop_DAQ()
self._reset_galvo()
# set exposure time
@magicgui(
auto_call=True,
exposure_ms={"widget_type": "FloatSpinBox", "min": 1, "max": 500,'label': 'Camera exposure (ms)'},
layout='horizontal'
)
def set_exposure(self, exposure_ms=10.0):
if not(exposure_ms == self.exposure_ms):
self.exposure_ms=exposure_ms
self.exposure_changed = True
else:
self.exposure_changed = False
# set camera crop
@magicgui(
auto_call=True,
uleft_corner_x={"widget_type": "SpinBox", "min": 0, "max": 2304,'label': 'ROI center (non-tilt)'},
uleft_corner_y={"widget_type": "SpinBox", "min": 0, "max": 2304,'label': 'ROI center (tilt)'},
width_x={"widget_type": "SpinBox", "min": 0, "max": 2304,'label': 'ROI width (non-tilt)'},
width_y={"widget_type": "SpinBox", "min": 0, "max": 2304,'label': 'ROI height (tilt)'},
layout='vertical'
)
def set_ROI(self, uleft_corner_x=200,uleft_corner_y=896,width_x=1800,width_y=512):
if not(int(uleft_corner_x)==self.ROI_uleft_corner_x) or not(int(uleft_corner_y)==self.ROI_uleft_corner_y) or not(int(width_x)==self.ROI_width_x) or not(int(width_y)==self.ROI_width_y):
self.ROI_uleft_corner_x=int(uleft_corner_x)
self.ROI_uleft_corner_y=int(uleft_corner_y)
self.ROI_width_x=int(width_x)
self.ROI_width_y=int(width_y)
self.ROI_changed = True
else:
self.ROI_changed = False
# set laser power(s)
@magicgui(
auto_call=True,
power_405={"widget_type": "FloatSpinBox", "min": 0, "max": 100, "label": '405nm power (%)'},
power_488={"widget_type": "FloatSpinBox", "min": 0, "max": 100, "label": '488nm power (%)'},
power_561={"widget_type": "FloatSpinBox", "min": 0, "max": 100, "label": '561nm power (%)'},
power_635={"widget_type": "FloatSpinBox", "min": 0, "max": 100, "label": '635nm power (%)'},
power_730={"widget_type": "FloatSpinBox", "min": 0, "max": 100, "label": '730nm power (%)'},
layout='vertical'
)
def set_laser_power(self, power_405=0.0, power_488=0.0, power_561=0.0, power_635=0.0, power_730=0.0,):
channel_powers = [power_405,power_488,power_561,power_635,power_730]
if not(np.array_equal(channel_powers,self.channel_powers)):
self.channel_powers=channel_powers
self.powers_changed = True
else:
self.powers_changed = False
# set active laser(s)
@magicgui(
auto_call=True,
active_channels = {"widget_type": "Select", "choices": ["Off","405","488","561","635","730"], "allow_multiple": True, "label": "Active channels"}
)
def set_active_channel(self, active_channels):
states = [False,False,False,False,False]
for channel in active_channels:
if channel == 'Off':
states = [False,False,False,False,False]
break
if channel == '405':
states[0]=True
elif channel == '488':
states[1]=True
elif channel == '561':
states[2]=True
elif channel == '635':
states[3]=True
elif channel == '730':
states[4]=True
if not(states==self.channel_states):
self.channel_states=states
self.channels_changed = True
else:
self.channels_changed = False
# set lateral galvo footprint
@magicgui(
auto_call=True,
galvo_footprint_um={"widget_type": "FloatSpinBox", "min": 5, "max": 200, "label": 'Galvo sweep (um)'},
layout='horizontal'
)
def set_galvo_sweep(self, galvo_footprint_um=50.0):
        if not(galvo_footprint_um==self.galvo_footprint_um):
self.galvo_footprint_um=galvo_footprint_um
self.footprint_changed = True
else:
self.footprint_changed = False
# set lateral galvo step size
@magicgui(
auto_call=True,
galvo_step={"widget_type": "FloatSpinBox", "min": 0, "max": 1, "label": 'Galvo step (um)'},
layout='horizontal'
)
def set_galvo_step(self, galvo_step=0.4):
if not(galvo_step==self.galvo_step):
self.galvo_step=galvo_step
self.galvo_step_changed = True
else:
self.galvo_step_changed = False
# control continuous 2D imaging (software triggering)
@magicgui(
auto_call=True,
live_mode_2D={"widget_type": "PushButton", "label": 'Start/Stop Live (2D)'},
layout='horizontal'
)
def live_mode_2D(self,live_mode_2D=False):
if not(self.worker_3d_running):
if self.worker_2d_running:
self.worker_2d.pause()
self.worker_2d_running = False
else:
if not(self.worker_2d_started):
self.worker_2d_started = True
self.worker_2d_running = True
self.worker_2d.start()
else:
self.worker_2d.resume()
self.worker_2d_running = True
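    # note: a napari thread_worker generator can only be start()-ed once; after that,
    # pause()/resume() toggle it, which is what the worker_2d_started flag tracks.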
# control continuous 3D volume (hardware triggering)
@magicgui(
auto_call=True,
live_mode_3D={"widget_type": "PushButton", "label": 'Start/Stop live (3D)'},
layout='horizontal'
)
def live_mode_3D(self,live_mode_3D):
if not(self.worker_2d_running):
if self.worker_3d_running:
self.worker_3d.pause()
self.worker_3d_running = False
self._stop_DAQ()
self._reset_galvo()
else:
if not(self.worker_3d_started):
self.worker_3d.start()
self.worker_3d_started = True
self.worker_3d_running = True
else:
self.worker_3d.resume()
self.worker_3d_running = True
def main():
# setup OPM GUI and Napari viewer
instrument_control_widget = OpmControl()
    # these methods must be private so magic-class does not display them in the GUI. Maybe a better solution is available?
instrument_control_widget._startup()
viewer = napari.Viewer()
    # these methods must be private so magic-class does not display them in the GUI. Maybe a better solution is available?
instrument_control_widget._set_viewer(viewer)
# setup 2D imaging thread worker
    # these methods must be private so magic-class does not display them in the GUI. Maybe a better solution is available?
worker_2d = instrument_control_widget._acquire_2d_data()
worker_2d.yielded.connect(instrument_control_widget._update_layers)
instrument_control_widget._set_worker_2d(worker_2d)
# setup 3D imaging thread worker
    # these methods must be private so magic-class does not display them in the GUI. Maybe a better solution is available?
worker_3d = instrument_control_widget._acquire_3d_data()
worker_3d.yielded.connect(instrument_control_widget._update_layers)
instrument_control_widget._set_worker_3d(worker_3d)
viewer.window.add_dock_widget(instrument_control_widget,name='ASU Snouty-OPM control')
# start Napari
napari.run()
# shutdown threads
worker_2d.quit()
worker_3d.quit()
# shutdown instrument
    # these methods must be private so magic-class does not display them in the GUI. Maybe a better solution is available?
instrument_control_widget._shutdown()
if __name__ == "__main__":
main()
| 30,110
| 44.970992
| 192
|
py
|
OPM
|
OPM-master/napari-control/opm_iterative_control.py
|
#!/usr/bin/python
'''
----------------------------------------------------------------------------------------
ASU OPM with iterative fluidics control via Napari, magic-class, and magic-gui
----------------------------------------------------------------------------------------
Peter Brown
Franky Djutanta
Douglas Shepherd
12/11/2021
douglas.shepherd@asu.edu
----------------------------------------------------------------------------------------
'''
from src.OPMIterative import OPMIterative
from src.OPMStageMonitor import OPMStageMonitor
from src.OPMStageScan import OPMStageScan
import napari
from pathlib import Path
from pymmcore_plus import RemoteMMCore
def main(path_to_mm_config=Path('C:/Program Files/Micro-Manager-2.0gamma/temp_HamDCAM.cfg')):
# launch pymmcore server
with RemoteMMCore() as mmc:
mmc.loadSystemConfiguration(str(path_to_mm_config))
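        # RemoteMMCore talks to an out-of-process pymmcore-plus server, so the widgets
        # created below (which open their own RemoteMMCore handles) presumably share
        # this same running core instance.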
# create Napari viewer
viewer = napari.Viewer(title='ASU OPM control -- iterative multiplexing')
# setup OPM widgets
iterative_control_widget = OPMIterative()
stage_display_widget = OPMStageMonitor()
instrument_control_widget = OPMStageScan(iterative_control_widget)
instrument_control_widget._set_viewer(viewer)
# startup instrument
instrument_control_widget._startup()
# create thread workers
# 2D
worker_2d = instrument_control_widget._acquire_2d_data()
worker_2d.yielded.connect(instrument_control_widget._update_layers)
instrument_control_widget._set_worker_2d(worker_2d)
# 3D
worker_3d = instrument_control_widget._acquire_3d_data()
worker_3d.yielded.connect(instrument_control_widget._update_layers)
instrument_control_widget._set_worker_3d(worker_3d)
# iterative 3D
instrument_control_widget._create_worker_iterative()
# instrument setup
worker_iterative_setup = iterative_control_widget._return_experiment_setup()
worker_iterative_setup.returned.connect(instrument_control_widget._set_iterative_configuration)
iterative_control_widget._set_worker_iterative_setup(worker_iterative_setup)
# add widgets to Napari viewer
viewer.window.add_dock_widget(iterative_control_widget,area='bottom',name='Iterative setup')
viewer.window.add_dock_widget(stage_display_widget,area='bottom',name='Stage monitor')
viewer.window.add_dock_widget(instrument_control_widget,area='right',name='Instrument setup')
# start Napari
napari.run(max_loop_level=2)
worker_2d.quit()
worker_3d.quit()
instrument_control_widget._shutdown()
if __name__ == "__main__":
main()
| 2,756
| 37.291667
| 111
|
py
|
OPM
|
OPM-master/napari-control/opm_timelapse_reconstruction.py
|
import napari
from src.OPMMirrorReconstruction import OPMMirrorReconstruction
def main():
# setup OPM GUI and Napari viewer
reconstruction_widget = OPMMirrorReconstruction()
viewer = napari.Viewer()
    # these methods must be private so magic-class does not display them in the GUI. Maybe a better solution is available?
reconstruction_widget._set_viewer(viewer)
# create processing worker
reconstruction_widget._create_processing_worker()
viewer.window.add_dock_widget(reconstruction_widget,name='ASU Snouty-OPM timelapse reconstruction')
# start Napari
napari.run()
if __name__ == "__main__":
main()
| 634
| 27.863636
| 107
|
py
|
OPM
|
OPM-master/napari-control/opm_timelapse_control.py
|
#!/usr/bin/python
'''
----------------------------------------------------------------------------------------
ASU OPM timelapse via pymmcore-plus, napari, magic-class, and magic-gui
----------------------------------------------------------------------------------------
Peter Brown
Franky Djutanta
Douglas Shepherd
12/11/2021
douglas.shepherd@asu.edu
----------------------------------------------------------------------------------------
'''
from src.OPMMirrorScan import OPMMirrorScan
import napari
from pymmcore_widgets import StageWidget
from pathlib import Path
import sys
# need to add MM path to load some dlls
#sys.path.append(r"C:\Program Files\Micro-Manager-2.0gamma")
# def main(path_to_mm_config=Path('C:/Program Files/Micro-Manager-2.0gamma/opm_new.cfg')):
def main(path_to_mm_config=Path(r'C:\Users\qi2lab\Documents\micro-manager_configs\OPM_20230320.cfg')):
instrument_control_widget = OPMMirrorScan()
# setup OPM GUI and Napari viewer
instrument_control_widget.mmc.loadSystemConfiguration(str(path_to_mm_config))
    # these methods must be private so magic-class does not display them in the GUI. Maybe a better solution is available?
instrument_control_widget._startup()
viewer = napari.Viewer(title='ASU Snouty-OPM timelapse acquisition control')
    # these methods must be private so magic-class does not display them in the GUI. Maybe a better solution is available?
instrument_control_widget._set_viewer(viewer)
# setup 2D imaging thread worker
    # these methods must be private so magic-class does not display them in the GUI. Maybe a better solution is available?
worker_2d = instrument_control_widget._acquire_2d_data()
worker_2d.yielded.connect(instrument_control_widget._update_layers)
instrument_control_widget._set_worker_2d(worker_2d)
# setup 3D imaging thread worker
    # these methods must be private so magic-class does not display them in the GUI. Maybe a better solution is available?
worker_3d = instrument_control_widget._acquire_3d_data()
worker_3d.yielded.connect(instrument_control_widget._update_layers)
instrument_control_widget._set_worker_3d(worker_3d)
instrument_control_widget._create_3d_t_worker()
viewer.window.add_dock_widget(instrument_control_widget,name='Instrument control')
stage_03 = StageWidget('MCL NanoDrive Z Stage')
viewer.window.add_dock_widget(stage_03,name='O3 Zstage')
stage_01 = StageWidget('ZStage:M:37')
viewer.window.add_dock_widget(stage_01,name='O1 Zstage')
# start Napari
napari.run()
    # shutdown acquisition threads
worker_2d.quit()
worker_3d.quit()
# shutdown instrument
    # these methods must be private so magic-class does not display them in the GUI. Maybe a better solution is available?
instrument_control_widget._shutdown()
if __name__ == "__main__":
main()
| 2,809
| 39.142857
| 107
|
py
|
OPM
|
OPM-master/napari-control/src/OPMMirrorReconstruction.py
|
'''
Napari interface to process OPM timelapse data
TO DO: - Change to OME-Zarr output
- Add OME-tiff resave option
- Add ability to load old data
- Add option for number of iterations & TV setting for clij2-fft
Last update: 12/2022; Update to use clij2-fft, remove flatfield (due to Powell lens), remove dexp dependence
D. Shepherd - 12/2021
'''
from magicclass import magicclass, MagicTemplate
from magicgui import magicgui
from magicgui.tqdm import trange
from pathlib import Path
import numpy as np
from src.utils.image_post_processing import deskew
from napari.qt.threading import thread_worker
from napari.utils import progress
import zarr
import dask.array as da
from src.utils.data_io import read_metadata, return_data_from_zarr_to_numpy, return_opm_psf
from skimage.measure import block_reduce
from itertools import compress
import gc
from numcodecs import Blosc
try:
import cupy as cp
CP_AVAILABLE = True
except ImportError:
CP_AVAILABLE = False
if CP_AVAILABLE:
try:
from cucim.skimage.exposure import match_histograms
CUCIM_AVAILABLE = True
    except ImportError:
from skimage.exposure import match_histograms
CUCIM_AVAILABLE = False
else:
from skimage.exposure import match_histograms
CUCIM_AVAILABLE = False
# OPM control UI element
@magicclass(labels=False)
class OPMMirrorReconstruction(MagicTemplate):
def __init__(self):
self.decon = False
self.match_histograms = False
self.debug = False
self.channel_idxs=[0,1,2,3,4]
self.active_channels=[False,False,False,False,False]
# set viewer
def _set_viewer(self,viewer):
self.viewer = viewer
@thread_worker
def _process_data(self):
# create parameter array from scan parameters saved by acquisition code
df_metadata = read_metadata(self.data_path / Path('scan_metadata.csv'))
root_name = df_metadata['root_name']
scan_type = df_metadata['scan_type']
theta = df_metadata['theta']
scan_step = df_metadata['scan_step']
pixel_size = df_metadata['pixel_size']
num_t = df_metadata['num_t']
num_y = df_metadata['num_y']
num_z = df_metadata['num_z']
num_ch = df_metadata['num_ch']
num_images = df_metadata['scan_axis_positions']
y_pixels = df_metadata['y_pixels']
x_pixels = df_metadata['x_pixels']
chan_405_active = df_metadata['405_active']
chan_488_active = df_metadata['488_active']
chan_561_active = df_metadata['561_active']
chan_635_active = df_metadata['635_active']
chan_730_active = df_metadata['730_active']
active_channels = [chan_405_active,chan_488_active,chan_561_active,chan_635_active,chan_730_active]
channel_idxs = [0,1,2,3,4]
channels_in_data = list(compress(channel_idxs, active_channels))
n_active_channels = len(channels_in_data)
self.active_channels = active_channels
self.channels_in_data = channels_in_data
# calculate pixel sizes of deskewed image in microns
deskewed_x_pixel = pixel_size
deskewed_y_pixel = pixel_size
deskewed_z_pixel = pixel_size
if self.debug:
print('Deskewed pixel sizes before downsampling (um). x='+str(deskewed_x_pixel)+', y='+str(deskewed_y_pixel)+', z='+str(deskewed_z_pixel)+'.')
# deskew parameters
deskew_parameters = np.empty([3])
deskew_parameters[0] = theta # (degrees)
deskew_parameters[1] = scan_step*100 # (nm)
deskew_parameters[2] = pixel_size*100 # (nm)
# amount of down sampling in z
z_down_sample = 1
# load dataset
dataset_zarr = zarr.open(self.data_path / Path(root_name+'.zarr'),mode='r')
# create output directory
        if self.decon:
            output_dir_path = self.data_path / 'deskew_decon_output'
        else:
            output_dir_path = self.data_path / 'deskew_output'
output_dir_path.mkdir(parents=True, exist_ok=True)
# create name for zarr directory
zarr_output_path = output_dir_path / Path('OPM_processed.zarr')
# calculate size of one volume
        # change step size from physical space (um) to camera space (pixels)
pixel_step = scan_step/pixel_size # (pixels)
# calculate the number of pixels scanned during stage scan
scan_end = num_images * pixel_step # (pixels)
# calculate properties for final image
ny = np.int64(np.ceil(scan_end+y_pixels*np.cos(theta*np.pi/180))) # (pixels)
nz = np.int64(np.ceil(y_pixels*np.sin(theta*np.pi/180))) # (pixels)
nx = np.int64(x_pixels) # (pixels)
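        # Illustrative sanity check with made-up numbers (not from a real dataset):
        # for theta = 30 deg, y_pixels = 512, scan_step = 0.4 um, pixel_size = 0.115 um,
        # and num_images = 500: pixel_step = 0.4/0.115 ~ 3.478 px, scan_end ~ 1739.1 px,
        # so ny = ceil(1739.1 + 512*cos(30 deg)) = ceil(1739.1 + 443.4) = 2183 and
        # nz = ceil(512*sin(30 deg)) = 256.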
# create and open zarr file
compressor = Blosc(cname='zstd', clevel=3, shuffle=Blosc.BITSHUFFLE)
opm_data = zarr.open(str(zarr_output_path),
mode="w",
shape=(num_t, num_ch, nz, ny, nx),
chunks=(1, 1, 1, int(ny), int(nx)),
dimension_separator='/',
compressor = compressor,
dtype=np.uint16)
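        # note: chunks of (1, 1, 1, ny, nx) mean every chunk holds exactly one deskewed
        # z-plane, so writing a full (z, y, x) volume per timepoint/channel below streams
        # whole chunks without read-modify-write. A downstream reader can open the result
        # lazily, e.g. (sketch, using the same dask/zarr stack imported in this file):
        #   processed = da.from_zarr(zarr.open(str(zarr_output_path), mode='r'))  # (t, c, z, y, x)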
# if decon is requested, try to import microvolution wrapper or clij2-fft library
if self.decon:
#from src.utils.opm_psf import generate_skewed_psf
from src.utils.image_post_processing import lr_deconvolution
skewed_psf = []
for ch_idx in np.flatnonzero(active_channels):
skewed_psf.append(return_opm_psf(ch_idx))
if self.match_histograms:
reference_images = np.zeros((num_ch, nz, ny, nx), dtype=np.uint16)
for t_idx in trange(num_t,desc='t',position=0):
for ch_idx in trange(n_active_channels,desc='c',position=1, leave=False):
# pull data stack into memory
if self.debug:
print('Process timepoint '+str(t_idx)+'; channel '+str(ch_idx) +'.')
raw_data = return_data_from_zarr_to_numpy(dataset_zarr, t_idx, ch_idx, num_images, y_pixels,x_pixels)
                # run deconvolution on the raw (still skewed) data using the skewed PSF
if self.decon:
if self.debug:
print('Deconvolve.')
decon = lr_deconvolution(raw_data,skewed_psf[ch_idx],iterations=100)
                else:
                    decon = raw_data
del raw_data
# deskew
if self.debug:
print('Deskew.')
deskewed = deskew(np.flipud(decon),*deskew_parameters)
del decon
# downsample in z due to oversampling when going from OPM to coverslip geometry
if z_down_sample==1:
deskewed_downsample = deskewed
else:
if self.debug:
print('Downsample.')
deskewed_downsample = block_reduce(deskewed, block_size=(z_down_sample,1,1), func=np.mean)
del deskewed
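                    # note on the downsampling above: block_reduce with
                    # block_size=(z_down_sample, 1, 1) and func=np.mean averages
                    # non-overlapping groups of z_down_sample planes, e.g. a
                    # (100, ny, nx) stack becomes (50, ny, nx) for z_down_sample = 2;
                    # skimage zero-pads a partial trailing block, which slightly
                    # darkens the last output plane if nz is not divisible.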
if self.match_histograms:
if self.debug:
print('Match histogram.')
if t_idx == 0:
reference_images[ch_idx,:] = deskewed_downsample
deskewed_matched = deskewed_downsample
else:
if CUCIM_AVAILABLE:
reference_image_cp = cp.asarray(reference_images[ch_idx,:],dtype=cp.uint16)
deskewed_downsample_cp = cp.asarray(deskewed_downsample,dtype=cp.uint16)
deskewed_matched = cp.asnumpy(match_histograms(deskewed_downsample_cp,reference_image_cp)).astype(np.uint16)
del reference_image_cp, deskewed_downsample_cp
gc.collect()
cp.clear_memo()
cp._default_memory_pool.free_all_blocks()
else:
                        deskewed_matched = match_histograms(deskewed_downsample,reference_images[ch_idx,:]).astype(np.uint16)
else:
deskewed_matched = deskewed_downsample
del deskewed_downsample
if self.debug:
print('Write data into Zarr container')
opm_data[t_idx, ch_idx, :, :, :] = deskewed_matched
# free up memory
del deskewed_matched
gc.collect()
# exit
self.dataset_zarr = zarr_output_path
self.scale = [1,deskewed_z_pixel,deskewed_y_pixel,deskewed_x_pixel]
def _create_processing_worker(self):
worker_processing = self._process_data()
self._set_worker_processing(worker_processing)
    # set processing thread worker
def _set_worker_processing(self,worker_processing):
self.worker_processing = worker_processing
# update viewer
def _update_viewer(self,display_data):
# clean up viewer
self.viewer.layers.clear()
# channel names and colormaps to match control software
channel_names = ['405nm','488nm','561nm','635nm','730nm']
colormaps = ['bop purple','bop blue','bop orange','red','grey']
active_channel_names=[]
active_colormaps=[]
dataset = da.from_zarr(zarr.open(self.dataset_zarr,mode='r'))
# iterate through active channels and populate viewer
for channel in self.channels_in_data:
active_channel_names.append(channel_names[channel])
active_colormaps.append(colormaps[channel])
self.viewer.add_image(
dataset,
channel_axis=1,
name=active_channel_names,
scale = self.scale,
blending='additive',
colormap=active_colormaps)
self.viewer.scale_bar.visible = True
self.viewer.scale_bar.unit = 'um'
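    # note: add_image with channel_axis=1 splits the (t, c, z, y, x) dask array into
    # one napari layer per channel, which is why name/colormap are passed as lists
    # that must line up with the active channels present in the data.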
    # set deconvolution option
@magicgui(
auto_call=True,
use_decon = {"widget_type": "CheckBox", "label": "Deconvolution"},
layout="horizontal"
)
def set_deconvolution_option(self,use_decon = False):
self.decon = use_decon
# set histogram matching option
@magicgui(
auto_call=True,
use_match_histograms = {"widget_type": "CheckBox", "label": "Match histograms"},
layout="horizontal"
)
def set_histogram_option(self,use_match_histograms = False):
self.match_histograms = use_match_histograms
    # set path to dataset for processing
@magicgui(
auto_call=False,
data_path={"widget_type": "FileEdit","mode": "d", "label": 'Data to process:'},
layout='vertical',
call_button="Set"
)
def run_data_processing(self, data_path='d:/'):
self.data_path = data_path
df_metadata = read_metadata(self.data_path / Path('scan_metadata.csv'))
self.time_points = df_metadata['num_t']
# control data processing
@magicgui(
auto_call=True,
start_processing={"widget_type": "PushButton", "label": 'Run reconstruction'},
layout='horizontal'
)
    def run_processing(self,start_processing):
        if not(self.data_path is None):
            # connect callbacks before starting the worker so no signal is missed;
            # the lambda discards the returned value, which the slot does not accept
            self.worker_processing.returned.connect(lambda *_: self._create_processing_worker())
            self.worker_processing.returned.connect(self._update_viewer)
            self.worker_processing.start()
'''
# load already processed dataset
@magicgui(
auto_call=False,
zarr_path={"widget_type": "FileEdit","mode": "d", "label": 'Processed data to load:'},
layout='vertical',
call_button="Set"
)
def load_processed_data(self, zarr_path='d:/'):
self.zarr_path = zarr_path
#self._load_existing_zarr()
'''
| 12,061
| 37.292063
| 154
|
py
|
OPM
|
OPM-master/napari-control/src/OPM_main.py
|
#!/usr/bin/python
from magicgui import magicgui
from magicclass import magicclass, set_design
from pathlib import Path
@magicclass(labels=False)
@set_design(text="Stage monitor")
class OPMmain():
def __init__(self):
self.path_to_fluidics_flush_program = Path('C:/Users/qi2lab/Documents/GitHub/common_fluidics_programs/wash_fluidics_wells1-9.csv')
self.path_to_mm_config = Path('C:/Program Files/Micro-Manager-2.0gamma/temp_HamDCAM.cfg')
self.pump_COM_port = 'COM5'
self.valve_COM_port = 'COM6'
self.pump_parameters = {'pump_com_port': self.pump_COM_port,
'pump_ID': 30,
'verbose': True,
'simulate_pump': False,
'serial_verbose': False,
'flip_flow_direction': False}
@magicgui(
auto_call=False,
opm_mode = {
"widget_type": "Select",
"choices": [
"Flush fluidics",
"Run iterative experiment",
"Run timelapse experiment",
"Reconstruct iterative experiment",
"Reconstruct timelapse experiment"],
"allow_multiple": False,
"label": "OPM mode"},
call_button ='Select mode'
)
def select_mode(self,opm_mode):
if opm_mode[0]=="Flush fluidics":
# import fluidics libraries
from src.hardware.APump import APump
from src.hardware.HamiltonMVP import HamiltonMVP
import src.utils.data_io as data_io
from src.utils.fluidics_control import run_fluidic_program
# connect to pump
pump_controller = APump(self.pump_parameters)
# set pump to remote control
pump_controller.enableRemoteControl(True)
# connect to valves
valve_controller = HamiltonMVP(com_port=self.valve_COM_port)
# initialize valves
valve_controller.autoAddress()
# load fluidics flush program
df_fluidics = data_io.read_fluidics_program(self.path_to_fluidics_flush_program)
# run fluidics flush
for r_idx in range(int(df_fluidics['round'].max())):
success_fluidics = run_fluidic_program(r_idx,df_fluidics,valve_controller,pump_controller)
if not(success_fluidics):
raise Exception('Error in fluidics.')
elif opm_mode[0]=="Run iterative experiment":
import opm_iterative_control
opm_iterative_control.main(self.path_to_mm_config)
elif opm_mode[0]=="Run timelapse experiment":
self.close()
import opm_timelapse_control
opm_timelapse_control.main(self.path_to_mm_config)
elif opm_mode[0]=="Reconstruct iterative experiment":
#import
#opm_iterative_reconstruct()
pass
elif opm_mode[0]=="Reconstruct timelapse experiment":
import opm_timelapse_reconstruction
opm_timelapse_reconstruction.main()
def main():
opmmain = OPMmain()
opmmain.show(run=True)
if __name__ == "__main__":
main()
| 3,264
| 37.411765
| 138
|
py
|
OPM
|
OPM-master/napari-control/src/OPMMirrorScan.py
|
from pymmcore_plus import CMMCorePlus
from magicclass import magicclass, MagicTemplate
from magicgui import magicgui
from magicgui.tqdm import trange
from napari.qt.threading import thread_worker
#from superqt.utils import ensure_main_thread
from pathlib import Path
import numpy as np
import time
import zarr
from src.hardware.OPMNIDAQ import OPMNIDAQ
from src.hardware.PicardShutter import PicardShutter
from src.utils.autofocus_remote_unit import manage_O3_focus
from src.utils.data_io import write_metadata
from src.utils.image_post_processing import deskew
from datetime import datetime
# OPM control UI element
@magicclass(labels=False)
class OPMMirrorScan(MagicTemplate):
# initialize
def __init__(self):
# OPM parameters
self.active_channel = "Off"
self.channel_powers = np.zeros(5,dtype=np.int8)
self.channel_states=[False,False,False,False,False]
self.exposure_ms = 10.0 # unit: ms
self.scan_axis_step_um = 0.4 # unit: um
self.scan_axis_calibration = 0.03925 # unit: V / um
self.galvo_neutral_volt = 0. # unit: V
self.scan_mirror_footprint_um = 50.0 # unit: um
self.camera_pixel_size_um = .115 # unit: um
self.opm_tilt = 30 # unit: degrees
# camera parameters
self.camera_name = 'OrcaFusionBT' # camera name in MM config
self.ROI_uleft_corner_x = int(168) # unit: camera pixels
self.ROI_uleft_corner_y = int(928) # unit: camera pixels
self.ROI_width_x = int(1900) # unit: camera pixels
self.ROI_width_y = int(512) # unit: camera pixels
# O3 piezo stage name
self.O3_stage_name='MCL NanoDrive Z Stage'
# shutter ID
self.shutter_id = 712
# default save path
self.save_path = Path('D:/')
self.channel_labels = ["405", "488", "561", "635", "730"]
self.do_ind = [0, 1, 2, 3, 4] # digital output line corresponding to each channel
self.debug=False
# flags for instrument setup
self.powers_changed = True
self.channels_changed = True
self.ROI_changed = True
self.exposure_changed = True
self.footprint_changed = True
self.DAQ_running = False
self.save_path_setup = False
self.mmc = CMMCorePlus.instance()
    # set 2D acquisition thread worker
def _set_worker_2d(self,worker_2d):
self.worker_2d = worker_2d
self.worker_2d_started = False
self.worker_2d_running = False
    # set 3D acquisition thread worker
def _set_worker_3d(self,worker_3d):
self.worker_3d = worker_3d
self.worker_3d_started = False
self.worker_3d_running = False
def _create_3d_t_worker(self):
worker_3d_t = self._acquire_3d_t_data()
self._set_worker_3d_t(worker_3d_t)
    # set 3D timelapse acquisition thread worker
def _set_worker_3d_t(self,worker_3d_t):
self.worker_3d_t = worker_3d_t
self.worker_3d_t_running = False
# set viewer
def _set_viewer(self,viewer):
self.viewer = viewer
# create and save metadata
def _save_metadata(self):
scan_param_data = [{'root_name': str("OPM_data"),
'scan_type': 'galvo',
'theta': self.opm_tilt,
'exposure_ms': self.exposure_ms,
'scan_step': self.scan_axis_step_um,
'pixel_size': self.camera_pixel_size_um,
'galvo_scan_range_um': self.scan_mirror_footprint_um,
'galvo_volts_per_um': self.scan_axis_calibration,
'num_t': int(self.n_timepoints),
'time_delay': float(self.wait_time),
'num_y': 1,
'num_z': 1,
'num_ch': int(self.n_active_channels),
'scan_axis_positions': int(self.scan_steps),
'y_pixels': self.ROI_width_y,
'x_pixels': self.ROI_width_x,
'405_active': self.channel_states[0],
'488_active': self.channel_states[1],
'561_active': self.channel_states[2],
'635_active': self.channel_states[3],
'730_active': self.channel_states[4],
'405_power': self.channel_powers[0],
'488_power': self.channel_powers[1],
'561_power': self.channel_powers[2],
'635_power': self.channel_powers[3],
'730_power': self.channel_powers[4],
}]
write_metadata(scan_param_data[0], self.output_dir_path / Path('scan_metadata.csv'))
# update viewer layers
def _update_layers(self,values):
        # guard against empty yields (e.g. when no channel is active)
        if values is None:
            return
        current_channel = values[0]
new_image = values[1]
channel_names = ['405nm','488nm','561nm','635nm','730nm']
colormaps = ['bop purple','bop blue','bop orange','red','grey']
channel_name = channel_names[current_channel]
colormap = colormaps[current_channel]
try:
self.viewer.layers[channel_name].data = new_image
        except KeyError:
self.viewer.add_image(new_image, name=channel_name, blending='additive', colormap=colormap,contrast_limits=[110,.9*np.max(new_image)])
@thread_worker
def _acquire_2d_data(self):
while True:
# parse which channels are active
active_channel_indices = [ind for ind, st in zip(self.do_ind, self.channel_states) if st]
n_active_channels = len(active_channel_indices)
if n_active_channels == 0:
yield None
if self.debug:
print("%d active channels: " % n_active_channels, end="")
for ind in active_channel_indices:
print("%s " % self.channel_labels[ind], end="")
print("")
if self.powers_changed:
self._set_mmc_laser_power()
self.powers_changed = False
if self.channels_changed:
if self.DAQ_running:
self.opmdaq.stop_waveform_playback()
self.DAQ_running = False
self.opmdaq.reset_scan_mirror()
self.opmdaq.set_scan_type('stage')
self.opmdaq.set_channels_to_use(self.channel_states)
self.opmdaq.set_interleave_mode(True)
self.opmdaq.generate_waveforms()
self.opmdaq.start_waveform_playback()
self.DAQ_running=True
self.channels_changed = False
if not(self.DAQ_running):
self.opmdaq.start_waveform_playback()
self.DAQ_running=True
if self.ROI_changed:
self._crop_camera()
self.ROI_changed = False
# set exposure time
if self.exposure_changed:
self.mmc.setExposure(self.exposure_ms)
self.exposure_changed = False
for c in active_channel_indices:
self.mmc.snapImage()
raw_image_2d = self.mmc.getImage()
time.sleep(.05)
yield c, raw_image_2d
@thread_worker
def _acquire_3d_data(self):
while True:
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------Begin setup of scan parameters--------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
# parse which channels are active
active_channel_indices = [ind for ind, st in zip(self.do_ind, self.channel_states) if st]
n_active_channels = len(active_channel_indices)
if self.debug:
print("%d active channels: " % n_active_channels, end="")
for ind in active_channel_indices:
print("%s " % self.channel_labels[ind], end="")
print("")
n_timepoints = 1
if self.ROI_changed:
self._crop_camera()
self.ROI_changed = False
# set exposure time
if self.exposure_changed:
self.mmc.setExposure(self.exposure_ms)
self.exposure_changed = False
if self.powers_changed:
self._set_mmc_laser_power()
self.powers_changed = False
if self.channels_changed or self.footprint_changed or not(self.DAQ_running):
if self.DAQ_running:
self.opmdaq.stop_waveform_playback()
self.DAQ_running = False
self.opmdaq.set_scan_type('mirror')
self.opmdaq.set_channels_to_use(self.channel_states)
self.opmdaq.set_interleave_mode(True)
scan_steps = self.opmdaq.set_scan_mirror_range(self.scan_axis_step_um,self.scan_mirror_footprint_um)
self.opmdaq.generate_waveforms()
self.channels_changed = False
self.footprint_changed = False
            raw_image_stack = np.zeros([len(self.do_ind),scan_steps,self.ROI_width_y,self.ROI_width_x]).astype(np.uint16)
if self.debug:
# output experiment info
print("Scan axis range: %.1f um, Scan axis step: %.1f nm, Number of galvo positions: %d" %
(self.scan_mirror_footprint_um, self.scan_axis_step_um * 1000, scan_steps))
print('Time points: ' + str(n_timepoints))
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------End setup of scan parameters----------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------Start acquisition and deskew----------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
self.opmdaq.start_waveform_playback()
self.DAQ_running = True
# run hardware triggered acquisition
self.mmc.startSequenceAcquisition(int(n_active_channels*scan_steps),0,True)
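            # with interleaved DAQ playback, frames arrive channel-fastest:
            # (z0,c0), (z0,c1), ..., (z0,cN-1), (z1,c0), ... which is why the
            # channel loop below is nested inside the galvo-step loop.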
for z in range(scan_steps):
for c in active_channel_indices:
while self.mmc.getRemainingImageCount()==0:
pass
raw_image_stack[c,z,:] = self.mmc.popNextImage()
self.mmc.stopSequenceAcquisition()
self.opmdaq.stop_waveform_playback()
self.DAQ_running = False
# deskew parameters
deskew_parameters = np.empty([3])
deskew_parameters[0] = self.opm_tilt # (degrees)
deskew_parameters[1] = self.scan_axis_step_um*100 # (nm)
deskew_parameters[2] = self.camera_pixel_size_um*100 # (nm)
for c in active_channel_indices:
deskewed_image = deskew(np.flipud(raw_image_stack[c,:]),*deskew_parameters).astype(np.uint16)
yield c, deskewed_image
del raw_image_stack
#------------------------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------End acquisition and deskew-----------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
@thread_worker
def _acquire_3d_t_data(self):
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------Begin setup of scan parameters--------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
# parse which channels are active
active_channel_indices = [ind for ind, st in zip(self.do_ind, self.channel_states) if st]
self.n_active_channels = len(active_channel_indices)
if self.debug:
print("%d active channels: " % self.n_active_channels, end="")
for ind in active_channel_indices:
print("%s " % self.channel_labels[ind], end="")
print("")
if self.ROI_changed:
self._crop_camera()
self.ROI_changed = False
# set exposure time
if self.exposure_changed:
self.mmc.setExposure(self.exposure_ms)
self.exposure_changed = False
if self.powers_changed:
self._set_mmc_laser_power()
self.powers_changed = False
if self.channels_changed or self.footprint_changed or not(self.DAQ_running):
if self.DAQ_running:
self.opmdaq.stop_waveform_playback()
self.DAQ_running = False
self.opmdaq.set_scan_type('mirror')
self.opmdaq.set_channels_to_use(self.channel_states)
self.opmdaq.set_interleave_mode(True)
self.scan_steps = self.opmdaq.set_scan_mirror_range(self.scan_axis_step_um,self.scan_mirror_footprint_um)
self.opmdaq.generate_waveforms()
self.channels_changed = False
self.footprint_changed = False
# create directory for timelapse
time_string = datetime.now().strftime("%Y_%m_%d-%I_%M_%S")
self.output_dir_path = self.save_path / Path('timelapse_'+time_string)
self.output_dir_path.mkdir(parents=True, exist_ok=True)
# create name for zarr directory
zarr_output_path = self.output_dir_path / Path('OPM_data.zarr')
# create and open zarr file
opm_data = zarr.open(str(zarr_output_path), mode="w", shape=(self.n_timepoints, self.n_active_channels, self.scan_steps, self.ROI_width_y, self.ROI_width_x), chunks=(1, 1, 1, self.ROI_width_y, self.ROI_width_x),compressor=None, dtype=np.uint16)
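        # chunks of one 2D frame (1, 1, 1, Y, X) line up with the per-image writes in
        # the acquisition loop below, so each popNextImage() lands in exactly one chunk;
        # compressor=None trades disk space for lower write latency while streaming.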
# construct metadata and save
self._save_metadata()
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------End setup of scan parameters----------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------------Start acquisition---------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
# turn off Z motor
exp_zstage_name = self.mmc.getFocusDevice()
self.mmc.setProperty(exp_zstage_name,'MotorOnOff','Off')
# set circular buffer to be large
self.mmc.clearCircularBuffer()
circ_buffer_mb = 90000
self.mmc.setCircularBufferMemoryFootprint(int(circ_buffer_mb))
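        # 90000 MB (~88 GB) of circular buffer lets the camera run ahead of the Zarr
        # writes without dropping frames; this assumes the acquisition host actually
        # has that much RAM to spare.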
# run hardware triggered acquisition
if self.wait_time == 0:
self.mmc.setExposure(self.exposure_ms)
self.opmdaq.start_waveform_playback()
self.DAQ_running = True
self.mmc.startSequenceAcquisition(int(self.n_timepoints*self.n_active_channels*self.scan_steps),0,True)
for t in trange(self.n_timepoints,desc="t", position=0):
for z in trange(self.scan_steps,desc="z", position=1, leave=False):
for c in range(self.n_active_channels):
while self.mmc.getRemainingImageCount()==0:
pass
opm_data[t, c, z, :, :] = self.mmc.popNextImage() # DPS to do: Pop full "chunk" into memory then use dask.array to write in parallel
self.mmc.stopSequenceAcquisition()
self.opmdaq.stop_waveform_playback()
self.DAQ_running = False
else:
af_counter = 0
self.current_O3_stage = manage_O3_focus(self.mmc,self.shutter_controller,self.O3_stage_name,verbose=True)
self.mmc.setExposure(self.exposure_ms)
for t in trange(self.n_timepoints,desc="t", position=0):
self.opmdaq.start_waveform_playback()
self.DAQ_running = True
self.mmc.startSequenceAcquisition(int(self.n_active_channels*self.scan_steps),0,True)
for z in trange(self.scan_steps,desc="z", position=1, leave=False):
for c in range(self.n_active_channels):
while self.mmc.getRemainingImageCount()==0:
pass
opm_data[t, c, z, :, :] = self.mmc.popNextImage()
self.mmc.stopSequenceAcquisition()
self.opmdaq.stop_waveform_playback()
self.DAQ_running = False
if af_counter == 0:
t_start = time.perf_counter()
self.current_O3_stage = manage_O3_focus(self.mmc,self.shutter_controller,self.O3_stage_name,verbose=True)
self.mmc.setExposure(self.exposure_ms)
t_end = time.perf_counter()
t_elapsed = t_end - t_start
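                    # the sleep below appears to budget the measured autofocus time
                    # twice, since one autofocus ran just above and another follows
                    # the delay; a rough accounting rather than an exact one.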
time.sleep(self.wait_time-t_elapsed*2)
self.current_O3_stage = manage_O3_focus(self.mmc,self.shutter_controller,self.O3_stage_name,verbose=True)
self.mmc.setExposure(self.exposure_ms)
af_counter = 0
else:
time.sleep(self.wait_time)
af_counter = af_counter + 1
# turn on Z motors
exp_zstage_name = self.mmc.getFocusDevice()
self.mmc.setProperty(exp_zstage_name,'MotorOnOff','On')
#------------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------End acquisition-------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
# set circular buffer to be small
self.mmc.clearCircularBuffer()
circ_buffer_mb = 4000
self.mmc.setCircularBufferMemoryFootprint(int(circ_buffer_mb))
def _crop_camera(self):
"""
Crop camera to GUI values
:return None:
"""
current_ROI = self.mmc.getROI()
if not(current_ROI[2]==2304) or not(current_ROI[3]==2304):
self.mmc.clearROI()
self.mmc.waitForDevice(self.camera_name)
self.mmc.setROI(int(self.ROI_uleft_corner_x),int(self.ROI_uleft_corner_y),int(self.ROI_width_x),int(self.ROI_width_y))
self.mmc.waitForDevice(self.camera_name)
def _lasers_to_hardware(self):
"""
Change lasers to hardware control
:return None:
"""
# turn all lasers off
self.mmc.setConfig('Laser','Off')
self.mmc.waitForConfig('Laser','Off')
# set all laser to external triggering
self.mmc.setConfig('Modulation-405','External-Digital')
self.mmc.waitForConfig('Modulation-405','External-Digital')
self.mmc.setConfig('Modulation-488','External-Digital')
self.mmc.waitForConfig('Modulation-488','External-Digital')
self.mmc.setConfig('Modulation-561','External-Digital')
self.mmc.waitForConfig('Modulation-561','External-Digital')
self.mmc.setConfig('Modulation-637','External-Digital')
self.mmc.waitForConfig('Modulation-637','External-Digital')
self.mmc.setConfig('Modulation-730','External-Digital')
self.mmc.waitForConfig('Modulation-730','External-Digital')
# turn all lasers on
self.mmc.setConfig('Laser','AllOn')
self.mmc.waitForConfig('Laser','AllOn')
def _lasers_to_software(self):
"""
Change lasers to software control
:return None:
"""
# turn all lasers off
self.mmc.setConfig('Laser','Off')
self.mmc.waitForConfig('Laser','Off')
# set all lasers back to software control
self.mmc.setConfig('Modulation-405','CW (constant power)')
self.mmc.waitForConfig('Modulation-405','CW (constant power)')
self.mmc.setConfig('Modulation-488','CW (constant power)')
self.mmc.waitForConfig('Modulation-488','CW (constant power)')
self.mmc.setConfig('Modulation-561','CW (constant power)')
self.mmc.waitForConfig('Modulation-561','CW (constant power)')
self.mmc.setConfig('Modulation-637','CW (constant power)')
self.mmc.waitForConfig('Modulation-637','CW (constant power)')
self.mmc.setConfig('Modulation-730','CW (constant power)')
self.mmc.waitForConfig('Modulation-730','CW (constant power)')
def _set_mmc_laser_power(self):
"""
Change laser power
:return None:
"""
self.mmc.setProperty(r'Coherent-Scientific Remote',r'Laser 405-100C - PowerSetpoint (%)',float(self.channel_powers[0]))
self.mmc.setProperty(r'Coherent-Scientific Remote',r'Laser 488-150C - PowerSetpoint (%)',float(self.channel_powers[1]))
self.mmc.setProperty(r'Coherent-Scientific Remote',r'Laser OBIS LS 561-150 - PowerSetpoint (%)',float(self.channel_powers[2]))
self.mmc.setProperty(r'Coherent-Scientific Remote',r'Laser 637-140C - PowerSetpoint (%)',float(self.channel_powers[3]))
self.mmc.setProperty(r'Coherent-Scientific Remote',r'Laser 730-30C - PowerSetpoint (%)',float(self.channel_powers[4]))
def _setup_camera(self):
"""
Setup camera readout and triggering for OPM
:return None:
"""
# give camera time to change modes if necessary
self.mmc.setConfig('Camera-Setup','ScanMode3')
self.mmc.waitForConfig('Camera-Setup','ScanMode3')
# set camera to internal trigger
self.mmc.setConfig('Camera-TriggerType','NORMAL')
self.mmc.waitForConfig('Camera-TriggerType','NORMAL')
trigger_value = self.mmc.getProperty(self.camera_name,'Trigger')
while not(trigger_value == 'NORMAL'):
self.mmc.setConfig('Camera-TriggerType','NORMAL')
self.mmc.waitForConfig('Camera-TriggerType','NORMAL')
time.sleep(2)
trigger_value = self.mmc.getProperty(self.camera_name,'Trigger')
        # configure camera output triggers to report exposure windows
self.mmc.setProperty(self.camera_name,r'OUTPUT TRIGGER KIND[0]','EXPOSURE')
self.mmc.setProperty(self.camera_name,r'OUTPUT TRIGGER KIND[1]','EXPOSURE')
self.mmc.setProperty(self.camera_name,r'OUTPUT TRIGGER KIND[2]','EXPOSURE')
self.mmc.setProperty(self.camera_name,r'OUTPUT TRIGGER POLARITY[0]','POSITIVE')
self.mmc.setProperty(self.camera_name,r'OUTPUT TRIGGER POLARITY[1]','POSITIVE')
self.mmc.setProperty(self.camera_name,r'OUTPUT TRIGGER POLARITY[2]','POSITIVE')
def _enforce_DCAM_internal_trigger(self):
"""
Enforce camera being in trigger = INTERNAL mode
:return None:
"""
# set camera to START mode upon input trigger
self.mmc.setConfig('Camera-TriggerSource','INTERNAL')
self.mmc.waitForConfig('Camera-TriggerSource','INTERNAL')
# check if camera actually changed
# we find that camera doesn't always go back to START mode and need to check it
trigger_value = self.mmc.getProperty(self.camera_name,'TRIGGER SOURCE')
while not(trigger_value == 'INTERNAL'):
self.mmc.setConfig('Camera-TriggerSource','INTERNAL')
self.mmc.waitForConfig('Camera-TriggerSource','INTERNAL')
trigger_value = self.mmc.getProperty(self.camera_name,'TRIGGER SOURCE')
def _startup(self):
"""
Startup OPM instrument in neutral state for all hardware
:return None:
"""
# set lasers to 0% power and hardware control
self._set_mmc_laser_power()
self._lasers_to_hardware()
# set camera to OPM specific setup
self._crop_camera()
self._setup_camera()
self._enforce_DCAM_internal_trigger()
# connect to DAQ
self.opmdaq = OPMNIDAQ()
# reset scan mirror position to neutral
self.opmdaq.reset_scan_mirror()
# connect to Picard shutter
self.shutter_controller = PicardShutter(shutter_id=self.shutter_id,verbose=False)
self.shutter_controller.closeShutter()
self.shutter_state = 0
self.mmc.setProperty(self.mmc.getFocusDevice(),'MotorOnOff','On')
def _shutdown(self):
"""
Shutdown OPM instrument in neutral state for all hardware
:return None:
"""
# set lasers to 0% power and software control
self._set_mmc_laser_power()
self._lasers_to_software()
# shutdown DAQ
if self.DAQ_running:
self.opmdaq.stop_waveform_playback()
self.opmdaq.reset_scan_mirror()
self.shutter_controller.shutDown()
@magicgui(
auto_call=False,
exposure_ms={"widget_type": "FloatSpinBox", "min": 1, "max": 500,'label': 'Camera exposure (ms)'},
layout='horizontal',
call_button='Update exposure'
)
def set_exposure(self, exposure_ms=10.0):
"""
Magicgui element to get camera exposure time
:param exposure_ms: float
camera exposure time
:return None:
"""
if not(exposure_ms == self.exposure_ms):
self.exposure_ms=exposure_ms
self.exposure_changed = True
else:
self.exposure_changed = False
@magicgui(
auto_call=False,
        uleft_corner_x={"widget_type": "SpinBox", "min": 0, "max": 2304,'label': 'ROI upper-left (non-tilt)'},
        uleft_corner_y={"widget_type": "SpinBox", "min": 0, "max": 2304,'label': 'ROI upper-left (tilt)'},
        width_x={"widget_type": "SpinBox", "min": 0, "max": 2304,'label': 'ROI width (non-tilt)'},
        width_y={"widget_type": "SpinBox", "min": 0, "max": 2304,'label': 'ROI height (tilt)'},
layout='vertical',
call_button="Update crop"
)
def set_ROI(self, uleft_corner_x=168,uleft_corner_y=928,width_x=1900,width_y=512):
"""
Magicgui element to get camera ROI
:param uleft_corner_x: int
upper left ROI x pixel
:param uleft_corner_y: int
upper left ROI y pixel
:param width_x: int
ROI width in pixels
:param width_y: int
ROI height in pixels = TILTED DIRECTION
:return None:
"""
if not(int(uleft_corner_x)==self.ROI_uleft_corner_x) or not(int(uleft_corner_y)==self.ROI_uleft_corner_y) or not(int(width_x)==self.ROI_width_x) or not(int(width_y)==self.ROI_width_y):
self.ROI_uleft_corner_x=int(uleft_corner_x)
self.ROI_uleft_corner_y=int(uleft_corner_y)
self.ROI_width_x=int(width_x)
self.ROI_width_y=int(width_y)
self.ROI_changed = True
else:
self.ROI_changed = False
@magicgui(
auto_call=False,
power_405={"widget_type": "FloatSpinBox", "min": 0, "max": 100, "label": '405nm power (%)'},
power_488={"widget_type": "FloatSpinBox", "min": 0, "max": 100, "label": '488nm power (%)'},
power_561={"widget_type": "FloatSpinBox", "min": 0, "max": 100, "label": '561nm power (%)'},
power_635={"widget_type": "FloatSpinBox", "min": 0, "max": 100, "label": '635nm power (%)'},
power_730={"widget_type": "FloatSpinBox", "min": 0, "max": 100, "label": '730nm power (%)'},
layout='vertical',
call_button='Update powers'
)
def set_laser_power(self, power_405=0.0, power_488=0.0, power_561=0.0, power_635=0.0, power_730=0.0):
"""
Magicgui element to get laser powers (0-100%)
:param power_405: float
405 nm laser power
:param power_488: float
488 nm laser power
:param power_561: float
561 nm laser power
:param power_635: float
635 nm laser power
:param power_730: float
730 nm laser power
:return None:
"""
channel_powers = [power_405,power_488,power_561,power_635,power_730]
if not(np.all(channel_powers == self.channel_powers)):
self.channel_powers=channel_powers
self.powers_changed = True
else:
self.powers_changed = False
@magicgui(
auto_call=True,
active_channels = {"widget_type": "Select", "choices": ["Off","405","488","561","635","730"], "allow_multiple": True, "label": "Active channels"}
)
def set_active_channel(self, active_channels):
"""
Magicgui element to set active lasers
:param active_channels: list
list of booleans, one for each laser channel
:return None:
"""
states = [False,False,False,False,False]
        for channel in active_channels:
            if channel == 'Off':
                states = [False,False,False,False,False]
                break
            elif channel == '405':
                states[0]=True
            elif channel == '488':
                states[1]=True
            elif channel == '561':
                states[2]=True
            elif channel == '635':
                states[3]=True
            elif channel == '730':
                states[4]=True
if not(np.all(states == self.channel_states)):
self.channel_states=states
self.channels_changed = True
else:
self.channels_changed = False
@magicgui(
auto_call=False,
scan_mirror_footprint_um={"widget_type": "FloatSpinBox", "min": 5, "max": 250, "label": 'Mirror sweep (um)'},
layout='horizontal',
call_button='Update scan range'
)
def set_galvo_sweep(self, scan_mirror_footprint_um=25.0):
"""
Magicgui element to set scan footprint
:param scan_mirror_footprint_um: float
size of scan mirror sweep in microns
:return None:
"""
if not(scan_mirror_footprint_um==self.scan_mirror_footprint_um):
self.scan_mirror_footprint_um=scan_mirror_footprint_um
self.footprint_changed = True
else:
self.footprint_changed = False
# control continuous 2D imaging (software triggering)
@magicgui(
auto_call=True,
live_mode_2D={"widget_type": "PushButton", "label": 'Start/Stop Live (2D)'},
layout='horizontal'
)
def live_mode_2D(self,live_mode_2D=False):
if (np.any(self.channel_states)):
if not(self.worker_3d_running) and not(self.worker_3d_t_running):
if self.worker_2d_running:
self.worker_2d.pause()
if self.DAQ_running:
self.opmdaq.stop_waveform_playback()
self.DAQ_running=False
self.worker_2d_running = False
else:
if not(self.worker_2d_started):
self.worker_2d_started = True
self.worker_2d_running = True
self.worker_2d.start()
else:
self.worker_2d.resume()
self.worker_2d_running = True
else:
if self.worker_3d_running:
raise Exception('Stop live 3D acquisition first.')
                elif self.worker_3d_t_running:
                    raise Exception('Timelapse acquisition in process.')
else:
raise Exception('Unknown error.')
else:
raise Exception('Set at least one active channel before starting.')
# control continuous 3D volume (hardware triggering)
@magicgui(
auto_call=True,
live_mode_3D={"widget_type": "PushButton", "label": 'Start/Stop live (3D)'},
layout='horizontal'
)
def live_mode_3D(self,live_mode_3D):
if (np.any(self.channel_states)):
if not(self.worker_2d_running) and not(self.worker_3d_t_running):
self.galvo_scan = True
if self.worker_3d_running:
self.worker_3d.pause()
self.worker_3d_running = False
if self.DAQ_running:
self.opmdaq.stop_waveform_playback()
self.DAQ_running = False
self.opmdaq.reset_scan_mirror()
else:
if not(self.worker_3d_started):
self.worker_3d.start()
self.worker_3d_started = True
self.worker_3d_running = True
else:
self.worker_3d.resume()
self.worker_3d_running = True
else:
if self.worker_2d_running:
raise Exception('Stop live 2D acquisition first.')
                elif self.worker_3d_t_running:
                    raise Exception('Timelapse acquisition in process.')
else:
raise Exception('Unknown error.')
else:
raise Exception('Set at least one active channel before starting.')
# set timelapse parameters
@magicgui(
auto_call=True,
n_timepoints={"widget_type": "SpinBox", "min": 0, "max": 10000, "label": 'Timepoints to acquire'},
wait_time={"widget_type": "FloatSpinBox", "min": 0, "max": 720, "label": 'Delay between timepoints (s)'},
layout='horizontal'
)
def set_timepoints(self, n_timepoints=400,wait_time=0):
self.n_timepoints = n_timepoints
self.wait_time = wait_time
self.timelapse_setup = True
# set filepath for saving data
@magicgui(
auto_call=False,
save_path={"widget_type": "FileEdit","mode": "d", "label": 'Save path:'},
layout='horizontal',
call_button="Set"
)
def set_save_path(self, save_path=''):
self.save_path = Path(save_path)
self.save_path_setup = True
# control timelapse 3D volume (hardware triggering)
@magicgui(
auto_call=True,
        timelapse_mode_3D={"widget_type": "PushButton", "label": 'Start acquisition'},
layout='horizontal'
)
def timelapse_mode_3D(self,timelapse_mode_3D):
if not(self.worker_2d_running) and not(self.worker_3d_running):
if (self.save_path_setup and self.timelapse_setup):
self.worker_3d_t.start()
self.worker_3d_t.returned.connect(self._create_3d_t_worker)
self.worker_3d_t_running = True
else:
raise Exception('Setup save path and timelapse first.')
else:
raise Exception('Stop active live mode first.')
@magicgui(
auto_call=True,
shutter_change={"widget_type": "PushButton", "label": 'Toggle alignment laser shutter.'},
layout='horizontal'
)
def shutter_change(self,shutter_change):
if self.shutter_state == 0:
self.shutter_controller.openShutter()
self.shutter_state = 1
else:
self.shutter_controller.closeShutter()
self.shutter_state = 0
@magicgui(
auto_call=True,
autofocus_O2O3={"widget_type": "PushButton", "label": 'Autofocus O2-O3'},
layout='horizontal'
)
def autofocus_O2O3(self,autofocus_O2O3):
if not(self.worker_2d_running) and not(self.worker_3d_running) and not(self.worker_3d_t_running):
if self.DAQ_running:
self.opmdaq.stop_waveform_playback()
self.current_O3_stage = manage_O3_focus(self.mmc,self.shutter_controller,self.O3_stage_name,verbose=True)
else:
raise Exception('Stop active live mode first.')
| 37,618
| 42.540509
| 252
|
py
|
OPM
|
OPM-master/napari-control/src/OPMStageMonitor.py
|
# napari imports
from magicclass import magicclass, set_design
from magicgui import magicgui, widgets
#general python imports
from pymmcore_plus import RemoteMMCore
# Stage monitor class
@magicclass(labels=False)
class OPMStageMonitor:
def __init__(self):
pass
x_stage_pos = widgets.LineEdit(label='x:', value=f"{0:.1f}")
y_stage_pos = widgets.LineEdit(label='y:', value=f"{0:.1f}")
z_stage_pos = widgets.LineEdit(label='z:', value=f"{0:.1f}")
@magicgui(
auto_call=True,
get_pos_xyz={"widget_type": "PushButton", "label": 'Get stage position'},
layout='horizontal'
)
def get_stage_pos(self, get_pos_xyz):
with RemoteMMCore() as mmc_stage_monitor:
x = mmc_stage_monitor.getXPosition()
y = mmc_stage_monitor.getYPosition()
z = mmc_stage_monitor.getZPosition()
self.x_stage_pos.value = (f"{x:.1f}")
self.y_stage_pos.value = (f"{y:.1f}")
self.z_stage_pos.value = (f"{z:.1f}")
| 1,018
| 29.878788
| 81
|
py
|
OPM
|
OPM-master/napari-control/src/OPMStageScan.py
|
# napari imports
from magicclass import magicclass, set_design
from magicgui import magicgui
from magicgui.tqdm import trange
from napari.qt.threading import thread_worker
#general python imports
from pathlib import Path
import numpy as np
from datetime import datetime
import time
# data i/o imports
import zarr
from zarr import blosc
import npy2bdv
import gc
# hardware control imports
from pymmcore_plus import RemoteMMCore
from src.hardware.OPMNIDAQ import OPMNIDAQ
import src.hardware.ASI as ASIstage
from src.hardware.HamiltonMVP import HamiltonMVP
from src.hardware.APump import APump
# ASU OPM specific functions
from src.utils.fluidics_control import run_fluidic_program
from src.utils.data_io import write_metadata
from src.utils.image_post_processing import deskew
from src.OPMIterative import OPMIterative
# OPM control UI element
@magicclass(labels=False)
@set_design(text="ASU Snouty-OPM control")
class OPMStageScan:
# initialize
def __init__(self, OPMIterative: OPMIterative):
# OPM parameters
self.active_channel = "Off"
self.channel_powers = np.zeros(5,dtype=np.int8)
self.channel_states=[False,False,False,False,False]
self.exposure_ms = 10.0 # unit: ms
self.scan_axis_step_um = 0.4 # unit: um
self.scan_axis_calibration = 0.043 # unit: V / um
self.galvo_neutral_volt = -.15 # unit: V
self.scan_mirror_footprint_um = 50.0 # unit: um
self.camera_pixel_size_um = .115 # unit: um
self.opm_tilt = 30 # unit: degrees
# camera parameters
self.camera_name = 'OrcaFusionBT' # camera name in MM config
self.ROI_uleft_corner_x = int(200) # unit: camera pixels
self.ROI_uleft_corner_y = int(896) # unit: camera pixels
self.ROI_width_x = int(1900) # unit: camera pixels
self.ROI_width_y = int(512) # unit: camera pixels
# fluidics parameters
self.path_to_fluidics_program = None
# MM parameters
self.x_stage_name = 'XYStage:XY:31'
self.z_stage_name = 'ZStage:M:37'
self.channel_labels = ["405", "488", "561", "635", "730"]
self.do_ind = [0, 1, 2, 3, 4] # digital output line corresponding to each channel
# scan parameters
self.save_path = Path('D:/')
self.excess_stage_steps = 40 # excess stage steps to allow stage to come up to speed
# flags for instrument setup
self.powers_changed = True
self.channels_changed = True
self.ROI_changed = True
self.exposure_changed = True
self.footprint_changed = True
self.DAQ_running = False
self.iterative_setup = False
# debug flag
self.debug=False
    # set 2D acquisition thread worker
def _set_worker_2d(self,worker_2d):
"""
Set worker for continuous 2D imaging
:param worker_2d: thread_worker
Napari thread worker
:return None:
"""
self.worker_2d = worker_2d
self.worker_2d_started = False
self.worker_2d_running = False
    # set 3D acquisition thread worker
def _set_worker_3d(self,worker_3d):
"""
Set worker for continuous 3D imaging
:param worker_3d: thread_worker
Napari thread worker
:return None:
"""
self.worker_3d = worker_3d
self.worker_3d_started = False
self.worker_3d_running = False
def _create_worker_iterative(self):
"""
Create worker for iterative 3D imaging
:return None:
"""
worker_iterative = self._acquire_iterative_data()
self._set_worker_iterative(worker_iterative)
self.worker_iterative_running = False
    # set iterative acquisition thread worker
def _set_worker_iterative(self,worker_iterative):
"""
Set worker for iterative 3D imaging
        :param worker_iterative: thread_worker
            Napari thread worker
:return None:
"""
self.worker_iterative = worker_iterative
# set viewer
def _set_viewer(self,viewer):
"""
Set Napari viewer
:param viewer: Viewer
Napari viewer
:return None:
"""
self.viewer = viewer
# update viewer layers
def _update_layers(self,values):
"""
Update Napari viewer layers
:param values: tuple
yielded data containing channel and image to update
:return None:
"""
        # guard against empty yields (e.g. when no channel is active)
        if values is None:
            return
        current_channel = values[0]
new_image = values[1]
channel_names = ['405nm','488nm','561nm','635nm','730nm']
colormaps = ['bop purple','bop blue','bop orange','red','grey']
channel_name = channel_names[current_channel]
colormap = colormaps[current_channel]
try:
self.viewer.layers[channel_name].data = new_image
        except KeyError:
self.viewer.add_image(new_image, name=channel_name, blending='additive', colormap=colormap,contrast_limits=[110,.9*np.max(new_image)])
def _save_round_metadata(self,r_idx):
"""
Construct round metadata dictionary and save
:param r_idx: int
round index
:return None:
"""
scan_param_data = [{'root_name': str("OPM_stage_data"),
'scan_type': 'stage',
'theta': float(self.opm_tilt),
'exposure_ms': float(self.exposure_ms),
'pixel_size': float(self.camera_pixel_size_um),
'scan_axis_start': float(self.scan_axis_start_um),
'scan_axis_end': float(self.scan_axis_end_um),
'scan_axis_step': float(self.scan_axis_step_um),
'tile_axis_start': float(self.tile_axis_start_um),
'tile_axis_end': float(self.tile_axis_end_um),
'tile_axis_step': float(self.tile_axis_step_um),
'height_axis_start': float(self.height_axis_start_um),
'height_axis_end': float(self.height_axis_end_um),
'height_axis_step': float(self.height_axis_step_um),
'r_idx': int(r_idx),
'num_r': int(self.n_iterative_rounds),
'num_y': int(self.n_xy_tiles),
'num_z': int(self.n_z_tiles),
'num_ch': int(self.n_active_channels),
'scan_axis_positions': int(self.scan_steps),
'scan_axis_speed': float(self.scan_axis_speed),
'y_pixels': int(self.y_pixels),
'x_pixels': int(self.x_pixels),
'405_active': bool(self.channel_states[0]),
'488_active': bool(self.channel_states[1]),
'561_active': bool(self.channel_states[2]),
'635_active': bool(self.channel_states[3]),
'730_active': bool(self.channel_states[4]),
'405_power': float(self.channel_powers[0]),
'488_power': float(self.channel_powers[1]),
'561_power': float(self.channel_powers[2]),
'635_power': float(self.channel_powers[3]),
'730_power': float(self.channel_powers[4]),
}]
write_metadata(scan_param_data[0], self.metadata_dir_path / Path('scan_'+str(r_idx).zfill(3)+'_metadata.csv'))
def _save_stage_positions(self,r_idx,tile_xy_idx,tile_z_idx,current_stage_data):
"""
Construct stage position metadata dictionary and save
:param r_idx: int
round index
:param tile_xy_idx: int
xy tile index
:param tile_z_idx: int
z tile index
:param current_stage_data: dict
dictionary of stage positions
:return None:
"""
write_metadata(current_stage_data[0], self.metadata_dir_path / Path('stage_r'+str(r_idx).zfill(3)+'_y'+str(tile_xy_idx).zfill(3)+'_z'+str(tile_z_idx).zfill(3)+'_metadata.csv'))
def _save_full_metadata(self):
"""
Save full metadata dictionaries
:return None:
"""
write_metadata(self.codebook[0], self.metadata_dir_path / Path('full_codebook_metadata.csv'))
write_metadata(self.scan_settings[0], self.metadata_dir_path / Path('full_scan_settings_metadata.csv'))
@thread_worker
def _acquire_2d_data(self):
while True:
# parse which channels are active
active_channel_indices = [ind for ind, st in zip(self.do_ind, self.channel_states) if st]
n_active_channels = len(active_channel_indices)
if n_active_channels == 0:
yield None
continue
if self.debug:
print("%d active channels: " % n_active_channels, end="")
for ind in active_channel_indices:
print("%s " % self.channel_labels[ind], end="")
print("")
if self.powers_changed:
self._set_mmc_laser_power()
self.powers_changed = False
if self.channels_changed:
if self.DAQ_running:
self.opmdaq.stop_waveform_playback()
self.DAQ_running = False
self.opmdaq.reset_scan_mirror()
self.opmdaq.set_scan_type('stage')
self.opmdaq.set_channels_to_use(self.channel_states)
self.opmdaq.set_interleave_mode(True)
self.opmdaq.generate_waveforms()
self.opmdaq.start_waveform_playback()
self.DAQ_running=True
self.channels_changed = False
if not(self.DAQ_running):
self.opmdaq.start_waveform_playback()
self.DAQ_running=True
with RemoteMMCore() as mmc_2d:
if self.ROI_changed:
self._crop_camera()
self.ROI_changed = False
# set exposure time
if self.exposure_changed:
mmc_2d.setExposure(self.exposure_ms)
self.exposure_changed = False
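# software-triggered snap for each active channel; every image is yielded
# back to the GUI thread, where _update_layers displays it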
for c in active_channel_indices:
mmc_2d.snapImage()
raw_image_2d = mmc_2d.getImage()
time.sleep(.05)
yield c, raw_image_2d
@thread_worker
def _acquire_3d_data(self):
while True:
with RemoteMMCore() as mmc_3d:
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------Begin setup of scan parameters--------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
# parse which channels are active
active_channel_indices = [ind for ind, st in zip(self.do_ind, self.channel_states) if st]
n_active_channels = len(active_channel_indices)
if self.debug:
print("%d active channels: " % n_active_channels, end="")
for ind in active_channel_indices:
print("%s " % self.channel_labels[ind], end="")
print("")
n_timepoints = 1
if self.ROI_changed:
self._crop_camera()
self.ROI_changed = False
# set exposure time
if self.exposure_changed:
mmc_3d.setExposure(self.exposure_ms)
self.exposure_changed = False
if self.powers_changed:
self._set_mmc_laser_power()
self.powers_changed = False
if self.channels_changed or self.footprint_changed or not(self.DAQ_running):
if self.DAQ_running:
self.opmdaq.stop_waveform_playback()
self.DAQ_running = False
self.opmdaq.set_scan_type('mirror')
self.opmdaq.set_channels_to_use(self.channel_states)
self.opmdaq.set_interleave_mode(True)
scan_steps = self.opmdaq.set_scan_mirror_range(self.scan_axis_step_um,self.scan_mirror_footprint_um)
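# set_scan_mirror_range returns the number of galvo positions needed to cover
# the requested footprint at the requested step size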
self.opmdaq.generate_waveforms()
self.channels_changed = False
self.footprint_changed = False
raw_image_stack = np.zeros([self.do_ind[-1],scan_steps,self.ROI_width_y,self.ROI_width_x]).astype(np.uint16)
if self.debug:
# output experiment info
print("Scan axis range: %.1f um, Scan axis step: %.1f nm, Number of galvo positions: %d" %
(self.scan_mirror_footprint_um, self.scan_axis_step_um * 1000, scan_steps))
print('Time points: ' + str(n_timepoints))
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------End setup of scan parameters----------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------Start acquisition and deskew----------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
self.opmdaq.start_waveform_playback()
self.DAQ_running = True
# run hardware triggered acquisition
mmc_3d.startSequenceAcquisition(int(n_active_channels*scan_steps),0,True)
for z in range(scan_steps):
for c in active_channel_indices:
while mmc_3d.getRemainingImageCount()==0:
pass
raw_image_stack[c,z,:] = mmc_3d.popNextImage()
mmc_3d.stopSequenceAcquisition()
self.opmdaq.stop_waveform_playback()
self.DAQ_running = False
# deskew parameters
deskew_parameters = np.empty([3])
deskew_parameters[0] = self.opm_tilt # (degrees)
deskew_parameters[1] = self.scan_axis_step_um*100 # (nm)
deskew_parameters[2] = self.camera_pixel_size_um*100 # (nm)
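# deskew remaps the tilted OPM stack onto orthogonal coordinates; np.flipud
# reverses the scan axis, presumably to match the direction deskew expects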
for c in active_channel_indices:
deskewed_image = deskew(np.flipud(raw_image_stack[c,:]),*deskew_parameters).astype(np.uint16)
yield c, deskewed_image
del raw_image_stack
#------------------------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------End acquisition and deskew-----------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
@thread_worker
def _acquire_iterative_data(self):
# unwrap settings from iterative setup widget
self.scan_settings_in_use=self.scan_settings[0]
exposure_ms = self.scan_settings_in_use['exposure_ms']
scan_axis_start_um = self.scan_settings_in_use['scan_axis_start_um']
scan_axis_end_um = self.scan_settings_in_use['scan_axis_end_um']
scan_axis_step_um = self.scan_settings_in_use['scan_axis_step_um']
tile_axis_start_um = self.scan_settings_in_use['tile_axis_start_um']
tile_axis_end_um = self.scan_settings_in_use['tile_axis_end_um']
tile_axis_step_um =self.scan_settings_in_use['tile_axis_step_um']
height_axis_start_um = self.scan_settings_in_use['height_axis_start_um']
height_axis_end_um = self.scan_settings_in_use['height_axis_end_um']
height_axis_step_um = self.scan_settings_in_use['height_axis_step_um']
n_iterative_rounds = self.scan_settings_in_use['n_iterative_rounds']
nuclei_round = self.scan_settings_in_use['nuclei_round']
num_xy_tiles = self.scan_settings_in_use['num_xy_tiles']
num_z_tiles = self.scan_settings_in_use['num_z_tiles']
n_active_channels_readout = self.scan_settings_in_use['n_active_channels_readout']
n_active_channels_nuclei = self.scan_settings_in_use['n_active_channels_nuclei']
scan_axis_positions = self.scan_settings_in_use['scan_axis_positions']
scan_axis_speed_readout = self.scan_settings_in_use['scan_axis_speed_readout']
scan_axis_speed_nuclei = self.scan_settings_in_use['scan_axis_speed_nuclei']
y_pixels = self.scan_settings_in_use['y_pixels']
x_pixels = self.scan_settings_in_use['x_pixels']
channel_states_readout = [
self.scan_settings_in_use['405_active_readout'],
self.scan_settings_in_use['488_active_readout'],
self.scan_settings_in_use['561_active_readout'],
self.scan_settings_in_use['635_active_readout'],
self.scan_settings_in_use['730_active_readout']
]
channel_powers_readout = [
self.scan_settings_in_use['405_power_readout'],
self.scan_settings_in_use['488_power_readout'],
self.scan_settings_in_use['561_power_readout'],
self.scan_settings_in_use['635_power_readout'],
self.scan_settings_in_use['730_power_readout']
]
channel_states_nuclei = [
self.scan_settings_in_use['405_active_nuclei'],
self.scan_settings_in_use['488_active_nuclei'],
self.scan_settings_in_use['561_active_nuclei'],
self.scan_settings_in_use['635_active_nuclei'],
self.scan_settings_in_use['730_active_nuclei']
]
channel_powers_nuclei = [
self.scan_settings_in_use['405_power_nuclei'],
self.scan_settings_in_use['488_power_nuclei'],
self.scan_settings_in_use['561_power_nuclei'],
self.scan_settings_in_use['635_power_nuclei'],
self.scan_settings_in_use['730_power_nuclei']
]
# scan parameters that do not change between readout & nuclei rounds
self.exposure_ms = exposure_ms
self.pixel_size = self.camera_pixel_size_um
self.scan_axis_start_um = scan_axis_start_um
self.scan_axis_end_um = scan_axis_end_um
self.scan_axis_step_um = scan_axis_step_um
self.tile_axis_start_um = tile_axis_start_um
self.tile_axis_end_um = tile_axis_end_um
self.tile_axis_step_um = tile_axis_step_um
self.height_axis_start_um = height_axis_start_um
self.height_axis_end_um = height_axis_end_um
self.height_axis_step_um = height_axis_step_um
self.n_iterative_rounds = n_iterative_rounds
self.n_xy_tiles = num_xy_tiles
self.n_z_tiles = num_z_tiles
self.y_pixels = y_pixels
self.x_pixels = x_pixels
self.scan_steps = scan_axis_positions
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------Begin setup of scan parameters--------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
# create directory for iterative imaging
time_string = datetime.now().strftime("%Y_%m_%d-%I_%M_%S")
output_dir_path = self.save_path / Path('iterative_'+time_string)
output_dir_path.mkdir(parents=True, exist_ok=True)
# # create BDV H5 for registration of fiducial
# bdv_output_dir_path = output_dir_path / Path('fiducial_data')
# bdv_output_dir_path.mkdir(parents=True, exist_ok=True)
# bdv_output_path = bdv_output_dir_path / Path('fiducial_bdv.h5')
# bdv_writer = npy2bdv.BdvWriter(str(bdv_output_path),
# nchannels=1,
# ntiles=(self.n_xy_tiles*self.n_z_tiles),
# subsamp=((1,1,1),),
# blockdim=((1, y_pixels//2, x_pixels//2),),
# compression=None)
# create blank affine transformation to use for stage translation in BDV H5 XML
# unit_matrix = np.array(((1.0, 0.0, 0.0, 0.0), # change the 4. value for x_translation (px)
# (0.0, 1.0, 0.0, 0.0), # change the 4. value for y_translation (px)
# (0.0, 0.0, 1.0, 0.0)))# change the 4. value for z_translation (px)
# create metadata directory in output directory
self.metadata_dir_path = output_dir_path / Path('metadata')
self.metadata_dir_path.mkdir(parents=True, exist_ok=True)
# create zarr data directory in output directory
zarr_dir_path = output_dir_path / Path('raw_data')
zarr_dir_path.mkdir(parents=True, exist_ok=True)
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------End setup of scan parameters----------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------------Start acquisition---------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
with RemoteMMCore() as mmc_iterative_setup:
# set circular buffer to be large
mmc_iterative_setup.clearCircularBuffer()
circ_buffer_mb = 8000
mmc_iterative_setup.setCircularBufferMemoryFootprint(int(circ_buffer_mb))
mmc_iterative_setup.setTimeoutMs(120000)
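# long core timeout (120 s) so that slow hardware-triggered stage scans do
# not trip MMCore sequence-acquisition timeouts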
ASIstage.setup_start_trigger_output(mmc_iterative_setup)
ASIstage.check_if_busy(mmc_iterative_setup)
# set scan stage speed
ASIstage.set_1d_stage_scan(mmc_iterative_setup)
ASIstage.check_if_busy(mmc_iterative_setup)
ASIstage.set_1d_stage_scan_area(mmc_iterative_setup,self.scan_axis_start_um/1000.,self.scan_axis_end_um/1000.)
ASIstage.check_if_busy(mmc_iterative_setup)
# x stage is always start of scan
stage_x = scan_axis_start_um
# loop through rounds
for r_idx in range(self.n_iterative_rounds):
if r_idx==0:
# scan parameters that change between readout and nuclei rounds
self.n_active_channels = n_active_channels_readout
self.scan_steps = scan_axis_positions
self.scan_axis_speed = scan_axis_speed_readout
self.channel_states = channel_states_readout
self.channel_powers = channel_powers_readout
# setup instrument for readout rounds
self._crop_camera()
self._set_mmc_laser_power()
if self.DAQ_running:
self.opmdaq.stop_waveform_playback()
self.DAQ_running = False
self.opmdaq.reset_scan_mirror()
self.opmdaq.set_scan_type('stage')
self.opmdaq.set_channels_to_use(self.channel_states)
self.opmdaq.set_interleave_mode(True)
self.opmdaq.generate_waveforms()
elif r_idx == (nuclei_round - 1):
# scan parameters that change between readout and nuclei rounds
self.n_active_channels = n_active_channels_nuclei
self.scan_axis_speed = scan_axis_speed_nuclei
self.channel_states = channel_states_nuclei
self.channel_powers = channel_powers_nuclei
# setup instrument for nuclei round
self._set_mmc_laser_power()
if self.DAQ_running:
self.opmdaq.stop_waveform_playback()
self.DAQ_running = False
self.opmdaq.reset_scan_mirror()
self.opmdaq.set_scan_type('stage')
self.opmdaq.set_channels_to_use(self.channel_states)
self.opmdaq.set_interleave_mode(True)
self.opmdaq.generate_waveforms()
if (r_idx>0):
# run fluidics program for this round
success_fluidics = False
success_fluidics = run_fluidic_program(r_idx, self.df_fluidics, self.valve_controller, self.pump_controller)
else:
success_fluidics = True
if (success_fluidics):
# configure blosc compressor
# blosc.use_threads=True
# blosc.set_nthreads = 8
bdv_tile_idx = 0
for xy_idx in trange(self.n_xy_tiles,desc="xy tile",position=0):
with RemoteMMCore() as mmc_iterative_execute:
mmc_iterative_execute.setExposure(exposure_ms)
ASIstage.set_axis_speed(mmc_iterative_execute,'X',0.1)
ASIstage.check_if_busy(mmc_iterative_execute)
ASIstage.set_axis_speed(mmc_iterative_execute,'Y',0.1)
ASIstage.check_if_busy(mmc_iterative_execute)
stage_y = tile_axis_start_um+(xy_idx*tile_axis_step_um)
ASIstage.set_xy_position(mmc_iterative_execute,stage_x,stage_y)
ASIstage.set_axis_speed(mmc_iterative_execute,'X',np.round(self.scan_axis_speed,4))
ASIstage.check_if_busy(mmc_iterative_execute)
# create Zarr for this round
zarr_output_path = zarr_dir_path / Path('OPM_stage_data_r'+str(r_idx).zfill(3)+'_xy'+str(xy_idx).zfill(3)+'.zarr')
# create and open zarr file
opm_round_data = zarr.open(
str(zarr_output_path),
mode="w",
shape=(self.n_z_tiles, self.n_active_channels, self.scan_steps, self.y_pixels, self.x_pixels),
chunks=(1, 1, 1, self.y_pixels, self.x_pixels),
dtype=np.uint16)
for z_idx in trange(self.n_z_tiles,desc="z tile",position=1,leave=False):
stage_z = height_axis_start_um+(z_idx*height_axis_step_um)
# move stage
ASIstage.set_z_position(mmc_iterative_execute,stage_z)
# grab actual stage position
actual_stage_x, actual_stage_y, actual_stage_z = ASIstage.get_xyz_position(mmc_iterative_execute)
# create current stage position
current_stage_data = [{'stage_x': float(actual_stage_x),
'stage_y': float(actual_stage_y),
'stage_z': float(actual_stage_z)}]
# create affine xform for stage position
# affine_matrix = unit_matrix
# affine_matrix[1,3] = (stage_x)/(self.camera_pixel_size_um) # x-translation
# affine_matrix[0,3] = (stage_y)/(self.camera_pixel_size_um) # y-translation
# affine_matrix[2,3] = (stage_z)/(self.camera_pixel_size_um) # z-translation
# Create virtual stack within BDV H5 to place fused z planes into
# bdv_writer.append_view(stack=None,
# virtual_stack_dim=(self.scan_steps,self.y_pixels,self.x_pixels),
# time=r_idx,
# channel=0,
# tile=bdv_tile_idx,
# voxel_size_xyz=(self.camera_pixel_size_um, self.camera_pixel_size_um, self.scan_axis_step_um),
# voxel_units='um',
# calibration = (1,1,np.abs(self.scan_axis_step_um/self.camera_pixel_size_um)),
# m_affine=affine_matrix,
# name_affine = 'tile '+str(bdv_tile_idx)+' translation')
# enforce camera in external trigger mode
mmc_iterative_execute.setConfig('Camera-TriggerSource','EXTERNAL')
mmc_iterative_execute.waitForConfig('Camera-TriggerSource','EXTERNAL')
trigger_value = mmc_iterative_execute.getProperty(self.camera_name,'TRIGGER SOURCE')
while not(trigger_value == 'EXTERNAL'):
mmc_iterative_execute.setConfig('Camera-TriggerSource','EXTERNAL')
mmc_iterative_execute.waitForConfig('Camera-TriggerSource','EXTERNAL')
time.sleep(1)
trigger_value = mmc_iterative_execute.getProperty(self.camera_name,'TRIGGER SOURCE')
# enforce camera to start mode
mmc_iterative_execute.setConfig('Camera-TriggerType','START')
mmc_iterative_execute.waitForConfig('Camera-TriggerType','START')
trigger_value = mmc_iterative_execute.getProperty(self.camera_name,'Trigger')
while not(trigger_value == 'START'):
mmc_iterative_execute.setConfig('Camera-TriggerType','START')
mmc_iterative_execute.waitForConfig('Camera-TriggerType','START')
time.sleep(1)
trigger_value = mmc_iterative_execute.getProperty(self.camera_name,'Trigger')
# enforce camera looking for a positive trigger
mmc_iterative_execute.setProperty(self.camera_name,r'TriggerPolarity','POSITIVE')
trigger_value = mmc_iterative_execute.getProperty(self.camera_name,r'TriggerPolarity')
while not(trigger_value == 'POSITIVE'):
mmc_iterative_execute.setProperty(self.camera_name,r'TriggerPolarity','POSITIVE')
time.sleep(1)
trigger_value = mmc_iterative_execute.getProperty(self.camera_name,r'TriggerPolarity')
# start DAQ
self.opmdaq.start_waveform_playback()
self.DAQ_running = True
# make sure stage is ready
ASIstage.check_if_busy(mmc_iterative_execute)
# run hardware triggered acquisition
total_steps=self.scan_steps+self.excess_stage_steps
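# the first excess_stage_steps triggered frames (likely acquired while the
# stage ramps up to constant velocity) are discarded when writing to Zarr below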
total_acquisition_images = self.n_active_channels * total_steps
mmc_iterative_execute.waitForDevice(self.camera_name)
mmc_iterative_execute.startSequenceAcquisition(int(total_acquisition_images),0,True)
ASIstage.start_1d_stage_scan(mmc_iterative_execute)
# collect interleaved data
# place all data into Zarr, also place fiducial images into BDV for stitching
for s_idx in trange(total_steps,desc="scan", position=2, leave=False):
for c_idx in range(self.n_active_channels):
while (mmc_iterative_execute.getRemainingImageCount() == 0):
pass
image = mmc_iterative_execute.popNextImage()
if (s_idx>=self.excess_stage_steps):
opm_round_data[z_idx, c_idx, s_idx-self.excess_stage_steps, :, :] =image
#if (c_idx==0 and r_idx<(self.n_iterative_rounds-1)) or (c_idx==1 and r_idx==(self.n_iterative_rounds-1)):
# #write the fiducial channel into bigstitcher
# bdv_writer.append_plane(
# plane=image,
# z=s_idx-self.excess_stage_steps,
# time=r_idx,
# channel=0)
mmc_iterative_execute.stopSequenceAcquisition()
mmc_iterative_execute.clearCircularBuffer()
# stop DAQ
self.opmdaq.stop_waveform_playback()
self.DAQ_running=False
bdv_tile_idx = bdv_tile_idx + 1
self._save_stage_positions(r_idx,xy_idx,z_idx,current_stage_data)
# make sure stage has moved back to beginning
ASIstage.check_if_busy(mmc_iterative_execute)
# clean up memory
gc.collect()
# del reference to Zarr file and clean up memory
opm_round_data = None
del opm_round_data
gc.collect()
# construct round metadata and save
self._save_round_metadata(r_idx)
#------------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------End acquisition-------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
if (success_fluidics):
# clean up DAQ
self.opmdaq.reset_scan_mirror()
# create down-sampled views with compression
# bdv_writer.create_pyramids(subsamp=((4, 8, 8)),
# blockdim=((8, 128, 128)),
# compression='lzf')
# write and close BDV H5 xml file
# bdv_writer.write_xml()
# bdv_writer.close()
# write full metadata
self._save_full_metadata()
# enforce camera in internal trigger mode
self._enforce_DCAM_internal_trigger()
else:
# write and close BDV H5 xml file
# bdv_writer.write_xml()
# bdv_writer.close()
# enforce camera in internal trigger mode
self._enforce_DCAM_internal_trigger()
raise Exception('Error in fluidics. Acquisition failed.')
def _crop_camera(self):
"""
Crop camera to GUI values
:return None:
"""
with RemoteMMCore() as mmc_crop_camera:
current_ROI = mmc_crop_camera.getROI()
if not(current_ROI[2]==2304) or not(current_ROI[3]==2304):
mmc_crop_camera.clearROI()
mmc_crop_camera.waitForDevice(self.camera_name)
mmc_crop_camera.setROI(int(self.ROI_uleft_corner_x),int(self.ROI_uleft_corner_y),int(self.ROI_width_x),int(self.ROI_width_y))
mmc_crop_camera.waitForDevice(self.camera_name)
def _lasers_to_hardware(self):
"""
Change lasers to hardware control
:return None:
"""
with RemoteMMCore() as mmc_lasers_hardware:
# turn all lasers off
mmc_lasers_hardware.setConfig('Laser','Off')
mmc_lasers_hardware.waitForConfig('Laser','Off')
# set all laser to external triggering
mmc_lasers_hardware.setConfig('Modulation-405','External-Digital')
mmc_lasers_hardware.waitForConfig('Modulation-405','External-Digital')
mmc_lasers_hardware.setConfig('Modulation-488','External-Digital')
mmc_lasers_hardware.waitForConfig('Modulation-488','External-Digital')
mmc_lasers_hardware.setConfig('Modulation-561','External-Digital')
mmc_lasers_hardware.waitForConfig('Modulation-561','External-Digital')
mmc_lasers_hardware.setConfig('Modulation-637','External-Digital')
mmc_lasers_hardware.waitForConfig('Modulation-637','External-Digital')
mmc_lasers_hardware.setConfig('Modulation-730','External-Digital')
mmc_lasers_hardware.waitForConfig('Modulation-730','External-Digital')
# turn all lasers on
mmc_lasers_hardware.setConfig('Laser','AllOn')
mmc_lasers_hardware.waitForConfig('Laser','AllOn')
def _lasers_to_software(self):
"""
Change lasers to software control
:return None:
"""
with RemoteMMCore() as mmc_lasers_software:
# turn all lasers off
mmc_lasers_software.setConfig('Laser','Off')
mmc_lasers_software.waitForConfig('Laser','Off')
# set all lasers back to software control
mmc_lasers_software.setConfig('Modulation-405','CW (constant power)')
mmc_lasers_software.waitForConfig('Modulation-405','CW (constant power)')
mmc_lasers_software.setConfig('Modulation-488','CW (constant power)')
mmc_lasers_software.waitForConfig('Modulation-488','CW (constant power)')
mmc_lasers_software.setConfig('Modulation-561','CW (constant power)')
mmc_lasers_software.waitForConfig('Modulation-561','CW (constant power)')
mmc_lasers_software.setConfig('Modulation-637','CW (constant power)')
mmc_lasers_software.waitForConfig('Modulation-637','CW (constant power)')
mmc_lasers_software.setConfig('Modulation-730','CW (constant power)')
mmc_lasers_software.waitForConfig('Modulation-730','CW (constant power)')
def _set_mmc_laser_power(self):
"""
Change laser power
:return None:
"""
with RemoteMMCore() as mmc_laser_power:
mmc_laser_power.setProperty(r'Coherent-Scientific Remote',r'Laser 405-100C - PowerSetpoint (%)',float(self.channel_powers[0]))
mmc_laser_power.setProperty(r'Coherent-Scientific Remote',r'Laser 488-150C - PowerSetpoint (%)',float(self.channel_powers[1]))
mmc_laser_power.setProperty(r'Coherent-Scientific Remote',r'Laser OBIS LS 561-150 - PowerSetpoint (%)',float(self.channel_powers[2]))
mmc_laser_power.setProperty(r'Coherent-Scientific Remote',r'Laser 637-140C - PowerSetpoint (%)',float(self.channel_powers[3]))
mmc_laser_power.setProperty(r'Coherent-Scientific Remote',r'Laser 730-30C - PowerSetpoint (%)',float(self.channel_powers[4]))
def _setup_camera(self):
"""
Setup camera readout and triggering for OPM
:return None:
"""
with RemoteMMCore() as mmc_camera_setup:
# give camera time to change modes if necessary
mmc_camera_setup.setConfig('Camera-Setup','ScanMode3')
mmc_camera_setup.waitForConfig('Camera-Setup','ScanMode3')
# set camera to start trigger
mmc_camera_setup.setConfig('Camera-TriggerType','START')
mmc_camera_setup.waitForConfig('Camera-TriggerType','START')
trigger_value = mmc_camera_setup.getProperty(self.camera_name,'Trigger')
while not(trigger_value == 'START'):
mmc_camera_setup.setConfig('Camera-TriggerType','START')
mmc_camera_setup.waitForConfig('Camera-TriggerType','START')
time.sleep(2)
trigger_value = mmc_camera_setup.getProperty(self.camera_name,'Trigger')
# give camera time to change modes if necessary
mmc_camera_setup.setProperty(self.camera_name,r'OUTPUT TRIGGER KIND[0]','EXPOSURE')
mmc_camera_setup.setProperty(self.camera_name,r'OUTPUT TRIGGER KIND[1]','EXPOSURE')
mmc_camera_setup.setProperty(self.camera_name,r'OUTPUT TRIGGER KIND[2]','EXPOSURE')
mmc_camera_setup.setProperty(self.camera_name,r'OUTPUT TRIGGER POLARITY[0]','POSITIVE')
mmc_camera_setup.setProperty(self.camera_name,r'OUTPUT TRIGGER POLARITY[1]','POSITIVE')
mmc_camera_setup.setProperty(self.camera_name,r'OUTPUT TRIGGER POLARITY[2]','POSITIVE')
mmc_camera_setup.setProperty(self.camera_name,r'MINIMUM ACQUISITION TIMEOUT','120.0')
mmc_camera_setup.setProperty(self.camera_name,r'TriggerPolarity','POSITIVE')
def _enforce_DCAM_external_trigger(self):
"""
Enforce camera being in trigger = EXTERNAL mode
:return None:
"""
with RemoteMMCore() as mmc_camera_trigger:
# set camera trigger source to EXTERNAL
mmc_camera_trigger.setConfig('Camera-TriggerSource','EXTERNAL')
mmc_camera_trigger.waitForConfig('Camera-TriggerSource','EXTERNAL')
# check if camera actually changed
# we find that camera doesn't always go back to START mode and need to check it
trigger_value = mmc_camera_trigger.getProperty(self.camera_name,'TRIGGER SOURCE')
while not(trigger_value == 'EXTERNAL'):
mmc_camera_trigger.setConfig('Camera-TriggerSource','EXTERNAL')
mmc_camera_trigger.waitForConfig('Camera-TriggerSource','EXTERNAL')
time.sleep(2)
trigger_value = mmc_camera_trigger.getProperty(self.camera_name,'TRIGGER SOURCE')
def _enforce_DCAM_internal_trigger(self):
"""
Enforce camera being in trigger = INTERNAL mode
:return None:
"""
with RemoteMMCore() as mmc_camera_trigger:
# set camera trigger source to INTERNAL
mmc_camera_trigger.setConfig('Camera-TriggerSource','INTERNAL')
mmc_camera_trigger.waitForConfig('Camera-TriggerSource','INTERNAL')
# check if camera actually changed
# we find that camera doesn't always go back to START mode and need to check it
trigger_value = mmc_camera_trigger.getProperty(self.camera_name,'TRIGGER SOURCE')
while not(trigger_value == 'INTERNAL'):
mmc_camera_trigger.setConfig('Camera-TriggerSource','INTERNAL')
mmc_camera_trigger.waitForConfig('Camera-TriggerSource','INTERNAL')
trigger_value = mmc_camera_trigger.getProperty(self.camera_name,'TRIGGER SOURCE')
def _startup(self):
"""
Startup OPM instrument in neutral state for all hardware
:return None:
"""
# set lasers to 0% power and hardware control
self._set_mmc_laser_power()
self._lasers_to_hardware()
# set camera to OPM specific setup
self._crop_camera()
self._setup_camera()
self._enforce_DCAM_internal_trigger()
'''
# connect to pump
self.pump_controller = APump(self.pump_parameters)
# set pump to remote control
self.pump_controller.enableRemoteControl(True)
# connect to valves
self.valve_controller = HamiltonMVP(com_port=self.valve_COM_port)
# initialize valves
self.valve_controller.autoAddress()
'''
# connect to DAQ
self.opmdaq = OPMNIDAQ()
# reset scan mirror position to neutral
self.opmdaq.reset_scan_mirror()
def _shutdown(self):
"""
Shutdown OPM instrument in neutral state for all hardware
:return None:
"""
# set lasers to 0% power and software control
self._set_mmc_laser_power()
self._lasers_to_software()
# shutdown DAQ
if self.DAQ_running:
self.opmdaq.stop_waveform_playback()
self.opmdaq.reset_scan_mirror()
if (self.iterative_setup):
self.valve_controller.close()
self.pump_controller.disconnect()
@magicgui(
auto_call=False,
exposure_ms={"widget_type": "FloatSpinBox", "min": 1, "max": 500,'label': 'Camera exposure (ms)'},
layout='horizontal',
call_button='Update exposure'
)
def set_exposure(self, exposure_ms=10.0):
"""
Magicgui element to get camera exposure time
:param exposure_ms: float
camera exposure time
:return None:
"""
if not(exposure_ms == self.exposure_ms):
self.exposure_ms=exposure_ms
self.exposure_changed = True
else:
self.exposure_changed = False
@magicgui(
auto_call=False,
uleft_corner_x={"widget_type": "SpinBox", "min": 0, "max": 2304,'label': 'ROI upper left (non-tilt)'},
uleft_corner_y={"widget_type": "SpinBox", "min": 0, "max": 2304,'label': 'ROI upper left (tilt)'},
width_x={"widget_type": "SpinBox", "min": 0, "max": 2304,'label': 'ROI width (non-tilt)'},
width_y={"widget_type": "SpinBox", "min": 0, "max": 2304,'label': 'ROI height (tilt)'},
layout='vertical',
call_button="Update crop"
)
def set_ROI(self, uleft_corner_x=200,uleft_corner_y=896,width_x=1800,width_y=512):
"""
Magicgui element to get camera ROI
:param uleft_corner_x: int
upper left ROI x pixel
:param uleft_corner_y: int
upper left ROI y pixel
:param width_x: int
ROI width in pixels
:param width_y: int
ROI height in pixels = TILTED DIRECTION
:return None:
"""
if not(int(uleft_corner_x)==self.ROI_uleft_corner_x) or not(int(uleft_corner_y)==self.ROI_uleft_corner_y) or not(int(width_x)==self.ROI_width_x) or not(int(width_y)==self.ROI_width_y):
self.ROI_uleft_corner_x=int(uleft_corner_x)
self.ROI_uleft_corner_y=int(uleft_corner_y)
self.ROI_width_x=int(width_x)
self.ROI_width_y=int(width_y)
self.ROI_changed = True
else:
self.ROI_changed = False
@magicgui(
auto_call=False,
power_405={"widget_type": "FloatSpinBox", "min": 0, "max": 100, "label": '405nm power (%)'},
power_488={"widget_type": "FloatSpinBox", "min": 0, "max": 100, "label": '488nm power (%)'},
power_561={"widget_type": "FloatSpinBox", "min": 0, "max": 100, "label": '561nm power (%)'},
power_635={"widget_type": "FloatSpinBox", "min": 0, "max": 100, "label": '635nm power (%)'},
power_730={"widget_type": "FloatSpinBox", "min": 0, "max": 100, "label": '730nm power (%)'},
layout='vertical',
call_button='Update powers'
)
def set_laser_power(self, power_405=0.0, power_488=0.0, power_561=0.0, power_635=0.0, power_730=0.0):
"""
Magicgui element to get laser powers (0-100%)
:param power_405: float
405 nm laser power
:param power_488: float
488 nm laser power
:param power_561: float
561 nm laser power
:param power_635: float
635 nm laser power
:param power_730: float
730 nm laser power
:return None:
"""
channel_powers = [power_405,power_488,power_561,power_635,power_730]
if not(np.all(channel_powers == self.channel_powers)):
self.channel_powers=channel_powers
self.powers_changed = True
else:
self.powers_changed = False
@magicgui(
auto_call=True,
active_channels = {"widget_type": "Select", "choices": ["Off","405","488","561","635","730"], "allow_multiple": True, "label": "Active channels"}
)
def set_active_channel(self, active_channels):
"""
Magicgui element to set active lasers
:param active_channels: list
list of selected channel name strings; 'Off' deactivates all channels
:return None:
"""
states = [False,False,False,False,False]
for channel in active_channels:
if channel == 'Off':
states = [False,False,False,False,False]
break
if channel == '405':
states[0]=True
elif channel == '488':
states[1]=True
elif channel == '561':
states[2]=True
elif channel == '635':
states[3]=True
elif channel == '730':
states[4]=True
if not(np.all(states == self.channel_states)):
self.channel_states=states
self.channels_changed = True
else:
self.channels_changed = False
@magicgui(
auto_call=False,
scan_mirror_footprint_um={"widget_type": "FloatSpinBox", "min": 5, "max": 225, "label": 'Mirror sweep (um)'},
layout='horizontal',
call_button='Update scan range'
)
def set_galvo_sweep(self, scan_mirror_footprint_um=50.0):
"""
Magicgui element to set scan footprint
:param scan_mirror_footprint_um: float
size of scan mirror sweep in microns
:return None:
"""
if not(scan_mirror_footprint_um==self.scan_mirror_footprint_um):
self.scan_mirror_footprint_um=scan_mirror_footprint_um
self.footprint_changed = True
else:
self.footprint_changed = False
# control continuous 2D imaging (software triggering)
@magicgui(
auto_call=True,
live_mode_2D={"widget_type": "PushButton", "label": 'Start/Stop Live (2D)'},
layout='horizontal'
)
def live_mode_2D(self,live_mode_2D=False):
if (np.any(self.channel_states)):
if not(self.worker_3d_running) and not(self.worker_iterative_running):
if self.worker_2d_running:
self.worker_2d.pause()
if self.DAQ_running:
self.opmdaq.stop_waveform_playback()
self.DAQ_running=False
self.worker_2d_running = False
else:
if not(self.worker_2d_started):
self.worker_2d_started = True
self.worker_2d_running = True
self.worker_2d.start()
else:
self.worker_2d.resume()
self.worker_2d_running = True
else:
if self.worker_3d_running:
raise Exception('Stop live 3D acquisition first.')
elif self.worker_iterative_running:
raise Exception('Iterative acquisition in process.')
else:
raise Exception('Unknown error.')
else:
raise Exception('Set at least one active channel before starting.')
# control continuous 3D volume (hardware triggering)
@magicgui(
auto_call=True,
live_mode_3D={"widget_type": "PushButton", "label": 'Start/Stop live (3D)'},
layout='horizontal'
)
def live_mode_3D(self,live_mode_3D):
if (np.any(self.channel_states)):
if not(self.worker_2d_running) and not(self.worker_iterative_running):
self.galvo_scan = True
if self.worker_3d_running:
self.worker_3d.pause()
self.worker_3d_running = False
if self.DAQ_running:
self.opmdaq.stop_waveform_playback()
self.DAQ_running = False
self.opmdaq.reset_scan_mirror()
else:
if not(self.worker_3d_started):
self.worker_3d.start()
self.worker_3d_started = True
self.worker_3d_running = True
else:
self.worker_3d.resume()
self.worker_3d_running = True
else:
if self.worker_2d_running:
raise Exception('Stop live 2D acquisition first.')
elif self.worker_iterative_running:
raise Exception('Iterative acquisition in process.')
else:
raise Exception('Unknown error.')
else:
raise Exception('Set at least one active channel before starting.')
# set filepath for saving data
@magicgui(
auto_call=False,
save_path={"widget_type": "FileEdit","mode": "d", "label": 'Save path:'},
layout='horizontal',
call_button="Set"
)
def set_save_path(self, save_path='d:/'):
self.save_path = Path(save_path)
self.save_path_setup = True
def _set_iterative_configuration(self,values):
if len(values) > 0:
self.codebook = values[0]
self.df_fluidics = values[1]
self.scan_settings = values[2]
self.valve_controller = values[3]
self.pump_controller = values[4]
self.iterative_setup = True
# control stage scan (hardware triggering)
@magicgui(
auto_call=True,
stagescan_mode_3D={"widget_type": "PushButton", "label": 'Start iterative scan'},
layout='horizontal'
)
def stagescan_mode_3D(self,stagescan_mode_3D):
if not(self.worker_2d_running) and not(self.worker_3d_running):
if (self.iterative_setup and self.save_path_setup):
self.galvo_scan = False
self.worker_iterative.start()
self.worker_iterative_running = True
self.worker_iterative.returned.connect(self._create_worker_iterative)
else:
raise Exception('Set configuration and save path first.')
else:
raise Exception('Stop active live mode first.')
| 56,334
| 45.21411
| 192
|
py
|
OPM
|
OPM-master/napari-control/src/OPMIterative.py
|
# napari imports
from magicclass import magicclass, set_design, MagicTemplate
from magicgui import magicgui, widgets
from napari.qt.threading import thread_worker
#general python imports
from pathlib import Path
from pymmcore_plus import RemoteMMCore
import numpy as np
from distutils.util import strtobool
# ASU OPM imports
import src.utils.data_io as data_io
from src.utils.fluidics_control import run_fluidic_program
from src.hardware.APump import APump
from src.hardware.HamiltonMVP import HamiltonMVP
# Fluidics loader, exposure, channels, and stage scan definitions
@magicclass(labels=False,widget_type="tabbed")
class OPMIterative(MagicTemplate):
# initialize
def __init__(self):
self.fluidics_file_path = None
self.fluidics_program = None
self.n_iterative_rounds = 0
self.fluidics_loaded = False
self.pump_COM_port = 'COM5'
self.valve_COM_port = 'COM6'
self.pump_parameters = {'pump_com_port': self.pump_COM_port,
'pump_ID': 30,
'verbose': True,
'simulate_pump': False,
'serial_verbose': False,
'flip_flow_direction': False}
self.scan_axis_step_um = 0.400 # unit: um
self.scan_axis_start_um = 0 # unit: um
self.scan_axis_end_um = 0 # unit: um
self.scan_axis_positions = 0 # positions
self.scan_axis_speed_readout = 0 # mm/s
self.scan_axis_speed_nuclei = 0 # mm/s
self.tile_axis_start_um = 0 # unit: um
self.tile_axis_end_um = 0 # unit: um
self.tile_axis_step_um = 0 # unit: um
self.tile_axis_positions = 0
self.height_axis_start_um = 0 # unit: um
self.height_axis_end_um = 0 # unit: um
self.height_axis_step_um = 0 # unit: um
self.height_axis_positions = 0
self.n_xy_tiles = 0 # number of xy tiles
self.n_z_tiles = 0 # number of z tiles
self.stage_volume_set = False
self.channel_states_readout = [False,False,False,False,False]
self.channel_states_nuclei = [False,False,False,False,False]
self.channel_powers = [0.0,0.0,0.0,0.0,0.0]
self.n_active_channels_readout = 0
self.n_active_channels_nuclei = 0
self.exposure_ms = 10.0
self.pixel_size_um = 0.115
self.channels_set = False
self.setup_complete = False
self.debug = False
# thread worker for cross-class communication of setup
def _set_worker_iterative_setup(self,worker_iterative_setup):
self.worker_iterative_setup = worker_iterative_setup
# calculate scan volume, pulling ROI from camera settings
def _calculate_scan_volume(self):
try:
with RemoteMMCore() as mmc_stage_setup:
# set experiment exposure
mmc_stage_setup.setExposure(self.exposure_ms)
# snap image
mmc_stage_setup.snapImage()
# grab exposure
true_exposure = mmc_stage_setup.getExposure()
# grab ROI
current_ROI = mmc_stage_setup.getROI()
self.x_pixels = current_ROI[2]
self.y_pixels = current_ROI[3]
if not((self.y_pixels == 256) or (self.y_pixels==512)):
raise Exception('Set camera ROI first.')
# get actual framerate from micromanager properties
actual_readout_ms = true_exposure+float(mmc_stage_setup.getProperty('OrcaFusionBT','ReadoutTime')) #unit: ms
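# effective frame time is exposure plus sensor readout; this bounds the
# fastest stage speed at which every scan step still receives a full exposure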
if self.debug: print('Full readout time = ' + str(actual_readout_ms))
# scan axis setup
scan_axis_step_mm = self.scan_axis_step_um / 1000. #unit: mm
self.scan_axis_start_mm = self.scan_axis_start_um / 1000. #unit: mm
self.scan_axis_end_mm = self.scan_axis_end_um / 1000. #unit: mm
scan_axis_range_um = np.abs(self.scan_axis_end_um-self.scan_axis_start_um) # unit: um
self.scan_axis_range_mm = scan_axis_range_um / 1000 #unit: mm
actual_exposure_s = actual_readout_ms / 1000. #unit: s
self.scan_axis_speed_readout = np.round(scan_axis_step_mm / actual_exposure_s / self.n_active_channels_readout,5) #unit: mm/s
self.scan_axis_speed_nuclei = np.round(scan_axis_step_mm / actual_exposure_s / self.n_active_channels_nuclei,5) #unit: mm/s
self.scan_axis_positions = np.rint(self.scan_axis_range_mm / scan_axis_step_mm).astype(int) #unit: number of positions
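# illustrative numbers (not from the code): a 0.4 um step, ~15 ms effective
# readout, and 3 active channels give 0.0004 mm / 0.015 s / 3 ~= 0.0089 mm/s,
# i.e. the stage advances one step per interleaved set of channel exposures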
# tile axis setup
tile_axis_overlap=0.2 #unit: fraction
tile_axis_range_um = np.abs(self.tile_axis_end_um - self.tile_axis_start_um) #unit: um
tile_axis_ROI = self.x_pixels*self.pixel_size_um #unit: um
self.tile_axis_step_um = np.round((tile_axis_ROI) * (1-tile_axis_overlap),2) #unit: um
self.n_xy_tiles = np.rint(tile_axis_range_um / self.tile_axis_step_um).astype(int)+1 #unit: number of positions
# if tile_axis_positions rounded to zero, make sure we acquire at least one position
if self.n_xy_tiles == 0:
self.n_xy_tiles=1
# height axis setup
# check if there are multiple heights
height_axis_range_um = np.abs(self.height_axis_end_um-self.height_axis_start_um) #unit: um
# if multiple heights, check if heights are due to uneven tissue position or for z tiling
height_axis_overlap=0.2 #unit: fraction
height_axis_ROI = self.y_pixels*self.pixel_size_um*np.sin(30.*np.pi/180.) #unit: um
self.height_axis_step_um = np.round((height_axis_ROI)*(1-height_axis_overlap),2) #unit: um
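# worked example (hypothetical ROI): a 512 px tilted ROI at 0.115 um/px
# projects to 512*0.115*sin(30 deg) ~= 29.4 um of height coverage, giving a
# ~23.6 um z step at 20% overlap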
self.n_z_tiles = np.rint(height_axis_range_um / self.height_axis_step_um).astype(int)+1 #unit: number of positions
# if height_axis_positions rounded to zero, make sure we acquire at least one position
if self.n_z_tiles==0:
self.n_z_tiles=1
# create dictionary with scan settings
self.scan_settings = [{'exposure_ms': float(self.exposure_ms),
'scan_axis_start_um': float(self.scan_axis_start_um),
'scan_axis_end_um': float(self.scan_axis_end_um),
'scan_axis_step_um': float(self.scan_axis_step_um),
'tile_axis_start_um': float(self.tile_axis_start_um),
'tile_axis_end_um': float(self.tile_axis_end_um),
'tile_axis_step_um': float(self.tile_axis_step_um),
'height_axis_start_um': float(self.height_axis_start_um),
'height_axis_end_um': float(self.height_axis_end_um),
'height_axis_step_um': float(self.height_axis_step_um),
'n_iterative_rounds': int(self.n_iterative_rounds),
'nuclei_round': int(self.codebook['nuclei_round']),
'num_xy_tiles': int(self.n_xy_tiles),
'num_z_tiles': int(self.n_z_tiles),
'n_active_channels_readout': int(self.n_active_channels_readout),
'n_active_channels_nuclei': int(self.n_active_channels_nuclei),
'scan_axis_positions': int(self.scan_axis_positions),
'scan_axis_speed_readout': float(self.scan_axis_speed_readout),
'scan_axis_speed_nuclei': float(self.scan_axis_speed_nuclei),
'y_pixels': int(self.y_pixels),
'x_pixels': int(self.x_pixels),
'405_active_readout': bool(self.channel_states_readout[0]),
'488_active_readout': bool(self.channel_states_readout[1]),
'561_active_readout': bool(self.channel_states_readout[2]),
'635_active_readout': bool(self.channel_states_readout[3]),
'730_active_readout': bool(self.channel_states_readout[4]),
'405_power_readout': float(self.channel_powers_readout[0]),
'488_power_readout': float(self.channel_powers_readout[1]),
'561_power_readout': float(self.channel_powers_readout[2]),
'635_power_readout': float(self.channel_powers_readout[3]),
'730_power_readout': float(self.channel_powers_readout[4]),
'405_active_nuclei': bool(self.channel_states_nuclei[0]),
'488_active_nuclei': bool(self.channel_states_nuclei[1]),
'561_active_nuclei': bool(self.channel_states_nuclei[2]),
'635_active_nuclei': bool(self.channel_states_nuclei[3]),
'730_active_nuclei': bool(self.channel_states_nuclei[4]),
'405_power_nuclei': float(self.channel_powers_nuclei[0]),
'488_power_nuclei': float(self.channel_powers_nuclei[1]),
'561_power_nuclei': float(self.channel_powers_nuclei[2]),
'635_power_nuclei': float(self.channel_powers_nuclei[3]),
'730_power_nuclei': float(self.channel_powers_nuclei[4])}]
self.stage_volume_set = True
except Exception as e:
raise Exception("Error in stage volume setup.") from e
# load fluidics and codebook files
def _load_fluidics(self):
try:
self.df_fluidics = data_io.read_fluidics_program(self.fluidics_file_path)
self.codebook = data_io.read_config_file(self.codebook_file_path)
self.fluidics_loaded = True
except Exception as e:
raise Exception('Error in loading fluidics and/or codebook files.') from e
# generate summary of fluidics and codebook files
def _generate_fluidics_summary(self):
self.n_iterative_rounds = int(self.codebook['n_rounds'])
if (self.n_iterative_rounds == int(self.df_fluidics['round'].max())):
self.n_active_channels_readout = int(self.codebook['dyes_per_round'])
self.channel_states_readout = [
False,
bool(strtobool(self.codebook['alexa488'])),
bool(strtobool(self.codebook['atto565'])),
bool(strtobool(self.codebook['alexa647'])),
bool(strtobool(self.codebook['cy7']))]
if not(self.codebook['nuclei_round']==-1):
self.n_active_channels_nuclei = 2
self.channel_states_nuclei = [
True,
True,
False,
False,
False]
fluidics_data = (f"Experiment type: {str(self.codebook['type'])} \n"
f"Number of iterative rounds: {str(self.codebook['n_rounds'])} \n\n"
f"Number of targets: {str(self.codebook['targets'])} \n"
f"Channels per round: {str(self.codebook['dyes_per_round'])} \n"
f"Alexa488 fidicual: {str(self.codebook['alexa488'])} \n"
f"Atto565 readout: {str(self.codebook['atto565'])} \n"
f"Alexa647 readout: {str(self.codebook['alexa647'])} \n"
f"Cy7 readout: {str(self.codebook['cy7'])} \n"
f"Nuclear marker round: {str(self.codebook['nuclei_round'])} \n\n")
self.fluidics_summary.value = fluidics_data
else:
raise Exception('Number of rounds in codebook file and fluidics file do not match.')
# generate summary of experimental setup
def _generate_experiment_summary(self):
exp_data = (f"Number of iterative rounds: {str(self.n_iterative_rounds)} \n\n"
f"Scan start: {str(self.scan_axis_start_um)} \n"
f"Scan end: {str(self.scan_axis_end_um)} \n"
f"Number of scan positions: {str(self.scan_axis_positions)} \n"
f"Readout rounds scan speed: {str(self.scan_axis_speed_readout)} \n"
f"Nuclei round scan speed: {str(self.scan_axis_speed_nuclei)} \n\n"
f"Number of Y tiles: {str(self.n_xy_tiles)} \n"
f"Tile start: {str(self.tile_axis_start_um)} \n"
f"Tile end: {str(self.tile_axis_end_um)} \n"
f"Tile step: {str(self.tile_axis_step_um)} \n\n"
f"Number of Z slabs: {str(self.n_z_tiles)} \n"
f"Height start: {str(self.height_axis_start_um)} \n"
f"Height end: {str(self.height_axis_end_um)} \n"
f"Height step: {str(self.height_axis_step_um)} \n\n"
f"--------Readout rounds------- \n"
f"Number of channels: {str(self.n_active_channels_readout)} \n"
f"Active lasers: {str(self.channel_states_readout)} \n"
f"Lasers powers: {str(self.channel_powers_readout)} \n\n"
f"--------Nuclei rounds------- \n"
f"Number of channels: {str(self.n_active_channels_nuclei)} \n"
f"Active lasers: {str(self.channel_states_nuclei)} \n"
f"Lasers powers: {str(self.channel_powers_nuclei)} \n\n")
self.experiment_summary.value = exp_data
@magicgui(
auto_call=False,
fluidics_file_path={"widget_type": "FileEdit", 'label': 'Fluidics program'},
codebook_file_path={"widget_type": "FileEdit", 'label': 'Codebook'},
layout='vertical',
call_button='Load fluidics'
)
def load_fluidics_program(self,
fluidics_file_path = Path('C:/Users/qi2lab/Documents/GitHub/common_fluidics_programs'),
codebook_file_path = Path('C:/Users/qi2lab/Documents/GitHub/common_codebooks')):
self.fluidics_file_path = fluidics_file_path
self.codebook_file_path = codebook_file_path
self._load_fluidics()
self._generate_fluidics_summary()
fluidics_summary = widgets.TextEdit(label='Fluidics Summary', value="None", name="Fluidics summary")
@magicgui(
auto_call=True,
run_fluidics={"widget_type": "PushButton", 'label': 'Load fluidic controller and run first round'},
layout='vertical'
)
def run_first_fluidics_round(self, run_fluidics):
if self.fluidics_loaded:
# connect to pump
self.pump_controller = APump(self.pump_parameters)
# set pump to remote control
self.pump_controller.enableRemoteControl(True)
# connect to valves
self.valve_controller = HamiltonMVP(com_port=self.valve_COM_port)
# initialize valves
self.valve_controller.autoAddress()
# run fluidics flush
success_fluidics = False
success_fluidics = run_fluidic_program(0,self.df_fluidics,self.valve_controller,self.pump_controller)
if not(success_fluidics):
raise Exception('Error in fluidics unit.')
else:
self.first_round_run = True
else:
raise Exception('Configure fluidics first.')
@magicgui(
auto_call=False,
exposure_ms={"widget_type": "FloatSpinBox",'min': 3, 'max': 60,'label': 'Exposure (same for all channels)'},
power_405={"widget_type": "FloatSpinBox", 'min': 0, 'max': 100, 'label': '405 nm power'},
power_488={"widget_type": "FloatSpinBox", 'min': 0, 'max': 100, 'label': '488 nm power'},
power_561={"widget_type": "FloatSpinBox", 'min': 0, 'max': 100, 'label': '561 nm power'},
power_635={"widget_type": "FloatSpinBox", 'min': 0, 'max': 100, 'label': '635 nm power'},
power_730={"widget_type": "FloatSpinBox", 'min': 0, 'max': 100, 'label': '730 nm power'},
call_button='Set lasers')
def define_channels(
self,
exposure_ms=10.0,
power_405=0.0,
power_488=0.0,
power_561=0.0,
power_635=0.0,
power_730=0.0):
if (self.fluidics_loaded and self.first_round_run):
self.channel_powers_readout = [0.0,power_488,power_561,power_635,power_730]
if not(self.codebook['nuclei_round']==-1):
self.channel_powers_nuclei = [power_405,power_488,0.0,0.0,0.0]
self.exposure_ms = exposure_ms
self.channels_set = True
self._generate_experiment_summary()
else:
raise Exception('Configure fluidics and run initial round first.')
@magicgui(
auto_call=False,
scan_axis_start_um={"widget_type": "FloatSpinBox", 'min' : -20000, 'max': 20000, 'label': 'Scan start:'},
scan_axis_end_um={"widget_type": "FloatSpinBox", 'min' : -20000, 'max': 20000,'label': 'Scan end:'},
tile_axis_start_um={"widget_type": "FloatSpinBox", 'min' : -20000, 'max': 20000,'label': 'Tile start:'},
tile_axis_end_um={"widget_type": "FloatSpinBox", 'min' : -20000, 'max': 20000,'label': 'Tile end:'},
height_axis_start_um={"widget_type": "FloatSpinBox", 'min' : -20000, 'max': 20000,'label': 'Height start:'},
height_axis_end_um={"widget_type": "FloatSpinBox", 'min' : -20000, 'max': 20000,'label': 'Height end:'},
call_button='Set scan volume')
def define_scan_volume(
self,
scan_axis_start_um=0.0,
scan_axis_end_um=0.0,
tile_axis_start_um=0.0,
tile_axis_end_um=0.0,
height_axis_start_um=0.0,
height_axis_end_um=0.0):
if (self.channels_set and self.first_round_run and self.fluidics_loaded):
self.scan_axis_start_um = scan_axis_start_um
self.scan_axis_end_um = scan_axis_end_um
self.tile_axis_start_um = tile_axis_start_um
self.tile_axis_end_um = tile_axis_end_um
self.height_axis_start_um = height_axis_start_um
self.height_axis_end_um = height_axis_end_um
self._calculate_scan_volume()
self._generate_experiment_summary()
else:
raise Exception("Configure fluidics, run initial round, and configure channels first.")
experiment_summary = widgets.TextEdit(label='Experiment Summary', value="None", name="Experiment summary")
@magicgui(
auto_call=True,
accept_setup_btn={"widget_type": "PushButton", 'label': 'Accept iterative experiment setup'},
layout='vertical'
)
def accept_setup(self, accept_setup_btn):
if (self.stage_volume_set and self.channels_set and self.first_round_run and self.fluidics_loaded):
self.worker_iterative_setup.start()
self.setup_complete = True
else:
raise Exception('Configure fluidics, run initial round, configure channels, and configure stage scan volume first.')
# return fluidics, codebook, and experimental setup for running stage scan
@thread_worker
def _return_experiment_setup(self):
return self.codebook, self.df_fluidics, self.scan_settings, self.valve_controller,self.pump_controller
def main():
ui=OPMIterative()
ui.show(run=True)
if __name__ == "__main__":
main()
| 20,141
| 51.181347
| 141
|
py
|
OPM
|
OPM-master/napari-control/src/hardware/PicardShutter.py
|
#!/usr/bin/python
# ----------------------------------------------------------------------------------------
# The basic I/O class for Picard USB Shutter
# ----------------------------------------------------------------------------------------
# Doug Shepherd
# 11/2022
# douglas.shepherd@asu.edu
# ----------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------
# Import
# ----------------------------------------------------------------------------------------
import time
import clr
clr.AddReference('PiUsbNet')
import PiUsbNet
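# PiUsbNet is the vendor .NET assembly for the Picard shutter; it must be on
# the assembly search path for clr.AddReference (pythonnet) to resolve it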
import gc
# ----------------------------------------------------------------------------------------
# PicardShutter Class Definition
# ----------------------------------------------------------------------------------------
class PicardShutter():
def __init__(self,shutter_id,verbose=False):
# Define attributes
self.shutter_id = shutter_id
self.verbose = verbose
try:
self.shutter: PiUsbNet.Shutter = PiUsbNet.Shutter()
self.shutter.StateChanged += self._shutter_state_changed
self.shutter.Open(self.shutter_id)
if not self.shutter.IsConnected:
if self.verbose: print('Shutter not found')
except PiUsbNet.UsbDeviceException as exc:
if self.verbose: print(f'PiUsbNet exception: {exc}')
# Event handler function. Called by PiUsbNet.dll when the position changes.
# This function runs in a worker thread.
def _shutter_state_changed(self, sender: PiUsbNet.Shutter, args: PiUsbNet.ShutterStateChangedEventArgs):
if (self.verbose): print(f'Shutter state: {args.State}')
def printShutterState(self):
try:
print(self.shutter.State)
except PiUsbNet.UsbDeviceException as exc:
if self.verbose: print(f'PiUsbNet exception: {exc}')
# open shutter
def openShutter(self):
try:
new_state = PiUsbNet.ShutterState.Open
self.shutter.State = new_state
# Wait until new state is signaled. Timeout after 10 sec
start_time = time.time()
while self.shutter.State != new_state and (time.time()-start_time < 10.0):
time.sleep(0.2)
if self.shutter.State != new_state:
if self.verbose: print('Shutter change state timeout')
else:
if self.verbose: print(f'Shutter at new state: {self.shutter.State}')
except PiUsbNet.UsbDeviceException as exc:
if self.verbose: print(f'PiUsbNet exception: {exc}')
# close shutter
def closeShutter(self):
try:
new_state = PiUsbNet.ShutterState.Closed
self.shutter.State = new_state
# Wait until new state is signaled. Timeout after 10 sec
start_time = time.time()
while self.shutter.State != new_state and (time.time()-start_time < 10.0):
time.sleep(0.2)
if self.shutter.State != new_state:
if self.verbose: print('Shutter change state timeout')
else:
if self.verbose: print(f'Shutter at new state: {self.shutter.State}')
except PiUsbNet.UsbDeviceException as exc:
if self.verbose: print(f'PiUsbNet exception: {exc}')
# make sure shutter is closed at shutdown
def shutDown(self):
self.closeShutter()
self.shutter = None
time.sleep(.1)
gc.collect()
| 3,585
| 38.844444
| 108
|
py
|
OPM
|
OPM-master/napari-control/src/hardware/HamiltonMVP.py
|
#!/usr/bin/python
# ----------------------------------------------------------------------------------------
# A basic class for serial interface with a series of daisy chained Hamilton MVP devices
# ----------------------------------------------------------------------------------------
# Jeff Moffitt
# 12/17/13
# jeffmoffitt@gmail.com
#
# TODO: Simulated port should be in a different class
# ----------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------
# Import
# ----------------------------------------------------------------------------------------
import time
from src.hardware.AbstractValve import AbstractValve
# ----------------------------------------------------------------------------------------
# HamiltonMVP Class Definition
# ----------------------------------------------------------------------------------------
class HamiltonMVP(AbstractValve):
def __init__(self,
com_port = "COM4",
num_simulated_valves = 0,
verbose = False):
# Define attributes
self.com_port = com_port
self.verbose = verbose
self.num_simulated_valves = num_simulated_valves
# Determine simulation mode
self.simulate = (self.num_simulated_valves > 0)
# Create serial port (if not in simulation mode)
if not self.simulate:
import serial
self.serial = serial.Serial(port = self.com_port,
baudrate = 9600,
bytesize = serial.SEVENBITS,
parity = serial.PARITY_ODD,
stopbits = serial.STOPBITS_ONE,
timeout = 0.1)
# Define important serial characters
self.acknowledge = "\x06"
self.carriage_return = "\x13"
self.negative_acknowledge = "\x21"
self.read_length = 64
self.char_offset = 97 # offset to convert int current_device
# to ascii addresses (0=a, 1=b, ...)
# Define valve and port properties
self.max_valves = 16 # Maximum number of valves in the daisy chain
self.valve_names = []
self.num_valves = 0
self.valve_configs = []
self.max_ports_per_valve = []
self.current_port = []
# Configure device
self.autoAddress()
self.autoDetectValves()
# ------------------------------------------------------------------------------------
# Define Device Addresses: Must be First Command Issued
# ------------------------------------------------------------------------------------
def autoAddress(self):
if not self.simulate:
auto_address_cmd = "1a\r"
if self.verbose:
print("Addressing Hamilton Valves")
x = self.write(auto_address_cmd)
response = self.read() # Clear buffer
else:
print("Simulating Hamilton MVP")
# ------------------------------------------------------------------------------------
# Auto Detect and Configure Valves: Devices are detected by acknowledgement of
# initialization command
# ------------------------------------------------------------------------------------
def autoDetectValves(self):
if not self.simulate:
print("----------------------------------------------------------------------")
print("Opening the Hamilton MVP Valve Daisy Chain")
print(" " + "COM Port: " + str(self.com_port))
for valve_ID in range(self.max_valves): # Loop over all possible valves
# Generate address character (0=a, 1=b, ...)
device_address_character = chr(valve_ID + self.char_offset)
if self.verbose:
print("Looking for device with address: " + str(valve_ID) + "=" + device_address_character)
self.valve_names.append(device_address_character) # Save device characters
# Send initialization command to valve: if it acknowledges, then it exists
found_valve = self.initializeValve(valve_ID)
if found_valve:
# Determine valve configuration
valve_config = self.howIsValveConfigured(valve_ID)
                    if valve_config != "Unknown response": # Indicates successful response
self.valve_configs.append(valve_config)
self.max_ports_per_valve.append(self.numPortsPerConfiguration(valve_config))
self.current_port.append(0)
if self.verbose:
print("Found " + valve_config + " device at address " + str(valve_ID))
else:
break
self.num_valves = len(self.valve_configs)
if self.num_valves == 0:
self.valve_names = "0"
print("Error: no valves discovered")
return False # Return failure
# Display found valves
print("Found " + str(self.num_valves) + " Hamilton MVP Valves")
for valve_ID in range(self.num_valves):
print(" " + "Device " + self.valve_names[valve_ID] + " is configured with " + self.valve_configs[valve_ID])
print("Initializing valves...")
# Wait for final device to stop moving
self.waitUntilNotMoving(self.num_valves-1)
return True
else: # Simulation code
for valve_ID in range(self.num_simulated_valves):
self.valve_configs.append(self.howIsValveConfigured(valve_ID))
self.max_ports_per_valve.append(self.numPortsPerConfiguration(self.howIsValveConfigured(valve_ID)))
self.current_port.append(0)
self.num_valves = self.num_simulated_valves
print("Created " + str(self.num_simulated_valves) + " simulated Hamilton MVP valves")
return True
# ------------------------------------------------------------------------------------
# Change Port Position
# ------------------------------------------------------------------------------------
def changePort(self, valve_ID, port_ID, direction = 0, wait_until_done = False):
        # Check validity of valve and port IDs
if not self.isValidValve(valve_ID):
return False
if not self.isValidPort(valve_ID, port_ID):
return False
if not self.simulate:
# Compose message and increment port_ID (starts at 1)
message = "LP" + str(direction) + str(port_ID+1) + "R\r"
response = self.inquireAndRespond(valve_ID, message)
if response[0] == "Negative Acknowledge":
print("Move failed: " + str(response))
if response[1]: #Acknowledged move
self.current_port[valve_ID] = port_ID
if wait_until_done:
                    self.waitUntilNotMoving(valve_ID)
return response[1]
else: ## simulation code
self.current_port[valve_ID] = port_ID
return True
# ------------------------------------------------------------------------------------
# Close Serial Port
# ------------------------------------------------------------------------------------
def close(self):
if not self.simulate:
self.serial.close()
if self.verbose: print("Closed hamilton valves")
else: ## simulation code
if self.verbose: print("Closed simulated hamilton valves")
# ------------------------------------------------------------------------------------
# Initialize Port Position of Given Valve
# ------------------------------------------------------------------------------------
def initializeValve(self, valve_ID):
if not self.simulate:
response = self.inquireAndRespond(valve_ID,
message ="LXR\r",
dictionary = {},
default = "")
if self.verbose:
if response[1]: print("Initialized Valve: " + str(valve_ID+1))
else: print("Did not find valve: " + str(valve_ID+1))
return response[1]
else:
return True
# ------------------------------------------------------------------------------------
# Basic I/O with Serial Port
# This function returns a response tuple used by this class
# (dictionary entry, affirmative response?, raw response string)
# ------------------------------------------------------------------------------------
def inquireAndRespond(self, valve_ID, message, dictionary = {}, default = "Unknown"):
# Check if the valve_ID valve is initialized
if not self.isValidValve(valve_ID):
return ("", False, "")
# Prepend address of provided valve (0=a, 1=b, ...)
message = self.valve_names[valve_ID] + message
# Write message and read response
self.write(message)
response = self.read()
# Parse response into sent message and response
repeated_message = response[:(response.find(self.carriage_return)-1)]
actual_response = response[(response.find(self.carriage_return)-1):
(response.rfind(self.carriage_return))]
# Check for negative acknowledge
if actual_response == self.negative_acknowledge:
return ("Negative Acknowledge", False, response)
# Check for acknowledge
if actual_response == self.acknowledge:
return ("Acknowledge", True, response)
# Parse provided dictionary with response
return_value = dictionary.get(actual_response, default)
if return_value == default:
return (default, False, response)
else:
return (return_value, True, response)
# ------------------------------------------------------------------------------------
# Generate Default Port Names
# ------------------------------------------------------------------------------------
def getDefaultPortNames(self, valve_ID):
if not self.isValidValve(valve_ID):
return ("")
default_names = []
for port_ID in range(self.max_ports_per_valve[valve_ID]):
default_names.append("Port " + str(port_ID+1))
return default_names
# ------------------------------------------------------------------------------------
# Generate Rotation Direction Labels
# ------------------------------------------------------------------------------------
def getRotationDirections(self, valve_ID):
if not self.isValidValve(valve_ID):
return ("")
return ("Clockwise", "Counter Clockwise")
# ------------------------------------------------------------------------------------
# Get Valve Status
# ------------------------------------------------------------------------------------
def getStatus(self, valve_ID):
return (self.whereIsValve(valve_ID), not self.isMovementFinished(valve_ID))
# ------------------------------------------------------------------------------------
# Poll Valve Configuration
# ------------------------------------------------------------------------------------
def howIsValveConfigured(self, valve_ID):
if not self.simulate:
response = self.inquireAndRespond(valve_ID,
message ="LQT\r",
dictionary = {"2": "8 ports",
"3": "6 ports",
"4": "3 ports",
"5": "2 ports @180",
"6": "2 ports @90",
"7": "4 ports"},
default = "Unknown response")
return response[0]
else: ## simulation code
return "8 ports"
# ------------------------------------------------------------------------------------
# Determine number of active valves
# ------------------------------------------------------------------------------------
def howManyValves(self):
return self.num_valves
# ------------------------------------------------------------------------------------
# Poll Movement of Valve
# ------------------------------------------------------------------------------------
def isMovementFinished(self, valve_ID):
if not self.simulate:
response = self.inquireAndRespond(valve_ID,
message ="F\r",
dictionary = {"*": False,
"N": False,
"Y": True},
default = "Unknown response")
return response[0]
else: ## simulation code
return ("Y", True, "Simulation")
# ------------------------------------------------------------------------------------
# Poll Overload Status of Valve
# ------------------------------------------------------------------------------------
def isValveOverloaded(self, valve_ID):
if not self.simulate:
return self.inquireAndRespond(valve_ID,
message ="G\r",
dictionary = {"*": False,
"N": False,
"Y": True},
default = "Unknown response")
else: ## simulation code
return ("N", False, "Simulation")
# ------------------------------------------------------------------------------------
# Check if Port is Valid
# ------------------------------------------------------------------------------------
def isValidPort(self, valve_ID, port_ID):
if not self.isValidValve(valve_ID):
return False
elif not (port_ID < self.max_ports_per_valve[valve_ID]):
if self.verbose:
print(str(port_ID) + " is not a valid port on valve " + str(valve_ID))
return False
else:
return True
# ------------------------------------------------------------------------------------
# Check if Valve is Valid
# ------------------------------------------------------------------------------------
def isValidValve(self, valve_ID):
if not (valve_ID < self.max_valves):
if self.verbose:
print(str(valve_ID) + " is not a valid valve")
return False
else:
return True
# ------------------------------------------------------------------------------------
# Convert Port Configuration String to Number of Ports
# ------------------------------------------------------------------------------------
def numPortsPerConfiguration(self, configuration_string):
return {"8 ports": 8,
"6 ports": 6,
"3 ports": 3,
"2 ports @180": 2,
"2 ports @90": 2,
"4 ports": 4}.get(configuration_string, 0)
# ------------------------------------------------------------------------------------
# Read from Serial Port
# ------------------------------------------------------------------------------------
def read(self):
response = self.serial.read(self.read_length).decode()
if self.verbose:
print("Received: " + str((response, "")))
return response
# ------------------------------------------------------------------------------------
# Reset Chain: Readdress and redetect valves
# ------------------------------------------------------------------------------------
def resetChain(self):
# Reset device configuration
self.valve_names = []
self.num_valves = 0
self.valve_configs = []
self.max_ports_per_valve = []
# Configure Device
self.autoAddress()
self.autoDetectValves()
# ------------------------------------------------------------------------------------
# Halt Hamilton Class Until Movement is Finished
# ------------------------------------------------------------------------------------
def waitUntilNotMoving(self, valve_ID, pause_time = 1):
doneMoving = False
while not doneMoving:
doneMoving = self.isMovementFinished(valve_ID)
time.sleep(pause_time)
# ------------------------------------------------------------------------------------
# Poll Valve Configuration
# ------------------------------------------------------------------------------------
def whatIsValveConfiguration(self, valve_ID):
if not self.isValidValve(valve_ID):
return ""
else:
return self.valve_configs[valve_ID]
# ------------------------------------------------------------------------------------
# Poll Valve Location
# ------------------------------------------------------------------------------------
def whereIsValve(self, valve_ID):
if not self.simulate:
response = self.inquireAndRespond(valve_ID,
message ="LQP\r",
dictionary = {"1": "Port 1",
"2": "Port 2",
"3": "Port 3",
"4": "Port 4",
"5": "Port 5",
"6": "Port 6",
"7": "Port 7",
"8": "Port 8"},
default = "Unknown Port")
return response[0]
else: ## simulation code
return {"1": "Port 1",
"2": "Port 2",
"3": "Port 3",
"4": "Port 4",
"5": "Port 5",
"6": "Port 6",
"7": "Port 7",
"8": "Port 8"}.get(str(self.current_port[valve_ID]+1))
# ------------------------------------------------------------------------------------
# Write to Serial Port
# ------------------------------------------------------------------------------------
def write(self, message):
self.serial.write(message.encode())
if self.verbose:
print("Wrote: " + message[:-1]) # Display all but final carriage return
# ----------------------------------------------------------------------------------------
# Test/Demo of Class
# ----------------------------------------------------------------------------------------
if (__name__ == '__main__'):
hamilton = HamiltonMVP(verbose = True)
for valve_ID in range(hamilton.howManyValves()):
text = "Valve " + str(valve_ID+1)
text = " is configured with " + hamilton.howIsValveConfigured(valve_ID)
hamilton.close()
#
# The MIT License
#
# Copyright (c) 2013 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| 21,387
| 45.495652
| 125
|
py
|
OPM
|
OPM-master/napari-control/src/hardware/AbstractValve.py
|
#!/usr/bin/python
'''
Abstract class for controlling a hardware valve. Function names were derived from original valveChain and hamilton classes.
George Emanuel
2/16/2018
'''
from abc import ABC, abstractmethod
class AbstractValve(ABC):
@abstractmethod
def changePort(self, valve_ID, port_ID, direction = 0):
pass
@abstractmethod
def howManyValves(self):
pass
@abstractmethod
def close(self):
pass
@abstractmethod
def getDefaultPortNames(self, valve_ID):
pass
@abstractmethod
def howIsValveConfigured(self, valve_ID):
pass
@abstractmethod
def getStatus(self, valve_ID):
pass
@abstractmethod
def resetChain(self):
pass
@abstractmethod
def getRotationDirections(self, valve_ID):
pass
| 820
| 18.093023
| 123
|
py
|
OPM
|
OPM-master/napari-control/src/hardware/ASI.py
|
#!/usr/bin/python
'''
----------------------------------------------------------------------------------------
OPM ASI Tiger functions
----------------------------------------------------------------------------------------
Douglas Shepherd
12/11/2021
douglas.shepherd@asu.edu
----------------------------------------------------------------------------------------
'''
# ----------------------------------------------------------------------------------------
# Import
# ----------------------------------------------------------------------------------------
from pymmcore_plus import RemoteMMCore
import time
def check_if_busy(mmcore_stage):
'''
Check if ASI Tiger controller is busy executing a command
:param mmcore_stage: RemoteMMCore
handle to existing RemoteMMCore
:return None:
'''
# turn on 'transmit repeated commands' for Tiger
mmcore_stage.setProperty('TigerCommHub','OnlySendSerialCommandOnChange','No')
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
mmcore_stage.setProperty('TigerCommHub','SerialCommand',command)
ready = mmcore_stage.getProperty('TigerCommHub','SerialResponse')
time.sleep(.010)
# turn off 'transmit repeated commands' for Tiger
mmcore_stage.setProperty('TigerCommHub','OnlySendSerialCommandOnChange','Yes')
def set_joystick_mode(mmcore_stage,x_stage_name,z_stage_name,joystick_mode):
'''
Turn ASI Tiger joystick input on or off
:param mmcore_stage: RemoteMMCore
handle to existing RemoteMMCore
:param x_stage_name: str
name of xy stage in MM config
:param z_stage_name: str
name of z stage in MM config
:param joystick_mode: bool
joystick input state
:return None:
'''
if joystick_mode:
mmcore_stage.setProperty(x_stage_name,'JoystickEnabled','Yes')
mmcore_stage.setProperty(z_stage_name,'JoystickInput','22 - right wheel')
else:
mmcore_stage.setProperty(x_stage_name,'JoystickEnabled','No')
mmcore_stage.setProperty(z_stage_name,'JoystickInput','0 - none')
def set_axis_speed(mmcore_stage,axis,axis_speed):
'''
Change ASI Tiger X/Y axis movement speed
:param mmcore_stage: RemoteMMCore
handle to existing RemoteMMCore
:param axis: str
name of axis ('X' or 'Y')
:param axis_speed: float
speed in mm/s
:return None:
'''
if axis == 'X':
command = 'SPEED X='+str(axis_speed)
mmcore_stage.setProperty('TigerCommHub','SerialCommand',command)
elif axis == 'Y':
command = 'SPEED Y='+str(axis_speed)
mmcore_stage.setProperty('TigerCommHub','SerialCommand',command)
def set_xy_position(mmcore_stage,stage_x,stage_y):
'''
Set ASI Tiger XY stage position
:param mmcore_stage: RemoteMMCore
handle to existing RemoteMMCore
    :param stage_x: float
        x axis position in um
    :param stage_y: float
        y axis position in um
:return None:
'''
mmcore_stage.setXYPosition(stage_x,stage_y)
mmcore_stage.waitForDevice(mmcore_stage.getXYStageDevice())
def set_z_position(mmcore_stage,stage_z):
'''
Set ASI Tiger Z stage position
:param mmcore_stage: RemoteMMCore
handle to existing RemoteMMCore
    :param stage_z: float
        z axis position in um
:return None:
'''
mmcore_stage.setZPosition(stage_z)
mmcore_stage.waitForDevice(mmcore_stage.getFocusDevice())
def set_1d_stage_scan(mmcore_stage):
'''
Setup ASI Tiger for constant speed stage scan on X axis
:param mmcore_stage: RemoteMMCore
handle to existing RemoteMMCore
:return None:
'''
command = '1SCAN X? Y=0 Z=9 F=0'
mmcore_stage.setProperty('TigerCommHub','SerialCommand',command)
def set_1d_stage_scan_area(mmcore_stage,scan_axis_start_mm,scan_axis_end_mm):
'''
Setup ASI Tiger limits for X axis constant speed scan
:param mmcore_stage: RemoteMMCore
handle to existing RemoteMMCore
    :param scan_axis_start_mm: float
        scan axis start position in mm
    :param scan_axis_end_mm: float
        scan axis end position in mm
:return None:
'''
command = '1SCANR X='+str(scan_axis_start_mm)+' Y='+str(scan_axis_end_mm)+' R=10'
mmcore_stage.setProperty('TigerCommHub','SerialCommand',command)
def setup_start_trigger_output(mmcore_stage):
'''
    Setup ASI Tiger trigger output on PLC add-on card
:param mmcore_stage: RemoteMMCore
handle to existing RemoteMMCore
:return None:
'''
plcName = 'PLogic:E:36'
propPosition = 'PointerPosition'
propCellConfig = 'EditCellConfig'
addrOutputBNC1 = 33 # BNC1 on the PLC front panel
addrStageSync = 46 # TTL5 on Tiger backplane = stage sync signal
# connect stage sync signal to BNC output
mmcore_stage.setProperty(plcName, propPosition, addrOutputBNC1)
mmcore_stage.setProperty(plcName, propCellConfig, addrStageSync)
def start_1d_stage_scan(mmcore_stage):
'''
Send ASI Tiger "start" command for constant speed stage scan.
Should be called after acquisition sequence is started.
:param mmcore_stage: RemoteMMCore
handle to existing RemoteMMCore
:return None:
'''
command='1SCAN'
mmcore_stage.setProperty('TigerCommHub','SerialCommand',command)
def get_xyz_position(mmcore_stage):
'''
Get ASI Tiger stage position
:param mmcore_stage: RemoteMMCore
handle to existing RemoteMMCore
:return stage_x_um: float
x stage position in micron
:return stage_y_um: float
y stage position in micron
:return stage_z_um: float
z stage position in micron
'''
xy_pos = mmcore_stage.getXYPosition()
stage_x_um = xy_pos[0]
stage_y_um = xy_pos[1]
stage_z_um = mmcore_stage.getPosition()
return stage_x_um,stage_y_um,stage_z_um
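
# ----------------------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). Assumes a running
# pymmcore-plus remote core with an ASI Tiger configuration loaded; the axis
# speed and scan limits below are placeholders.
# ----------------------------------------------------------------------------------------
if __name__ == '__main__':
    with RemoteMMCore() as mmcore_stage:
        set_axis_speed(mmcore_stage,'X',0.5) # scan axis speed in mm/s
        set_1d_stage_scan(mmcore_stage) # configure X axis constant speed scan
        set_1d_stage_scan_area(mmcore_stage,0.0,1.0) # scan from 0 mm to 1 mm
        setup_start_trigger_output(mmcore_stage) # route stage sync signal to BNC1
        check_if_busy(mmcore_stage) # wait until the Tiger controller is ready
        start_1d_stage_scan(mmcore_stage) # fire the scan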
| 5,987
| 30.851064
| 90
|
py
|
OPM
|
OPM-master/napari-control/src/hardware/APump.py
|
#!/usr/bin/python
# ----------------------------------------------------------------------------------------
# The basic I/O class for a Gilson peristaltic pump
# ----------------------------------------------------------------------------------------
# George Emanuel with modifications by Jeff Moffitt
# 11/16/15
# jeffmoffitt@gmail.com
# ----------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------
# Import
# ----------------------------------------------------------------------------------------
import serial
import time
acknowledge = '\x06'
start = '\x0A'
stop = '\x0D'
# ----------------------------------------------------------------------------------------
# GilsonMP3 Class Definition
# ----------------------------------------------------------------------------------------
class APump():
def __init__(self,
parameters = False):
# Define attributes
self.com_port = parameters.get("pump_com_port", "COM3")
self.pump_ID = parameters.get("pump_ID", 30)
self.verbose = parameters.get("verbose", True)
self.simulate = parameters.get("simulate_pump", True)
self.serial_verbose = parameters.get("serial_verbose", False)
self.flip_flow_direction = parameters.get("flip_flow_direction", False)
# Create serial port
self.serial = serial.Serial(port = self.com_port,
baudrate = 19200,
parity= serial.PARITY_EVEN,
bytesize=serial.EIGHTBITS,
stopbits=serial.STOPBITS_TWO,
timeout=0.1)
# Define initial pump status
self.flow_status = "Stopped"
self.speed = 0.0
self.direction = "Forward"
self.disconnect()
self.enableRemoteControl(1)
self.startFlow(self.speed, self.direction)
self.identification = self.getIdentification()
def getIdentification(self):
return self.sendImmediate(self.pump_ID, "%")
def enableRemoteControl(self, remote):
if remote:
self.sendBuffered(self.pump_ID, "SR")
else:
self.sendBuffered(self.pump_ID, "SK")
def readDisplay(self):
return self.sendImmediate(self.pump_ID, "R")
def getStatus(self):
message = self.readDisplay()
if self.flip_flow_direction:
direction = {" ": "Not Running", "-": "Forward", "+": "Reverse"}.\
get(message[0], "Unknown")
else:
direction = {" ": "Not Running", "+": "Forward", "-": "Reverse"}.\
get(message[0], "Unknown")
status = "Stopped" if direction == "Not Running" else "Flowing"
control = {"K": "Keypad", "R": "Remote"}.get(message[-1], "Unknown")
auto_start = "Disabled"
speed = float(message[1:len(message) - 1])
return (status, speed, direction, control, auto_start, "No Error")
def close(self):
self.enableRemoteControl(0)
def setFlowDirection(self, forward):
if self.flip_flow_direction:
if forward:
self.sendBuffered(self.pump_ID, "K<")
else:
self.sendBuffered(self.pump_ID, "K>")
else:
if forward:
self.sendBuffered(self.pump_ID, "K>")
else:
self.sendBuffered(self.pump_ID, "K<")
def setSpeed(self, rotation_speed):
if rotation_speed >= 0 and rotation_speed <= 48:
rotation_int = int(rotation_speed*100)
self.sendBuffered(self.pump_ID, "R" + ("%04d" % rotation_int))
def startFlow(self, speed, direction = "Forward"):
self.setSpeed(speed)
self.setFlowDirection(direction == "Forward")
def stopFlow(self):
self.setSpeed(0.0)
return True
def sendImmediate(self, unitNumber, command):
self.selectUnit(unitNumber)
self.sendString(command[0])
newCharacter = self.getResponse()
response = ""
while not (ord(newCharacter) & 0x80):
response += newCharacter.decode()
self.sendString(acknowledge)
newCharacter = self.getResponse()
response += chr(ord(newCharacter) & ~0x80)
self.disconnect()
return response
def sendBuffered(self, unitNumber, command):
self.selectUnit(unitNumber)
self.sendAndAcknowledge(start + command + stop)
self.disconnect()
def disconnect(self):
self.sendAndAcknowledge('\xff')
def selectUnit(self, unitNumber):
devSelect = chr(0x80 | unitNumber)
self.sendString(devSelect)
return self.getResponse() == devSelect
def sendAndAcknowledge(self, string):
for i in range(0, len(string)):
self.sendString(string[i])
self.getResponse()
def sendString(self, string):
self.serial.write(string.encode())
def getResponse(self):
return self.serial.read()
#return self.serial.read().decode()
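
# ----------------------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file); the COM port and pump ID
# below are placeholders.
# ----------------------------------------------------------------------------------------
if __name__ == '__main__':
    pump = APump({"pump_com_port": "COM3", "pump_ID": 30, "verbose": True})
    print(pump.getStatus())
    pump.startFlow(10.0, "Forward") # setSpeed accepts 0-48 rpm
    pump.stopFlow()
    pump.close()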
| 5,237
| 33.460526
| 90
|
py
|
OPM
|
OPM-master/napari-control/src/hardware/OPMNIDAQ.py
|
#!/usr/bin/python
'''
----------------------------------------------------------------------------------------
Basic class to run NIDAQ with camera as master for OPM using PyDAQMx
----------------------------------------------------------------------------------------
Peter Brown
Franky Djutanta
Douglas Shepherd
12/11/2021
douglas.shepherd@asu.edu
----------------------------------------------------------------------------------------
'''
# ----------------------------------------------------------------------------------------
# Import
# ----------------------------------------------------------------------------------------
import PyDAQmx as daq
import ctypes as ct
import numpy as np
class OPMNIDAQ:
def __init__(self,scan_mirror_neutral=-0.15,scan_mirror_calibration=0.043):
self.scan_type = 'mirror'
self.interleave_lasers = True
self.do_ind = [0,1,2,3,4]
        self.active_channel_indices = None
self.n_active_channels = 0
self.DAQ_sample_rate_Hz = 10000
self.num_DI_channels = 8
self.dataDO = None
self.waveform = None
self.channelAO = "/Dev1/ao0"
self.min_AO_voltage = -7.0
self.max_AO_voltage = 7.0
self.channelDO = "/Dev1/port0/line0:7"
self.channelDI_trigger_from_camera = "/Dev1/PFI0"
self.channelDI_start_trigger = "/Dev1/PFI1"
self.channelDI_change_trigger = "/Dev1/PFI2"
self.scan_mirror_neutral = scan_mirror_neutral
self.scan_mirror_calibration = scan_mirror_calibration
def set_scan_type(self,scan_type):
self.scan_type = scan_type
def reset_scan_mirror(self):
self.taskAO = daq.Task()
self.taskAO.CreateAOVoltageChan("/Dev1/ao0","",-6.0,6.0,daq.DAQmx_Val_Volts,None)
self.taskAO.WriteAnalogScalarF64(True, -1, self.scan_mirror_neutral, None)
self.taskAO.StopTask()
self.taskAO.ClearTask()
    def set_scan_mirror_range(self,scan_mirror_step_size_um,scan_mirror_sweep_um):
        # determine sweep footprint
        self.min_volt = -(scan_mirror_sweep_um * self.scan_mirror_calibration / 2.) + self.scan_mirror_neutral  # unit: volts
        self.scan_axis_step_volts = scan_mirror_step_size_um * self.scan_mirror_calibration # unit: V
self.scan_axis_range_volts = scan_mirror_sweep_um * self.scan_mirror_calibration # unit: V
self.scan_steps = np.rint(self.scan_axis_range_volts / self.scan_axis_step_volts).astype(np.int16) # galvo steps
return self.scan_steps
def set_interleave_mode(self,interleave_lasers):
self.interleave_lasers = interleave_lasers
def set_channels_to_use(self,channel_states):
self.active_channel_indices = [ind for ind, st in zip(self.do_ind, channel_states) if st]
self.n_active_channels = len(self.active_channel_indices)
def generate_waveforms(self):
if self.scan_type == 'mirror':
# setup DAQ
nvoltage_steps = self.scan_steps
# 2 time steps per frame, except for first frame plus one final frame to reset voltage
#samples_per_ch = (nvoltage_steps * 2 - 1) + 1
self.samples_per_ch = (nvoltage_steps * 2 * self.n_active_channels - 1) + 1
# Generate values for DO
dataDO = np.zeros((self.samples_per_ch, self.num_DI_channels), dtype=np.uint8)
for ii, ind in enumerate(self.active_channel_indices):
dataDO[2*ii::2*self.n_active_channels, ind] = 1
dataDO[-1, :] = 0
# generate voltage steps
            max_volt = self.min_volt + self.scan_axis_range_volts # top of the sweep, in volts
voltage_values = np.linspace(self.min_volt, max_volt, nvoltage_steps)
# Generate values for AO
waveform = np.zeros(self.samples_per_ch)
# one less voltage value for first frame
waveform[0:2*self.n_active_channels - 1] = voltage_values[0]
if len(voltage_values) > 1:
# (2 * # active channels) voltage values for all other frames
waveform[2*self.n_active_channels - 1:-1] = np.kron(voltage_values[1:], np.ones(2 * self.n_active_channels))
# set back to initial value at end
waveform[-1] = voltage_values[0]
self.dataDO = dataDO
self.waveform = waveform
elif self.scan_type == 'stage':
# setup digital trigger buffer on DAQ
self.samples_per_ch = 2 * int(self.n_active_channels)
# create DAQ pattern for laser strobing controlled via rolling shutter
dataDO = np.zeros((self.samples_per_ch, self.num_DI_channels), dtype=np.uint8)
for ii, ind in enumerate(self.active_channel_indices):
dataDO[2*ii::2*int(self.n_active_channels), int(ind)] = 1
self.dataDO = dataDO
self.waveform = None
def start_waveform_playback(self):
try:
self.taskDI = daq.Task()
self.taskDI.CreateDIChan("/Dev1/PFI0", "", daq.DAQmx_Val_ChanForAllLines)
            ## Configure change detection timing (from wave generator)
            self.taskDI.CfgInputBuffer(0)    # must be enforced for change-detection timing, i.e. no buffer
            self.taskDI.CfgChangeDetectionTiming("/Dev1/PFI0", "/Dev1/PFI0", daq.DAQmx_Val_ContSamps, 0)
            ## Set the start trigger
self.taskDI.CfgDigEdgeStartTrig("/Dev1/PFI0", daq.DAQmx_Val_Rising)
## Export DI signal to unused PFI pins, for clock and start
self.taskDI.ExportSignal(daq.DAQmx_Val_ChangeDetectionEvent, "/Dev1/PFI2")
self.taskDI.ExportSignal(daq.DAQmx_Val_StartTrigger, "/Dev1/PFI1")
# ----- DIGITAL output ------
self.taskDO = daq.Task()
# TO DO: Write each laser line separately!
self.taskDO.CreateDOChan("/Dev1/port0/line0:7", "", daq.DAQmx_Val_ChanForAllLines)
            ## Configure timing (from the DI change-detection event exported on PFI2)
            self.taskDO.CfgSampClkTiming(self.channelDI_change_trigger, self.DAQ_sample_rate_Hz, daq.DAQmx_Val_Rising, daq.DAQmx_Val_ContSamps, self.samples_per_ch)
## Write the output waveform
samples_per_ch_ct_digital = ct.c_int32()
self.taskDO.WriteDigitalLines(self.samples_per_ch, False, 10.0, daq.DAQmx_Val_GroupByChannel, self.dataDO, ct.byref(samples_per_ch_ct_digital), None)
if self.scan_type == 'mirror':
# ------- ANALOG output -----------
# first, set the galvo to the initial point if it is not already
self.taskAO_first = daq.Task()
self.taskAO_first.CreateAOVoltageChan("/Dev1/ao0", "", -6.0, 6.0, daq.DAQmx_Val_Volts, None)
self.taskAO_first.WriteAnalogScalarF64(True, -1, self.waveform[0], None)
self.taskAO_first.StopTask()
self.taskAO_first.ClearTask()
# now set up the task to ramp the galvo
self.taskAO = daq.Task()
self.taskAO.CreateAOVoltageChan("/Dev1/ao0", "", -6.0, 6.0, daq.DAQmx_Val_Volts, None)
## Configure timing (from DI task)
self.taskAO.CfgSampClkTiming("/Dev1/PFI2", self.DAQ_sample_rate_Hz, daq.DAQmx_Val_Rising, daq.DAQmx_Val_ContSamps, self.samples_per_ch)
## Write the output waveform
samples_per_ch_ct = ct.c_int32()
self.taskAO.WriteAnalogF64(self.samples_per_ch, False, 10.0, daq.DAQmx_Val_GroupByScanNumber, self.waveform, ct.byref(samples_per_ch_ct), None)
# start analog tasks
self.taskAO.StartTask()
# start digital tasks
self.taskDO.StartTask()
self.taskDI.StartTask()
except daq.DAQError as err:
print("DAQmx Error %s"%err)
def stop_waveform_playback(self):
try:
self.taskDI.StopTask()
self.taskDO.StopTask()
if self.scan_type == 'mirror':
self.taskAO.StopTask()
self.taskDI.ClearTask()
self.taskDO.ClearTask()
if self.scan_type == 'mirror':
self.taskAO.ClearTask()
except daq.DAQError as err:
print("DAQmx Error %s"%err)
| 8,624
| 43.921875
| 164
|
py
|
OPM
|
OPM-master/napari-control/src/utils/autofocus_remote_unit.py
|
#!/usr/bin/env python
'''
Optimize O2-O3 coupling by capturing images of collimated 532 alignment laser injected into system
using the back of pentaband dichroic with O3 at different positions along the (tilted) optical axis.
Shepherd 11/2022
'''
import numpy as np
from scipy import ndimage
from pymmcore_plus import CMMCorePlus
def calculate_focus_metric(image: np.ndarray):
"""
calculate focus metric
:param image: ndarray
image to test
:return focus_metric: float
focus metric
"""
# calculate focus metric
image[image>2**16-10]=0
image[image<100]=0
kernel = [[0,1,0],[1,1,1],[0,1,0]]
focus_metric = np.max(ndimage.minimum_filter(image,footprint=kernel))
# return focus metric
return focus_metric
def find_best_O3_focus_metric(mmc: CMMCorePlus,shutter_controller,O3_stage_name,verbose=False):
"""
optimize position of O3 with respect to O2 using TTL control of a Thorlabs K101 controller, Thorlabs PIA25 piezo motor, and Thorlabs 1" translation stage.
:param mmc: mmc
PyMMCorePlus mmc object
:param shutter_controller: PicardShutter
Picard shutter controller
    :param O3_stage_name: str
        name of O3 piezo stage in MM config
    :param verbose: bool
        print information on autofocus
    :return best_O3_stage_pos: float
        O3 stage position with the best focus metric
"""
# grab position and name of current MM focus stage
exp_zstage_pos = np.round(mmc.getPosition(),2)
exp_zstage_name = mmc.getFocusDevice()
if verbose: print(f'Current z-stage: {exp_zstage_name} with position {exp_zstage_pos}')
# set MM focus stage to O3 piezo stage
mmc.setFocusDevice(O3_stage_name)
mmc.waitForDevice(O3_stage_name)
# grab O3 focus stage position
O3_stage_pos_start = np.round(mmc.getPosition(),2)
mmc.waitForDevice(O3_stage_name)
if verbose: print(f'O3 z-stage: {O3_stage_name} with position {O3_stage_pos_start}')
# generate arrays
n_O3_stage_steps=20.
O3_stage_step_size = .25
    O3_stage_positions = np.round(np.arange(O3_stage_pos_start-(O3_stage_step_size*np.round(n_O3_stage_steps/2,0)),O3_stage_pos_start+(O3_stage_step_size*np.round(n_O3_stage_steps/2,0)),O3_stage_step_size),2).astype(np.float64)
focus_metrics = np.zeros(O3_stage_positions.shape[0])
if verbose: print('Starting rough alignment.')
# open alignment laser shutter
shutter_controller.openShutter()
i = 0
for O3_stage_pos in O3_stage_positions:
mmc.setPosition(O3_stage_pos)
mmc.waitForDevice(O3_stage_name)
mmc.snapImage()
test_image = mmc.getImage()
#test_image = np.reshape(tagged_image.pix,newshape=[tagged_image.tags['Height'], tagged_image.tags['Width']])
focus_metrics[i] = calculate_focus_metric(test_image)
if verbose: print(f'Current position: {O3_stage_pos}; Focus metric: {focus_metrics[i]}')
i = i+1
# find best rough focus position
rough_best_O3_stage_index = np.argmax(focus_metrics)
rough_best_O3_stage_pos=O3_stage_positions[rough_best_O3_stage_index]
if verbose: print(f'Rough align position: {rough_best_O3_stage_pos} vs starting position: {O3_stage_pos_start}')
if np.abs(rough_best_O3_stage_pos-O3_stage_pos_start) < 2.:
mmc.setPosition(rough_best_O3_stage_pos)
mmc.waitForDevice(O3_stage_name)
perform_fine = True
else:
mmc.setPosition(O3_stage_pos_start)
mmc.waitForDevice(O3_stage_name)
if verbose: print('Rough focus failed to find better position.')
        best_O3_stage_pos = O3_stage_pos_start
perform_fine = False
# generate arrays
del n_O3_stage_steps, O3_stage_step_size, O3_stage_positions, focus_metrics
if perform_fine:
n_O3_stage_steps=10.
O3_stage_step_size = .1
        O3_stage_positions = np.round(np.arange(rough_best_O3_stage_pos-(O3_stage_step_size*np.round(n_O3_stage_steps/2,0)),rough_best_O3_stage_pos+(O3_stage_step_size*np.round(n_O3_stage_steps/2,0)),O3_stage_step_size),2).astype(np.float64)
focus_metrics = np.zeros(O3_stage_positions.shape[0])
if verbose: print('Starting fine alignment.')
i = 0
for O3_stage_pos in O3_stage_positions:
mmc.setPosition(O3_stage_pos)
mmc.waitForDevice(O3_stage_name)
mmc.snapImage()
test_image = mmc.getImage()
#test_image = np.reshape(tagged_image.pix,newshape=[tagged_image.tags['Height'], tagged_image.tags['Width']])
focus_metrics[i] = calculate_focus_metric(test_image)
if verbose: print(f'Current position: {O3_stage_pos}; Focus metric: {focus_metrics[i]}')
i = i+1
# find best fine focus position
fine_best_O3_stage_index = np.argmax(focus_metrics)
fine_best_O3_stage_pos=O3_stage_positions[fine_best_O3_stage_index]
if verbose: print(f'Fine align position: {fine_best_O3_stage_pos} vs starting position: {rough_best_O3_stage_pos}')
if np.abs(fine_best_O3_stage_pos-rough_best_O3_stage_pos) < .5:
mmc.setPosition(fine_best_O3_stage_pos)
mmc.waitForDevice(O3_stage_name)
            best_O3_stage_pos = fine_best_O3_stage_pos
else:
mmc.setPosition(rough_best_O3_stage_pos)
mmc.waitForDevice(O3_stage_name)
if verbose: print('Fine focus failed to find better position.')
            best_O3_stage_pos = O3_stage_pos_start
perform_fine = False
shutter_controller.closeShutter()
# set focus device back to MM experiment focus stage
mmc.setFocusDevice(exp_zstage_name)
mmc.waitForDevice(exp_zstage_name)
#mmc.setPosition(exp_zstage_pos)
#mmc.waitForDevice(exp_zstage_name)
exp_zstage_pos = np.round(mmc.getPosition(),2)
if verbose: print(f'Current z-stage: {exp_zstage_name} with position {exp_zstage_pos}')
    return best_O3_stage_pos
def manage_O3_focus(mmc: CMMCorePlus,shutter_controller,O3_stage_name,verbose=False):
"""
helper function to manage autofocus of O3 with respect to O2
:param mmc: mmc
PyMMCorePlus mmc object
:param shutter_controller: PicardShutter
Picard shutter controller
    :param O3_stage_name: str
        name of O3 piezo stage in MM config
:param verbose: bool
print information on autofocus
    :return updated_O3_stage_position: float
        O3 stage position with the best focus; defaults to the original position if none is found
"""
# get exposure for experiment
exposure_experiment_ms = mmc.getExposure()
# set camera to fast readout mode
readout_mode_experiment = mmc.getCurrentConfig('Camera-Setup')
mmc.setConfig('Camera-Setup','ScanMode3')
mmc.waitForConfig('Camera-Setup','ScanMode3')
# set camera to internal control
mmc.setConfig('Camera-TriggerSource','INTERNAL')
mmc.waitForConfig('Camera-TriggerSource','INTERNAL')
    # set exposure to 5 ms
mmc.setExposure(5)
updated_O3_stage_position = find_best_O3_focus_metric(mmc,shutter_controller,O3_stage_name,verbose)
# put camera back into operational readout mode
mmc.setConfig('Camera-Setup',readout_mode_experiment)
mmc.waitForConfig('Camera-Setup',readout_mode_experiment)
mmc.setExposure(exposure_experiment_ms)
mmc.snapImage()
return updated_O3_stage_position
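
if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): the shutter object
    # and the O3 stage name below are placeholders for the local hardware setup.
    mmc = CMMCorePlus.instance()
    shutter_controller = None # replace with a PicardShutter-style controller
    if shutter_controller is not None:
        best_pos = manage_O3_focus(mmc,shutter_controller,'O3 Piezo',verbose=True)
        print(f'O3 stage position with best focus: {best_pos}')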
| 7,440
| 37.35567
| 239
|
py
|
OPM
|
OPM-master/napari-control/src/utils/opm_psf.py
|
import psfmodels as psfm
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp2d
# ROI tools
def get_skewed_roi_size(sizes, theta, dc, dstep, ensure_odd=True):
"""
Get ROI size in OPM matrix that includes sufficient xy and z points
:param sizes: [z-size, y-size, x-size] in same units as dc, dstep
:param theta: angle in radians
:param dc: camera pixel size
:param dstep: step size
:param bool ensure_odd:
:return [no, n1, n2]: integer size of roi in skewed coordinates
"""
# x-size determines n2 size
n2 = int(np.ceil(sizes[2] / dc))
# z-size determines n1
n1 = int(np.ceil(sizes[0] / dc / np.sin(theta)))
# set so that @ top and bottom z-points, ROI includes the full y-size
    n0 = int(np.ceil(((0.5 * (n1 + 1)) * dc * np.cos(theta) + sizes[1]) / dstep))
if ensure_odd:
if np.mod(n2, 2) == 0:
n2 += 1
if np.mod(n1, 2) == 0:
n1 += 1
if np.mod(n0, 2) == 0:
n0 += 1
return [n0, n1, n2]
# coordinate transformations between OPM and coverslip frames
def get_skewed_coords(sizes, dc, ds, theta, scan_direction="lateral"):
"""
Get laboratory coordinates (i.e. coverslip coordinates) for a stage-scanning OPM set
:param sizes: (n0, n1, n2)
:param dc: camera pixel size
:param ds: stage step size
:param theta: in radians
:return x, y, z:
"""
nimgs, ny_cam, nx_cam = sizes
if scan_direction == "lateral":
x = dc * np.arange(nx_cam)[None, None, :]
# y = stage_pos[:, None, None] + dc * np.cos(theta) * np.arange(ny_cam)[None, :, None]
y = ds * np.arange(nimgs)[:, None, None] + dc * np.cos(theta) * np.arange(ny_cam)[None, :, None]
z = dc * np.sin(theta) * np.arange(ny_cam)[None, :, None]
elif scan_direction == "axial":
x = dc * np.arange(nx_cam)[None, None, :]
y = dc * np.cos(theta) * np.arange(ny_cam)[None, :, None]
z = ds * np.arange(nimgs)[:, None, None] + dc * np.sin(theta) * np.arange(ny_cam)[None, :, None]
else:
raise ValueError("scan_direction must be `lateral` or `axial` but was `%s`" % scan_direction)
return x, y, z
def create_psf_silicone_100x(dxy, dz, nxy, nz, ex_NA,ex_wvl,em_wvl):
"""
Create OPM PSF in coverslip coordinates
:param dxy: spacing of xy pixels
:param dz: spacing of z planes
:param nxy: number of xy pixels on a side
:param nz: number of z planes
    :param ex_NA: excitation NA
:param ex_wvl: excitation wavelength in microns
:param em_wvl: emission wavelength in microns
:return tot_psf:
"""
silicone_lens = {
'ni0': 1.4, # immersion medium RI design value
'ni': 1.4, # immersion medium RI experimental value
'ns': 1.45, # specimen refractive index
'tg': 170, # microns, coverslip thickness
'tg0': 170 # microns, coverslip thickness design value
}
ex_lens = {**silicone_lens, 'NA': ex_NA}
em_lens = {**silicone_lens, 'NA': 1.35}
# The psf model to use
# can be any of {'vectorial', 'scalar', or 'microscpsf'}
func = 'vectorial'
# the main function
_, _, tot_psf = psfm.tot_psf(nx=nxy, nz=nz, dxy=dxy, dz=dz, pz=15,
x_offset=0, z_offset=0,
ex_wvl = ex_wvl, em_wvl = em_wvl,
ex_params=ex_lens, em_params=em_lens,
psf_func=func)
return tot_psf
def generate_skewed_psf(ex_NA,ex_wvl,em_wvl):
"""
Create OPM PSF in skewed coordinates
    :param ex_NA: excitation NA
:param ex_wvl: excitation wavelength in microns
:param em_wvl: emission wavelength in microns
:return skewed_psf:
"""
dc = 0.115
na = 1.35
ni = 1.4
dstage = 0.4
theta = 30 * np.pi/180
xy_res = 1.6163399561827614 / np.pi * em_wvl / na
z_res = 2.355*(np.sqrt(6) / np.pi * ni * em_wvl / na ** 2)
roi_skewed_size = get_skewed_roi_size([z_res * 5, xy_res * 5, xy_res * 5],
theta, dc, dstage, ensure_odd=True)
# make square
roi_skewed_size[2]= roi_skewed_size[1]
# get tilted coordinates
x, y, z = get_skewed_coords(roi_skewed_size, dc, dstage, theta)
dx = x[0, 0, 1] - x[0, 0, 0]
dy = y[0, 1, 0] - y[0, 0, 0]
dz = z[0, 1, 0] - z[0, 0, 0]
z -= z.mean()
x -= x.mean()
y -= y.mean()
# get on grid of coordinates
dxy = 0.5 * np.min([dx, dy])
dz = 0.5 * dz
nxy = np.max([int(2 * ((x.max() - x.min()) // dxy) + 1),
int(2 * ((y.max() - y.min()) // dxy) + 1)])
nz = z.size
xg = np.arange(nxy) * dxy
xg -= xg.mean()
yg = np.arange(nxy) * dxy
yg -= yg.mean()
    psf_grid = create_psf_silicone_100x(dxy, dz, nxy, nz, ex_NA,ex_wvl,em_wvl)
psf_grid = psf_grid / np.max(psf_grid[nz//2])
# get value from interpolation
skewed_psf = np.zeros(roi_skewed_size)
for ii in range(nz):
skewed_psf[:, ii, :] = interp2d(xg, yg, psf_grid[ii], kind="linear")(x.ravel(), y[:, ii].ravel())
return skewed_psf
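
# minimal usage sketch (not part of the original module): generate a skewed PSF for
# 488 nm excitation / 520 nm emission at NA 1.2; the values are illustrative only
if __name__ == '__main__':
    skewed_psf = generate_skewed_psf(ex_NA=1.2, ex_wvl=0.488, em_wvl=0.520)
    print('skewed PSF shape: '+str(skewed_psf.shape))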
| 5,177
| 32.623377
| 105
|
py
|
OPM
|
OPM-master/napari-control/src/utils/image_post_processing.py
|
#!/usr/bin/env python
'''
QI2lab OPM suite
Reconstruction tools
Image processing tools for OPM reconstruction
Last updated: Shepherd 01/22 - changes to include map_blocks based dexp deconvolution and recent other changes.
'''
import sys
import numpy as np
from pathlib import Path
#from tifffile import tifffile
from numba import njit, prange
#from flat_field import calc_flatfield
from functools import partial
import dask.array as da
from dask.diagnostics import ProgressBar
import gc
try:
    import cupy as cp
    CP_AVAILABLE = True
except Exception:
    CP_AVAILABLE = False
if CP_AVAILABLE:
    try:
        import microvolution_py as mv_decon
        DECON_LIBRARY = 'mv'
    except Exception:
        try:
            from clij2fft.richardson_lucy import richardson_lucy_nc
            from clij2fft.richardson_lucy import getlib
            DECON_LIBRARY = 'clij'
        except Exception:
            DECON_LIBRARY = None
else:
    DECON_LIBRARY = None
# http://numba.pydata.org/numba-doc/latest/user/parallel.html#numba-parallel
@njit(parallel=True)
def deskew(data,theta,distance,pixel_size):
"""
Perform parallelized orthogonal interpolation into a uniform pixel size grid.
:param data: ndarray
image stack of uniformly spaced OPM planes
:param theta: float
angle relative to coverslip
:param distance: float
step between image planes along coverslip
    :param pixel_size: float
        in-plane camera pixel size in OPM coordinates
:return output: ndarray
image stack of deskewed OPM planes on uniform grid
"""
# unwrap parameters
[num_images,ny,nx]=data.shape # (pixels)
# change step size from physical space (nm) to camera space (pixels)
pixel_step = distance/pixel_size # (pixels)
# calculate the number of pixels scanned during stage scan
scan_end = num_images * pixel_step # (pixels)
# calculate properties for final image
final_ny = np.int64(np.ceil(scan_end+ny*np.cos(theta*np.pi/180))) # (pixels)
final_nz = np.int64(np.ceil(ny*np.sin(theta*np.pi/180))) # (pixels)
final_nx = np.int64(nx) # (pixels)
# create final image
    output = np.zeros((final_nz, final_ny, final_nx),dtype=np.float32)  # (z, y, x in pixels - data is float32)
# precalculate trig functions for scan angle
tantheta = np.float32(np.tan(theta * np.pi/180)) # (float32)
sintheta = np.float32(np.sin(theta * np.pi/180)) # (float32)
costheta = np.float32(np.cos(theta * np.pi/180)) # (float32)
# perform orthogonal interpolation
# loop through output z planes
# defined as parallel loop in numba
# http://numba.pydata.org/numba-doc/latest/user/parallel.html#numba-parallel
for z in prange(0,final_nz):
# calculate range of output y pixels to populate
        y_range_min=np.maximum(0,np.int64(np.floor(np.float32(z)/tantheta)))
        y_range_max=np.minimum(final_ny,np.int64(np.ceil(scan_end+np.float32(z)/tantheta+1)))
# loop through final y pixels
# defined as parallel loop in numba
# http://numba.pydata.org/numba-doc/latest/user/parallel.html#numba-parallel
for y in prange(y_range_min,y_range_max):
# find the virtual tilted plane that intersects the interpolated plane
virtual_plane = y - z/tantheta
# find raw data planes that surround the virtual plane
plane_before = np.int64(np.floor(virtual_plane/pixel_step))
plane_after = np.int64(plane_before+1)
# continue if raw data planes are within the data range
if ((plane_before>=0) and (plane_after<num_images)):
# find distance of a point on the interpolated plane to plane_before and plane_after
l_before = virtual_plane - plane_before * pixel_step
l_after = pixel_step - l_before
# determine location of a point along the interpolated plane
za = z/sintheta
virtual_pos_before = za + l_before*costheta
virtual_pos_after = za - l_after*costheta
# determine nearest data points to interpoloated point in raw data
pos_before = np.int64(np.floor(virtual_pos_before))
pos_after = np.int64(np.floor(virtual_pos_after))
# continue if within data bounds
if ((pos_before>=0) and (pos_after >= 0) and (pos_before<ny-1) and (pos_after<ny-1)):
# determine points surrounding interpolated point on the virtual plane
dz_before = virtual_pos_before - pos_before
dz_after = virtual_pos_after - pos_after
# compute final image plane using orthogonal interpolation
output[z,y,:] = (l_before * dz_after * data[plane_after,pos_after+1,:] +
l_before * (1-dz_after) * data[plane_after,pos_after,:] +
l_after * dz_before * data[plane_before,pos_before+1,:] +
l_after * (1-dz_before) * data[plane_before,pos_before,:]) /pixel_step
# return output
return output
def manage_flat_field_py(stack):
"""
    Manage flat- and dark-field correction using a python adaptation of the BaSiC algorithm.
    :param stack: ndarray
        matrix of OPM planes
    :return corrected_stack, flat_field, dark_field: ndarrays
        corrected OPM planes and the estimated flat- and dark-field corrections
"""
num_images = 500
if stack.shape[0] > num_images:
stack_for_flat_field = stack[np.random.choice(stack.shape[0], num_images, replace=False)]
else:
stack_for_flat_field = stack
flat_field, dark_field = calc_flatfield(images=stack_for_flat_field)
corrected_stack = perform_flat_field(flat_field,dark_field,stack)
return corrected_stack, flat_field, dark_field
def perform_flat_field(flat_field,dark_field,stack):
"""
Calculate flat- and darkfield corrected image. Returns corrected image.
:param flat_field: ndarray
flatfield correction
:param dark_field: ndarray
darkfield correction
:param stack: dask.array
matrix of OPM planes
:return corrected_stack: ndarray
corrected OPM image planes
"""
#dark_field[dark_field>50]=50
#corrected_stack = stack.astype(np.float32) - dark_field
stack[stack<0] = 0
corrected_stack = stack/flat_field
return corrected_stack
def lr_deconvolution(image,psf,iterations=5):
"""
Tiled Lucy-Richardson deconvolution using DECON_LIBRARY
:param image: ndarray
raw data
:param psf: ndarray
theoretical PSF
:param iterations: int
number of iterations to run
:return deconvolved: ndarray
deconvolved image
"""
# create dask array and apodization window
scan_chunk_size = 128
if image.shape[0]<scan_chunk_size:
dask_raw = da.from_array(image,chunks=(image.shape[0],image.shape[1],image.shape[2]))
overlap_depth = (0,0,0)
else:
dask_raw = da.from_array(image,chunks=(scan_chunk_size,image.shape[1],image.shape[2]))
overlap_depth = (psf.shape[0],0,0)
del image
gc.collect()
if DECON_LIBRARY=='mv':
lr_dask = partial(mv_lr_decon,psf=psf,num_iterations=iterations)
elif DECON_LIBRARY=='clij':
lib = getlib()
lr_dask = partial(clij_lr,psf=psf,num_iters=iterations,tau=0.001,lib=lib)
# create dask plan for overlapped blocks
dask_decon = da.map_overlap(lr_dask,dask_raw,depth=overlap_depth,boundary='reflect',trim=True,meta=np.array((), dtype=np.uint16))
# perform LR deconvolution in blocks
with ProgressBar():
decon_data = dask_decon.compute(scheduler='single-threaded')
# clean up memory
    del dask_decon
    if DECON_LIBRARY == 'clij':
        del lib
gc.collect()
cp.clear_memo()
cp._default_memory_pool.free_all_blocks()
return decon_data.astype(np.uint16)
def mv_lr_decon(image,psf,num_iterations):
'''
    Lucy-Richardson deconvolution using the commercial Microvolution library.
    :param image: ndarray
        raw image
    :param psf: ndarray
        theoretical PSF
    :param num_iterations: int
        number of iterations to run
    :return new_image: ndarray
        deconvolved image
'''
params = mv_decon.DeconParameters()
params.generatePsf = False
params.nx = image.shape[2]
params.ny = image.shape[1]
params.nz = image.shape[0]
params.blind = False
params.psfNx = psf.shape[2]
params.psfNy = psf.shape[1]
params.psfNz = psf.shape[0]
params.dr = 115.0
params.dz = 400.0
params.psfDr = 115.0
params.psfDz = 400.0
params.iterations = num_iterations
params.background = 100
params.regularizationType=mv_decon.RegularizationType_TV
params.scaling = mv_decon.Scaling_U16
try:
launcher = mv_decon.DeconvolutionLauncher()
image = image.astype(np.float16)
launcher.SetParameters(params)
for z in range(params.nz):
launcher.SetImageSlice(z, image[z,:])
psf_image = psf.astype(np.float16)
for z in range(params.psfNz):
launcher.SetPsfSlice(z,psf_image[z,:])
new_image = np.zeros(image.shape,dtype=np.uint16)
del image
launcher.Run()
for z in range(params.nz):
launcher.RetrieveImageSlice(z, new_image[z,:])
    except Exception:
err = sys.exc_info()
print("Unexpected error:", err[0])
print(err[1])
print(err[2])
new_image = np.zeros(image.shape,dtype=np.uint16)
return new_image.astype(np.uint16)
def clij_lr(image,psf,num_iters,tau,lib):
result = richardson_lucy_nc(image.astype(np.float32),psf.astype(np.float32),num_iters,tau,lib)
return result.astype(np.uint16)
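
if __name__ == '__main__':
    # minimal self-test sketch (not part of the original module): deskew a
    # synthetic stack using illustrative OPM parameters (30 deg tilt, 0.4 um
    # stage step, 0.115 um camera pixels)
    test_stack = np.random.rand(50,128,256).astype(np.float32)
    deskewed = deskew(test_stack,30.0,0.4,0.115)
    print('deskewed shape: '+str(deskewed.shape))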
| 9,889
| 32.299663
| 133
|
py
|
OPM
|
OPM-master/napari-control/src/utils/data_io.py
|
#!/usr/bin/python
'''
----------------------------------------------------------------------------------------
OPM I/O functions
----------------------------------------------------------------------------------------
Peter Brown
Douglas Shepherd
12/11/2021
douglas.shepherd@asu.edu
----------------------------------------------------------------------------------------
'''
import re
from npy2bdv import BdvEditor
import pandas as pd
import numpy as np
from pathlib import Path
import tifffile
def read_metadata(fname):
"""
Read data from csv file consisting of one line giving titles, and the other giving values. Return as dictionary
:param fname:
:return metadata:
"""
scan_data_raw_lines = []
with open(fname, "r") as f:
for line in f:
scan_data_raw_lines.append(line.replace("\n", ""))
titles = scan_data_raw_lines[0].split(",")
# convert values to appropriate datatypes
vals = scan_data_raw_lines[1].split(",")
for ii in range(len(vals)):
if re.fullmatch("\d+", vals[ii]):
vals[ii] = int(vals[ii])
elif re.fullmatch("\d*.\d+", vals[ii]):
vals[ii] = float(vals[ii])
elif vals[ii].lower() == "False".lower():
vals[ii] = False
elif vals[ii].lower() == "True".lower():
vals[ii] = True
else:
# otherwise, leave as string
pass
# convert to dictionary
metadata = {}
for t, v in zip(titles, vals):
metadata[t] = v
return metadata
def read_config_file(config_path):
"""
Read data from csv file consisting of one line giving titles, and the other giving values. Return as dictionary
:param config_path: Path
Location of configuration file
:return dict_from_csv: dict
instrument configuration metadata
"""
dict_from_csv = pd.read_csv(config_path, header=None, index_col=0, squeeze=True).to_dict()
return dict_from_csv
def read_fluidics_program(program_path):
"""
Read fluidics program from CSV file as pandas dataframe
:param program_path: Path
location of fluidics program
:return df_program: Dataframe
dataframe containing fluidics program
"""
df_program = pd.read_csv(program_path)
return df_program
def write_metadata(data_dict, save_path):
"""
Write metadata file as csv
:param data_dict: dict
dictionary of metadata entries
:param save_path: Path
path for file
:return None:
"""
pd.DataFrame([data_dict]).to_csv(save_path)
def return_affine_xform(path_to_xml,r_idx,y_idx,z_idx,total_z_pos):
"""
    :param path_to_xml: Path
        path to BDV XML. BDV H5 must be present for loading
    :param r_idx: integer
        round index
    :param y_idx: integer
        y tile index
    :param z_idx: integer
        z tile index
    :param total_z_pos: integer
        total number of z positions per y tile
    :return affine_xforms: list
        list of affine transforms read from the BDV XML
"""
bdv_editor = BdvEditor(str(path_to_xml))
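    # flatten the (y, z) tile grid to BDV's linear tile index: tile_idx = y_idx * total_z_pos + z_idx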
tile_idx = (y_idx+z_idx)+(y_idx*(total_z_pos-1))
affine_xforms = []
read_affine_success = True
affine_idx = 0
while read_affine_success:
try:
affine_xform = bdv_editor.read_affine(time=r_idx,illumination=0,channel=0,tile=tile_idx,angle=0,index=affine_idx)
        except Exception:
read_affine_success = False
else:
affine_xforms.append(affine_xform)
affine_idx = affine_idx + 1
read_affine_success = True
return affine_xforms
def stitch_data(path_to_xml,iterative_flag):
"""
:param path_to_xml: Path
path to BDV XML. BDV H5 must be present for loading
:param iterative_flag: Bool
flag if multiple rounds need to be aligned
"""
# TO DO: 1. write either pyimagej bridge + macro OR call FIJI/BigStitcher in headless mode.
# 2. fix flipped x-axis between Python and FIJI. Easier to flip data in Python than deal with
# annoying affine that flips data.
def return_data_from_zarr_to_numpy(dataset, time_idx, channel_idx, num_images, y_pixels,x_pixels):
"""
:param dataset: zarr dataset object
:param time_idx: integer time_axis
:param channel_idx: integer channel index
:param num_images: integer for number of images from sweep to return
:param y_pixels: integer for y pixel size
:param x_pixels: integer for x pixel size
:return data_numpy: 3D numpy array of requested data
"""
    data_numpy = dataset[time_idx,channel_idx,0:num_images,:]
return data_numpy
def return_opm_psf(ch_idx):
"""
Load pre-generated OPM psf
TO DO: write checks and generate PSF if it does not exist on disk
    :param ch_idx: int
        wavelength channel index (0: 420 nm, 1: 520 nm, 2: 580 nm, 3: 670 nm, 4: 780 nm)
:return psf: ndarray
pre-generated skewed PSF
"""
root_path = Path(__file__).parent.resolve()
if ch_idx == 0:
psf_name = Path('psfs') / Path('opm_psf_420_nm.tif')
elif ch_idx == 1:
psf_name = Path('psfs') / Path('opm_psf_520_nm.tif')
elif ch_idx == 2:
psf_name = Path('psfs') / Path('opm_psf_580_nm.tif')
elif ch_idx == 3:
psf_name = Path('psfs') / Path('opm_psf_670_nm.tif')
    elif ch_idx == 4:
        psf_name = Path('psfs') / Path('opm_psf_780_nm.tif')
    else:
        raise ValueError('unknown channel index: ' + str(ch_idx))
psf_path = root_path / psf_name
opm_psf = tifffile.imread(psf_path)
return np.flipud(opm_psf)
| 5,574
| 27.88601
| 125
|
py
|
OPM
|
OPM-master/napari-control/src/utils/flat_field.py
|
#!/usr/bin/env python
'''
Python and cupy implementation of BaSiC flat-field correction (doi: 10.1038/ncomms14836)
Adapted from code found at: https://github.com/peng-lab/PyBasicCellprofilerPlugin
TO DO: Tons of optimization opportunities with cupy, numba, and cucim. Maybe need to write our own DCT operator for use on GPU?
Licensing of python code unclear, sent an email to clarify if we can reproduce here since we don't need full
CellProfiler plugin framework
Last updated: Shepherd 06/21
'''
import numpy as np
from typing import List
from skimage.transform import resize as skresize
from scipy.fftpack import dct, idct
import cupy as cp
RESIZE_ORDER = 1
RESIZE_MODE = "symmetric"
PRESERVE_RANGE = True
OUTPUT_IMAGE = "OutputImage"
FIRST_CYCLE = "First Cycle"
LAST_CYCLE = "Last Cycle"
def calc_flatfield(images,if_darkfield=True,if_baseline_drift=False,lambda_flatfield=0,lambda_darkfield=0,max_iterations=100,optimization_tolerance=1.0e-6,
                   max_reweight_iterations=10,epsilon=0.1,varying_coeff=True,reweight_tolerance=1.0e-3):
"""
Function to calculate darkfield and brightfield correction from an image stack
:param images: ndarray
    :param if_darkfield: boolean
:param lambda_flatfield: float
:param lambda_darkfield: float
:param max_iterations: int
:param optimization_tolerance: float
:param max_reweight_iterations: int
:param epsilon: float
:param varying_coeff: float
:param reweight_tolerance: float
:return darkfield: ndarray
:return flatfield: ndarray
"""
_saved_size = images[0].shape
nrows = _saved_size[0]//16
ncols = _saved_size[1]//16
D = np.zeros((images.shape[0],nrows,ncols), dtype=np.uint16)
for i in range(images.shape[0]):
D[i,:,:] = _resize_image(image=images[i,:], y_side_size=ncols,x_side_size=nrows)
meanD = np.mean(D, axis=2)
meanD = meanD / np.mean(meanD)
W_meanD = _dct2d(meanD.T)
# setting lambda_flatfield and lambda_darkfield if they are not set by the user
if lambda_flatfield <= 0:
lambda_flatfield = np.sum(np.abs(W_meanD)) / 400 * 0.5
if lambda_darkfield <= 0:
lambda_darkfield = lambda_flatfield * 0.2
D = np.sort(D, axis=2)
XAoffset = np.zeros((nrows, ncols))
weight = np.ones(D.shape)
reweighting_iter = 0
flag_reweighting = True
flatfield_last = np.ones((nrows, ncols))
darkfield_last = np.random.randn(nrows, ncols)
while flag_reweighting:
reweighting_iter += 1
initial_flatfield = False
if initial_flatfield:
raise NotImplementedError('Initial flatfield option not implemented yet!')
else:
X_k_A, X_k_E, X_k_Aoffset = _inexact_alm_rspca_l1(
images = D,
lambda_flatfield = lambda_flatfield,
if_darkfield = if_darkfield,
lambda_darkfield = lambda_darkfield,
optimization_tolerance = optimization_tolerance,
max_iterations = max_iterations,
weight=weight
)
XA = np.reshape(X_k_A, [nrows, ncols, -1], order='F')
XE = np.reshape(X_k_E, [nrows, ncols, -1], order='F')
XAoffset = np.reshape(X_k_Aoffset, [nrows, ncols], order='F')
XE_norm = XE / np.mean(XA, axis=(0, 1))
weight = np.ones_like(XE_norm) / (np.abs(XE_norm) + epsilon)
weight = weight * weight.size / np.sum(weight)
temp = np.mean(XA, axis=2) - XAoffset
flatfield_current = temp / np.mean(temp)
darkfield_current = XAoffset
mad_flatfield = np.sum(np.abs(flatfield_current - flatfield_last)) / np.sum(np.abs(flatfield_last))
temp_diff = np.sum(np.abs(darkfield_current - darkfield_last))
if temp_diff < 1e-7:
mad_darkfield = 0
else:
mad_darkfield = temp_diff / np.maximum(np.sum(np.abs(darkfield_last)), 1e-6)
flatfield_last = flatfield_current
darkfield_last = darkfield_current
if np.maximum(mad_flatfield,
mad_darkfield) <= reweight_tolerance or \
reweighting_iter >= max_reweight_iterations:
flag_reweighting = False
shading = np.mean(XA, 2) - XAoffset
flatfield = _resize_image(
image = shading,
x_side_size = _saved_size[0],
y_side_size = _saved_size[1]
)
flatfield = flatfield / np.mean(flatfield)
if if_darkfield:
darkfield = _resize_image(
image = XAoffset,
x_side_size = _saved_size[0],
y_side_size = _saved_size[1]
)
else:
darkfield = np.zeros_like(flatfield)
return flatfield.astype(np.float32), darkfield.astype(np.float32)
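# Hedged usage sketch (not in the original file): for a raw stack with shape
# (n_images, y, x), the estimated fields are typically applied per frame as
#   flatfield, darkfield = calc_flatfield(images, if_darkfield=True)
#   corrected = (frame.astype(np.float32) - darkfield) / flatfield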
def baseline_drift(images_list,working_size = 128, flatfield: np.ndarray = None, darkfield: np.ndarray = None, **kwargs):
#TODO: Rename s.t. fluorescence is included? E.g. background_fluorescence?
"""
Estimation of the background fluorescence signal for a time-lapse movie.
Used in conjunction with BaSiC.
"""
nrows = ncols = working_size
# Preparing input images
resized_images = np.stack(_resize_images_list(images_list = images_list, side_size = working_size))
resized_images = resized_images.reshape([-1, nrows * ncols], order = 'F')
# Resizing flat- and dark-field to the working size
resized_flatfield = _resize_image(image = flatfield, x_side_size = working_size, y_side_size = working_size)
resized_darkfield = _resize_image(image = darkfield, x_side_size = working_size, y_side_size = working_size)
# reweighting
_weights = np.ones(resized_images.shape)
epsilon = 0.1
tol = 1e-6
for reweighting_iter in range(1,6):
W_idct_hat = np.reshape(resized_flatfield, (1,-1), order='F')
A_offset = np.reshape(resized_darkfield, (1,-1), order='F')
A1_coeff = np.mean(resized_images, 1).reshape([-1,1])
# main iteration loop starts:
# norm_two is the largest singular value (first entry of the singular-value array from np.linalg.svd)
_temp = np.linalg.svd(resized_images, full_matrices=False)[1]
norm_two = _temp[0]
mu = 12.5/norm_two # this one can be tuned
mu_bar = mu * 1e7
rho = 1.5 # this one can be tuned
d_norm = np.linalg.norm(resized_images, ord = 'fro')
ent1 = 1
_iter = 0
total_svd = 0
converged = False
A1_hat = np.zeros(resized_images.shape)
E1_hat = np.zeros(resized_images.shape)
Y1 = 0
while not converged:
_iter = _iter + 1
A1_hat = W_idct_hat * A1_coeff + A_offset
# update E1 using l0 norm
E1_hat = E1_hat + np.divide((resized_images - A1_hat - E1_hat + (1/mu)*Y1), ent1)
E1_hat = np.maximum(E1_hat - _weights/(ent1*mu), 0) +\
np.minimum(E1_hat + _weights/(ent1*mu), 0)
# update A1_coeff, A2_coeff and A_offset
#if coeff_flag
R1 = resized_images - E1_hat
A1_coeff = np.mean(R1,1).reshape(-1,1) - np.mean(A_offset,1)
A1_coeff[A1_coeff<0] = 0
Z1 = resized_images - A1_hat - E1_hat
Y1 = Y1 + mu*Z1
mu = min(mu*rho, mu_bar)
# stop Criterion
stopCriterion = np.linalg.norm(Z1, ord = 'fro') / d_norm
if stopCriterion < tol:
converged = True
# updating weight
# XE_norm = E1_hat / np.mean(A1_hat)
XE_norm = E1_hat
mean_vec = np.mean(A1_hat, axis=1)
XE_norm = np.transpose(np.tile(mean_vec, (nrows * ncols, 1))) / XE_norm
_weights = 1./(abs(XE_norm)+epsilon)
_weights = np.divide( np.multiply(_weights, _weights.shape[0] * _weights.shape[1]), np.sum(_weights))
return A1_coeff
def _inexact_alm_rspca_l1(images,lambda_flatfield,if_darkfield,lambda_darkfield,optimization_tolerance,max_iterations,weight=None):
if weight is not None and weight.size != images.size:
raise ValueError('weight matrix has different size than input sequence')
# Initialization and given default variables
p = images.shape[2]
q = images.shape[1]
m = p*q
n = images.shape[0]
images = np.reshape(images, (m, n), order='F')
if weight is not None:
weight = np.reshape(weight, (m, n), order='F')
else:
weight = np.ones_like(images)
#_, svd, _ = np.linalg.svd(images, full_matrices=False) #TODO: Is there a more efficient implementation of SVD?
c_images = cp.asarray(images)
_, c_svd, _ = cp.linalg.svd(c_images,full_matrices=False)
svd = cp.asnumpy(c_svd)
norm_two = svd[0]
Y1 = 0
#Y2 = 0
ent1 = 1
ent2 = 10
A1_hat = np.zeros_like(images)
A1_coeff = np.ones((1, images.shape[1]))
E1_hat = np.zeros_like(images)
W_hat = _dct2d(np.zeros((p, q)).T)
mu = 12.5 / norm_two
mu_bar = mu * 1e7
rho = 1.5
d_norm = np.linalg.norm(images, ord='fro')
A_offset = np.zeros((m, 1))
B1_uplimit = np.min(images)
B1_offset = 0
#A_uplimit = np.expand_dims(np.min(images, axis=1), 1)
A_inmask = np.zeros((p, q))
A_inmask[int(np.round(p / 6) - 1): int(np.round(p*5 / 6)), int(np.round(q / 6) - 1): int(np.round(q * 5 / 6))] = 1
# main iteration loop starts
_iter = 0
total_svd = 0
converged = False
#time_zero = time.time()
#time_zero_it = time.time()
while not converged:
# time_zero_it = time.time()
_iter += 1
if len(A1_coeff.shape) == 1:
A1_coeff = np.expand_dims(A1_coeff, 0)
if len(A_offset.shape) == 1:
A_offset = np.expand_dims(A_offset, 1)
W_idct_hat = _idct2d(W_hat.T)
A1_hat = np.dot(np.reshape(W_idct_hat, (-1,1), order='F'), A1_coeff) + A_offset
temp_W = (images - A1_hat - E1_hat + (1 / mu) * Y1) / ent1
temp_W = np.reshape(temp_W, (p, q, n), order='F')
temp_W = np.mean(temp_W, axis=2)
W_hat = W_hat + _dct2d(temp_W.T)
W_hat = np.maximum(W_hat - lambda_flatfield / (ent1 * mu), 0) + np.minimum(W_hat + lambda_flatfield / (ent1 * mu), 0)
W_idct_hat = _idct2d(W_hat.T)
if len(A1_coeff.shape) == 1:
A1_coeff = np.expand_dims(A1_coeff, 0)
if len(A_offset.shape) == 1:
A_offset = np.expand_dims(A_offset, 1)
A1_hat = np.dot(np.reshape(W_idct_hat, (-1,1), order='F'), A1_coeff) + A_offset
E1_hat = images - A1_hat + (1 / mu) * Y1 / ent1
E1_hat = _shrinkageOperator(E1_hat, weight / (ent1 * mu))
R1 = images - E1_hat
A1_coeff = np.mean(R1, 0) / np.mean(R1)
A1_coeff[A1_coeff < 0] = 0
if if_darkfield:
validA1coeff_idx = np.where(A1_coeff < 1)
B1_coeff = (np.mean(R1[np.reshape(W_idct_hat, -1, order='F') > np.mean(W_idct_hat) - 1e-6][:, validA1coeff_idx[0]], 0) - \
np.mean(R1[np.reshape(W_idct_hat, -1, order='F') < np.mean(W_idct_hat) + 1e-6][:, validA1coeff_idx[0]], 0)) / np.mean(R1)
k = np.array(validA1coeff_idx).shape[1]
temp1 = np.sum(A1_coeff[validA1coeff_idx[0]]**2)
temp2 = np.sum(A1_coeff[validA1coeff_idx[0]])
temp3 = np.sum(B1_coeff)
temp4 = np.sum(A1_coeff[validA1coeff_idx[0]] * B1_coeff)
temp5 = temp2 * temp3 - temp4 * k
if temp5 == 0:
B1_offset = 0
else:
B1_offset = (temp1 * temp3 - temp2 * temp4) / temp5
# limit B1_offset: 0<B1_offset<B1_uplimit
B1_offset = np.maximum(B1_offset, 0)
B1_offset = np.minimum(B1_offset, B1_uplimit / (np.mean(W_idct_hat)+1e-5))
B_offset = B1_offset * np.reshape(W_idct_hat, -1, order='F') * (-1)
B_offset = B_offset + np.ones_like(B_offset) * B1_offset * np.mean(W_idct_hat)
A1_offset = np.mean(R1[:, validA1coeff_idx[0]], axis=1) - np.mean(A1_coeff[validA1coeff_idx[0]]) * np.reshape(W_idct_hat, -1, order='F')
A1_offset = A1_offset - np.mean(A1_offset)
A_offset = A1_offset - np.mean(A1_offset) - B_offset
# smooth A_offset
W_offset = _dct2d(np.reshape(A_offset, (p,q), order='F').T)
W_offset = np.maximum(W_offset - lambda_darkfield / (ent2 * mu), 0) + \
np.minimum(W_offset + lambda_darkfield / (ent2 * mu), 0)
A_offset = _idct2d(W_offset.T)
A_offset = np.reshape(A_offset, -1, order='F')
# encourage sparse A_offset
A_offset = np.maximum(A_offset - lambda_darkfield / (ent2 * mu), 0) + \
np.minimum(A_offset + lambda_darkfield / (ent2 * mu), 0)
A_offset = A_offset + B_offset
Z1 = images - A1_hat - E1_hat
Y1 = Y1 + mu * Z1
mu = np.minimum(mu * rho, mu_bar)
# Stop Criterion
stopCriterion = np.linalg.norm(Z1, ord='fro') / d_norm
if stopCriterion < optimization_tolerance:
converged = True
if not converged and _iter >= max_iterations:
converged = True
A_offset = np.squeeze(A_offset)
A_offset = A_offset + B1_offset * np.reshape(W_idct_hat, -1, order='F')
return A1_hat, E1_hat, A_offset
def _resize_image(image: np.ndarray, x_side_size: int = None, y_side_size: int = None):
if image.shape[0] != x_side_size or image.shape[1] != y_side_size:
return skresize(
image,
(x_side_size, y_side_size),
order = RESIZE_ORDER,
mode = RESIZE_MODE,
preserve_range = PRESERVE_RANGE
)
else:
return image
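# Hedged reconstruction: baseline_drift() calls _resize_images_list(), which is
# not defined in this file. A minimal sketch consistent with that call site:
def _resize_images_list(images_list: List[np.ndarray], side_size: int = 128):
    """Resize every image in the list to (side_size, side_size) pixels."""
    return [
        _resize_image(image=image, x_side_size=side_size, y_side_size=side_size)
        for image in images_list
    ]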
def _shrinkageOperator(matrix, epsilon):
temp1 = matrix - epsilon
temp1[temp1 < 0] = 0
temp2 = matrix + epsilon
temp2[temp2 > 0] = 0
res = temp1 + temp2
return res
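# Illustrative check (sketch): the shrinkage (soft-thresholding) operator pulls
# values toward zero by epsilon and clips anything that would cross zero, e.g.
#   _shrinkageOperator(np.array([3.0, 0.5, -2.0]), 1.0) -> array([ 2., 0., -1.])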
def _dct2d(mtrx: np.ndarray):
"""
Calculates 2D discrete cosine transform.
Parameters
----------
mtrx
Input matrix.
Returns
-------
Discrete cosine transform of the input matrix.
"""
# Check if input object is 2D.
if mtrx.ndim != 2:
raise ValueError("Passed object should be a matrix or a numpy array with dimension of two.")
return dct(dct(mtrx.T, norm='ortho').T, norm='ortho')
def _idct2d(mtrx: np.ndarray):
"""
Calculates 2D inverse discrete cosine transform.
Parameters
----------
mtrx
Input matrix.
Returns
-------
Inverse of discrete cosine transform of the input matrix.
"""
# Check if input object is 2D.
if mtrx.ndim != 2:
raise ValueError("Passed object should be a matrix or a numpy array with dimension of two.")
return idct(idct(mtrx.T, norm='ortho').T, norm='ortho')
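# Round-trip sanity check (illustrative sketch): with norm='ortho' the DCT pair
# is orthonormal, so _idct2d inverts _dct2d to floating-point precision:
#   x = np.random.rand(8, 8)
#   assert np.allclose(_idct2d(_dct2d(x)), x)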
| 14,832
| 34.570743
| 155
|
py
|
OPM
|
OPM-master/napari-control/src/utils/fluidics_control.py
|
#!/usr/bin/python
'''
----------------------------------------------------------------------------------------
OPM fluidics controller functions
----------------------------------------------------------------------------------------
Douglas Shepherd
12/11/2021
douglas.shepherd@asu.edu
----------------------------------------------------------------------------------------
'''
import numpy as np
import time
import sys
def lookup_valve(source_name):
"""
Convert name of well using ASU controller convention to ASU MVP valve settings
:param source_name: string
:return valve_position: list of [MVP unit, port]; None if the source name is unknown
"""
valve_dict = {'B01': [0,1], 'B02': [0,2], 'B03': [0,3], 'B04': [0,4], 'B05': [0,5], 'B06': [0,6], 'B07': [0,7],
'B08': [1,1], 'B09': [1,2], 'B10': [1,3], 'B11': [1,4], 'B12': [1,5], 'B13': [1,6], 'B14': [1,7],
'B15': [2,1], 'B16': [2,2], 'B17': [2,3], 'B18': [2,4], 'B19': [2,5], 'B20': [2,6], 'B21': [2,7],
'B22': [3,1], 'B23': [3,2], 'B24': [3,3],
'SSC': [3,0], 'READOUT WASH': [3,4], 'IMAGING BUFFER': [3,5], 'CLEAVE': [3,7]}
valve_position = valve_dict.get(source_name)
return valve_position
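# Illustrative examples (sketch): well names map to [MVP unit, port], e.g.
#   lookup_valve('B09') -> [1, 2]
#   lookup_valve('SSC') -> [3, 0]
# Unknown names return None, so callers should check before indexing.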
def run_fluidic_program(r_idx, df_program, mvp_controller, pump_controller):
"""
Run fluidics program for a given round. Requires data structure generated by ASU fluidics program generator (define_fluidics_program.ipynb)
:param r_idx: int
fluidics round to execute
:param df_program: dataframe
dataframe containing entire fluidics program
:param mvp_controller: HamiltonMVP
handle to initialized chain of Hamilton MVP valves
:param pump_controller: APump
handle to initialized pump
:return: boolean, True once the round's program completes
TO DO: need to work this into try/except statements to catch any pump errors
"""
# select current round
df_current_program = df_program[(df_program['round']==r_idx+1)]
print('Executing iterative round '+str(r_idx+1)+'.')
for index, row in df_current_program.iterrows():
# extract source name
source_name = str(row['source']).strip()
# extract pump rate
pump_amount_ml = float(row['volume'])
pump_time_min = float(row['time'])
if source_name == 'RUN':
pump_controller.stopFlow()
print('Fluidics round done, running imaging.')
elif source_name == 'PAUSE':
pump_controller.stopFlow()
print('Pausing for: '+str(pump_time_min*60)+' seconds.')
time.sleep(pump_time_min*60)
else:
# extract and set valve
valve_position = lookup_valve(source_name)
mvp_unit = valve_position[0]
valve_number = valve_position[1]
# set the selected MVP unit to the requested port and park all other units at port 0
for valve_ID in range(4):
if valve_ID == mvp_unit:
mvp_controller.changePort(valve_ID=valve_ID,port_ID=valve_number)
else:
mvp_controller.changePort(valve_ID=valve_ID,port_ID=0)
time.sleep(3)
print('MVP unit: '+str(mvp_unit)+'; Valve #: '+str(valve_number))
# convert ml/min rate to pump rate
# this is hardcoded to the ASU fluidic setup
# please check for your own setup
rate_to_pump_setting = {1.0: 48.0, 0.50: 11.0, 0.40: 10.0, 0.36: 9.5, 0.33: 9.0, 0.22: 5.0, 0.2: 4.0}
pump_rate = rate_to_pump_setting.get(float(np.round(pump_amount_ml/pump_time_min,2)), -1.0)
print('Pump setting: '+str(pump_rate))
if pump_rate == -1.0:
print('Error in determining pump rate. Exiting.')
sys.exit()
# run pump
pump_controller.startFlow(pump_rate,direction='Forward')
time.sleep(pump_time_min*60)
pump_controller.stopFlow()
return True
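# Hedged usage sketch: with a program dataframe from the qi2lab fluidics
# generator notebook and controllers initialized as in the acquisition scripts,
#   success = run_fluidic_program(0, df_program, mvp_controller, pump_controller)
# executes round 1 (r_idx is zero-based) and returns True on completion.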
| 5,342
| 39.78626
| 143
|
py
|
OPM
|
OPM-master/control/run_opm_iterative_stagescan_GUI.py
|
#!/usr/bin/env python
'''
OPM stage control with iterative fluidics.
Shepherd 08/21 - refactor for easier reading, O2-O3 autofocus, and managing file transfer in separate Python process
Shepherd 07/21 - switch to interleaved excitation during stage scan and initial work on O2-O3 autofocusing
Shepherd 06/21 - clean up code and bring inline with widefield bypass GUI code
Shepherd 05/21 - pull all settings from MM GUI and prompt user to setup experiment using "easygui" package
Shepherd 05/21 - add in fluidics control. Recently refactored into separate files
Shepherd 04/21 - large-scale changes for new metadata and on-the-fly uploading to server for simultaneous reconstruction
'''
# imports
# fluidics instrument control classes adapted from Zhuang lab code: https://github.com/ZhuangLab/storm-control
from hardware.APump import APump
from hardware.HamiltonMVP import HamiltonMVP
# Need to have arduino with shutter.ino uploaded to it to use this instrument control class
from hardware.ArduinoShutter import ArduinoShutter
# pycromanager control of micromanager
from pycromanager import Bridge, Acquisition
# python control of NI DAQ
import PyDAQmx as daq
import ctypes as ct
# Need to build from source: https://github.com/Galvant/InstrumentKit
# used for serial control of Thorlabs APT devices
#import instruments as ik
#import instruments.units as u
# python system imports
import shutil
import subprocess
from threading import Thread
import time
import sys
import gc
from pathlib import Path
import numpy as np
# qi2lab OPM functions
from utils.data_io import read_config_file, read_fluidics_program, write_metadata
from utils.fluidics_control import run_fluidic_program
from utils.opm_setup import setup_asi_tiger, setup_obis_laser_boxx, camera_hook_fn, retrieve_setup_from_MM
from utils.autofocus_remote_unit import manage_O3_focus
# quick and easy blocking GUI to interface with user for setup outside MM GUI
import easygui
def main():
""""
Execute iterative, interleaved OPM stage scan using MM GUI
"""
# flags for metadata, processing, drift correction, and O2-O3 autofocusing
setup_metadata=True
copy_data = False
setup_processing=False
debug_flag = False
resume_fluidics = False
#maintain_03_focus = False
correct_stage_drift = False
resume_r_idx = 0
resume_y_tile_idx = 0
resume_z_tile_idx = 0
z_offset = 0
z_pos_offset = 0
# check if user wants to flush system?
run_fluidics = False
flush_system = False
run_type = easygui.choicebox('Type of run?', 'Iterative multiplexing setup', ['Flush fluidics (no imaging)', 'Iterative imaging', 'Single round (test)'])
if run_type == str('Flush fluidics (no imaging)'):
flush_system = True
run_fluidics = True
# load fluidics program
fluidics_path = easygui.fileopenbox('Load fluidics program')
program_name = Path(fluidics_path)
elif run_type == str('Iterative imaging'):
flush_system = False
run_fluidics = True
# load fluidics program
fluidics_path = easygui.fileopenbox('Load fluidics program')
program_name = Path(fluidics_path)
elif run_type == str('Single round (test)'):
flush_system = False
run_fluidics = False
iterative_rounds = 1
file_directory = Path(__file__).resolve().parent
config_file = file_directory / Path('opm_config.csv')
df_config = read_config_file(config_file)
if run_fluidics:
# define ports for pumps and valves
pump_COM_port = str(df_config['pump_com_port'])
valve_COM_port = str(df_config['valve_com_port'])
# setup pump parameters
pump_parameters = {'pump_com_port': pump_COM_port,
'pump_ID': 30,
'verbose': True,
'simulate_pump': False,
'serial_verbose': False,
'flip_flow_direction': False}
# connect to pump
pump_controller = APump(pump_parameters)
# set pump to remote control
pump_controller.enableRemoteControl(True)
# connect to valves
valve_controller = HamiltonMVP(com_port=valve_COM_port)
# initialize valves
valve_controller.autoAddress()
# load user defined program from hard disk
df_program = read_fluidics_program(program_name)
iterative_rounds = df_program['round'].max()
print('Number of iterative rounds: '+str(iterative_rounds))
if flush_system:
# run fluidics program for this round
success_fluidics = False
success_fluidics = run_fluidic_program(0, df_program, valve_controller, pump_controller)
if not(success_fluidics):
print('Error in fluidics! Stopping scan.')
sys.exit()
print('Flushed fluidic system.')
sys.exit()
'''
# connect to alignment laser shutter
shutter_com_port = df_config['shutter_com_port']
shutter_parameters = {'arduino_com_port': shutter_com_port,
'verbose': False}
shutter_controller = ArduinoShutter(shutter_parameters)
shutter_controller.closeShutter()
# connect to piezo controller
# controller must be setup to have a virtual com port. Might need to follow
piezo_com_port = df_config['piezo_com_port']
piezo_controller = ik.thorlabs.APTPiezoInertiaActuator.open_serial(piezo_com_port, baud=115200)
piezo_channel = piezo_controller.channel[0]
piezo_channel.enabled_single = True
max_volts = u.Quantity(110, u.V)
step_rate = u.Quantity(100, 1/u.s)
acceleration = u.Quantity(1000, 1/u.s**2)
piezo_channel.drive_op_parameters = [max_volts, step_rate, acceleration]
'''
# connect to Micromanager instance
with Bridge() as bridge:
core = bridge.get_core()
#roi_imaging = [180,644,1960,512]
#core.set_roi(*roi_imaging)
'''
# make sure camera does not have an ROI set
core.snap_image()
y_pixels = core.get_image_height()
x_pixels = core.get_image_width()
while not(y_pixels==2304) or not(x_pixels==2304):
roi_reset = False
while not(roi_reset):
roi_reset = easygui.ynbox('Removed camera ROI?', 'Title', ('Yes', 'No'))
core.snap_image()
y_pixels = core.get_image_height()
x_pixels = core.get_image_width()
'''
'''
# set ROI
roi_selection = easygui.choicebox('Imaging volume setup.', 'ROI size', ['256x1800', '512x1800', '1024x1800'])
if roi_selection == str('256x1800'):
roi_imaging = [160,1024,1950,256]
roi_alignment = [876,85,64,64]
core.set_roi(*roi_imaging)
elif roi_selection == str('512x1800'):
roi_imaging = [160,896,1950,512]
core.set_roi(*roi_imaging)
elif roi_selection == str('1024x1800'):
roi_imaging = [160,640,1950,1024]
core.set_roi(*roi_imaging)
'''
'''
# check if user wants to capture alignment image
run_alignment = easygui.choicebox('Align O2-O3 coupling.', 'Mode', ['Capture new alignment', 'Load reference alignment'])
if run_alignment == str('Capture new alignment'):
reference_image, found_focus_position = manage_O3_focus(core,roi_alignment,shutter_controller,piezo_channel,initialize=True,reference_image=None)
elif run_alignment == str('Load reference alignment'):
print('Not implemented yet. Please capture new reference image.')
run_alignment = str('Capture new alignment')
'''
# set lasers to zero power and software control
#channel_powers = [0.,0.,0.,0.,0.]
#setup_obis_laser_boxx(core,channel_powers,state='software')
# set camera to fast readout mode
core.set_config('Camera-Setup','ScanMode3')
core.wait_for_config('Camera-Setup','ScanMode3')
# set camera to START mode upon input trigger
core.set_config('Camera-TriggerType','START')
core.wait_for_config('Camera-TriggerType','START')
# set camera to positive input trigger
core.set_config('Camera-TriggerPolarity','POSITIVE')
core.wait_for_config('Camera-TriggerPolarity','POSITIVE')
# set camera to internal control
core.set_config('Camera-TriggerSource','INTERNAL')
core.wait_for_config('Camera-TriggerSource','INTERNAL')
# set camera to output positive triggers on all lines for exposure
core.set_property('OrcaFusionBT','OUTPUT TRIGGER KIND[0]','EXPOSURE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER KIND[1]','EXPOSURE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER KIND[2]','EXPOSURE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER POLARITY[0]','POSITIVE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER POLARITY[1]','POSITIVE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER POLARITY[2]','POSITIVE')
# enable joystick
core.set_property('XYStage:XY:31','JoystickEnabled','Yes')
core.set_property('ZStage:M:37','JoystickInput','22 - right wheel')
# change core timeout for long stage moves
core.set_property('Core','TimeoutMs',500000)
time.sleep(1)
# galvo voltage at neutral
galvo_neutral_volt = -0.27 # unit: volts
# pull galvo line from config file
galvo_ao_line = str(df_config['galvo_ao_pin'])
# set the galvo to the neutral position if it is not already
try:
taskAO_first = daq.Task()
taskAO_first.CreateAOVoltageChan(galvo_ao_line,"",-4.0,4.0,daq.DAQmx_Val_Volts,None)
taskAO_first.WriteAnalogScalarF64(True, -1, galvo_neutral_volt, None)
taskAO_first.StopTask()
taskAO_first.ClearTask()
except daq.DAQError as err:
print("DAQmx Error %s"%err)
gc.collect()
# iterate over user defined program
# TO DO: find way to allow for restart based on metadata already saved to disk
for r_idx in range(resume_r_idx,iterative_rounds):
with Bridge() as bridge:
core = bridge.get_core()
studio = bridge.get_studio()
# get handle to xy and z stages
xy_stage = core.get_xy_stage_device()
z_stage = core.get_focus_device()
# set motors to on to actively maintain position during fluidics run
# TO DO: figure out how to make this work
#core.set_property('XYStage:XY:31','MaintainState-MA',2)
#core.set_property('ZStage:M:37','MaintainState-MA',2)
if run_fluidics:
if (r_idx == resume_r_idx) and not(resume_r_idx==0) and not(resume_fluidics):
print('Skipping fluidics to resume imaging.')
setup_metadata = False
else:
# run fluidics program for this round
success_fluidics = False
success_fluidics = run_fluidic_program(r_idx, df_program, valve_controller, pump_controller)
if not(success_fluidics):
print('Error in fluidics! Stopping scan.')
sys.exit()
# set motors to standard drift correction setting
# TO DO: figure out how to make this work
#core.set_property('XYStage:XY:31','MaintainState-MA',0)
#core.set_property('ZStage:M:37','MaintainState-MA',0)
# if first round, have user setup positions, laser intensities, and exposure time in MM GUI
if r_idx == resume_r_idx:
# setup imaging parameters using MM GUI
run_imaging = False
while not(run_imaging):
setup_done = False
while not(setup_done):
setup_done = easygui.ynbox('Finished setting up MM?', 'Title', ('Yes', 'No'))
df_MM_setup, active_channel_indices = retrieve_setup_from_MM(core,studio,df_config,debug=debug_flag)
channel_states = [bool(df_MM_setup['405_active']),
bool(df_MM_setup['488_active']),
bool(df_MM_setup['561_active']),
bool(df_MM_setup['635_active']),
bool(df_MM_setup['730_active'])]
channel_powers = [float(df_MM_setup['405_power']),
float(df_MM_setup['488_power']),
float(df_MM_setup['561_power']),
float(df_MM_setup['635_power']),
float(df_MM_setup['730_power'])]
# construct and display imaging summary to user
scan_settings = (f"Number of labeling rounds: {str(iterative_rounds)} \n\n"
f"Number of Y tiles: {str(df_MM_setup['tile_axis_positions'])} \n"
f"Tile start: {str(df_MM_setup['tile_axis_start_um'])} \n"
f"Tile end: {str(df_MM_setup['tile_axis_end_um'])} \n\n"
f"Number of Z slabs: {str(df_MM_setup['height_axis_positions'])} \n"
f"Height start: {str(df_MM_setup['height_axis_start_um'])} \n"
f"Height end: {str(df_MM_setup['height_axis_end_um'])} \n\n"
f"Number of channels: {str(df_MM_setup['n_active_channels'])} \n"
f"Active lasers: {str(channel_states)} \n"
f"Lasers powers: {str(channel_powers)} \n\n"
f"Number of scan positions: {str(df_MM_setup['scan_axis_positions'])} \n"
f"Scan start: {str(df_MM_setup['scan_axis_start_um'])} \n"
f"Scan end: {str(df_MM_setup['scan_axis_end_um'])} \n")
output = easygui.textbox(scan_settings, 'Please review scan settings')
# verify user actually wants to run imaging
run_imaging = easygui.ynbox('Run acquisition?', 'Title', ('Yes', 'No'))
if run_imaging == True:
# disable joystick
core.set_property('XYStage:XY:31','JoystickEnabled','No')
core.set_property('ZStage:M:37','JoystickInput','0 - none')
# set flag to change configuration
config_changed = True
# if last round, switch to DAPI + alexa488 readout instead
if (r_idx == (iterative_rounds - 1)) and (run_fluidics):
# enable joystick
core.set_property('XYStage:XY:31','JoystickEnabled','Yes')
core.set_property('ZStage:M:37','JoystickInput','22 - right wheel')
setup_done = False
while not(setup_done):
setup_done = easygui.ynbox('Finished setting up MM?', 'Title', ('Yes', 'No'))
df_MM_setup, active_channel_indices = retrieve_setup_from_MM(core,studio,df_config,debug=debug_flag)
channel_states = [bool(df_MM_setup['405_active']),
bool(df_MM_setup['488_active']),
bool(df_MM_setup['561_active']),
bool(df_MM_setup['635_active']),
bool(df_MM_setup['730_active'])]
channel_powers = [float(df_MM_setup['405_power']),
float(df_MM_setup['488_power']),
float(df_MM_setup['561_power']),
float(df_MM_setup['635_power']),
float(df_MM_setup['730_power'])]
# construct and display imaging summary to user
scan_settings = (f"Number of labeling rounds: {str(iterative_rounds)} \n\n"
f"Number of Y tiles: {str(df_MM_setup['tile_axis_positions'])} \n"
f"Tile start: {str(df_MM_setup['tile_axis_start_um'])} \n"
f"Tile end: {str(df_MM_setup['tile_axis_end_um'])} \n"
f"Tile step: {str(df_MM_setup['tile_axis_step_um'])} \n\n"
f"Number of Z slabs: {str(df_MM_setup['height_axis_positions'])} \n"
f"Height start: {str(df_MM_setup['height_axis_start_um'])} \n"
f"Height end: {str(df_MM_setup['height_axis_end_um'])} \n"
f"Height step: {str(df_MM_setup['height_axis_step_um'])} \n\n"
f"Number of channels: {str(df_MM_setup['n_active_channels'])} \n"
f"Active lasers: {str(channel_states)} \n"
f"Lasers powers: {str(channel_powers)} \n\n"
f"Number of scan positions: {str(df_MM_setup['scan_axis_positions'])} \n"
f"Scan start: {str(df_MM_setup['scan_axis_start_um'])} \n"
f"Scan end: {str(df_MM_setup['scan_axis_end_um'])} \n")
output = easygui.textbox(scan_settings, 'Please review last round scan settings')
# verify user actually wants to run imaging
run_imaging = easygui.ynbox('Run acquisition of last round?', 'Title', ('Yes', 'No'))
if run_imaging == True:
# disable joystick
core.set_property('XYStage:XY:31','JoystickEnabled','No')
core.set_property('ZStage:M:37','JoystickInput','0 - none')
# set flag to change configuration
config_changed = True
if config_changed:
core.set_xy_position(float(df_MM_setup['scan_axis_start_um']),float(df_MM_setup['tile_axis_start_um']))
core.wait_for_device(xy_stage)
# setup constant speed stage scanning on ASI Tiger controller
setup_asi_tiger(core,float(df_MM_setup['scan_axis_speed']),float(df_MM_setup['scan_axis_start_mm']),float(df_MM_setup['scan_axis_end_mm']))
setup_obis_laser_boxx(core,channel_powers,state='digital')
# create events to execute scan
events = []
for x in range(int(df_MM_setup['scan_axis_positions'])+int(df_config['excess_scan_positions'])):
for c in active_channel_indices:
evt = { 'axes': {'z': x,'c': c}}
events.append(evt)
# setup digital trigger buffer on DAQ
n_active_channels = int(df_MM_setup['n_active_channels'])
samples_per_ch = 2 * n_active_channels
DAQ_sample_rate_Hz = 10000
num_DI_channels = 8
# create DAQ pattern for laser strobing controlled via rolling shutter
dataDO = np.zeros((samples_per_ch, num_DI_channels), dtype=np.uint8)
for ii, ind in enumerate(active_channel_indices):
dataDO[2*ii::2*n_active_channels, int(ind)] = 1
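# Illustrative pattern (sketch): with active_channel_indices = [1, 2] (488 and
# 561), samples_per_ch = 4 and dataDO alternates one-hot rows on successive
# camera-trigger edges:
#   row 0: 488 line high, row 1: all low, row 2: 561 line high, row 3: all low
# so each camera exposure strobes exactly one laser in sequence.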
# set camera to internal control
core.set_config('Camera-TriggerSource','INTERNAL')
core.wait_for_config('Camera-TriggerSource','INTERNAL')
core = None
studio = None
del core, studio
gc.collect()
'''
if (r_idx > resume_r_idx) or (resume_r_idx==0):
resume_y_tile_idx = 0
resume_z_tile_idx = 0
'''
for y_idx in range(resume_y_tile_idx,int(df_MM_setup['tile_axis_positions'])):
with Bridge() as bridge:
core = bridge.get_core()
# calculate tile axis position
tile_position_um = float(df_MM_setup['tile_axis_start_um'])+(float(df_MM_setup['tile_axis_step_um'])*y_idx)
# move XY stage to new tile axis position
core.set_xy_position(float(df_MM_setup['scan_axis_start_um']),tile_position_um)
core.wait_for_device(xy_stage)
# turn on 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','No')
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
core.set_property('TigerCommHub','SerialCommand',command)
ready = core.get_property('TigerCommHub','SerialResponse')
time.sleep(.500)
# turn off 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','Yes')
# give MM time to return for proper port cleanup
time.sleep(1)
for z_idx in range(resume_z_tile_idx,int(df_MM_setup['height_axis_positions'])):
if df_MM_setup['height_strategy'] == 'tile':
# calculate height axis position
height_position_um = float(df_MM_setup['height_axis_start_um'])+(float(df_MM_setup['height_axis_step_um'])*z_idx)
elif df_MM_setup['height_strategy'] == 'track':
height_position_um = float(df_MM_setup['height_axis_start_um'])+(float(df_MM_setup['height_axis_step_um'])*y_idx)
# move Z stage to new height axis position
core.set_position(height_position_um)
core.wait_for_device(z_stage)
# update save_name with current tile information
if (r_idx == resume_r_idx) and (y_idx == resume_y_tile_idx) and (z_idx == resume_z_tile_idx):
save_name_ryz = Path(str(df_MM_setup['save_name'])+'_r'+str(r_idx).zfill(4)+'_y'+str(y_idx).zfill(4)+'_z'+str(z_idx+z_offset).zfill(4)+'_a')
else:
save_name_ryz = Path(str(df_MM_setup['save_name'])+'_r'+str(r_idx).zfill(4)+'_y'+str(y_idx).zfill(4)+'_z'+str(z_idx+z_offset).zfill(4))
# turn on 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','No')
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
core.set_property('TigerCommHub','SerialCommand',command)
ready = core.get_property('TigerCommHub','SerialResponse')
time.sleep(.500)
# turn off 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','Yes')
# query current stage positions
xy_pos = core.get_xy_stage_position()
stage_x = xy_pos.x
stage_y = xy_pos.y
stage_z = core.get_position()
# TO DO: implement phase correlation as described in file
if (correct_stage_drift == True) and (r_idx > 0):
# determine stage drift
pass
offset_y = 0.
offset_z = 0.
# apply YZ offsets
# do not offset X for now since it is the scan direction, which is easier to post-correct for
# create stage position dictionary
current_stage_data = [{'stage_x': float(stage_x),
'stage_y': float(stage_y),
'stage_z': float(stage_z),
'offset_y': float(offset_y),
'offset_z': float(offset_z)}]
# TO DO: install and activate ASI CRISP unit once it returns with different IR beam
# setup DAQ for laser strobing
try:
# ----- DIGITAL input -------
taskDI = daq.Task()
taskDI.CreateDIChan("/Dev1/PFI0","",daq.DAQmx_Val_ChanForAllLines)
## Configure change detection timing (from wave generator)
taskDI.CfgInputBuffer(0) # must be enforced for change-detection timing, i.e no buffer
taskDI.CfgChangeDetectionTiming("/Dev1/PFI0","/Dev1/PFI0",daq.DAQmx_Val_ContSamps,0)
## Set where the starting trigger
taskDI.CfgDigEdgeStartTrig("/Dev1/PFI0",daq.DAQmx_Val_Rising)
## Export DI signal to unused PFI pins, for clock and start
taskDI.ExportSignal(daq.DAQmx_Val_ChangeDetectionEvent, "/Dev1/PFI2")
taskDI.ExportSignal(daq.DAQmx_Val_StartTrigger,"/Dev1/PFI1")
# ----- DIGITAL output ------
taskDO = daq.Task()
taskDO.CreateDOChan("/Dev1/port0/line0:7","",daq.DAQmx_Val_ChanForAllLines)
## Configure timing (from DI task)
taskDO.CfgSampClkTiming("/Dev1/PFI2",DAQ_sample_rate_Hz,daq.DAQmx_Val_Rising,daq.DAQmx_Val_ContSamps,samples_per_ch)
## Write the output waveform
samples_per_ch_ct_digital = ct.c_int32()
taskDO.WriteDigitalLines(samples_per_ch,False,10.0,daq.DAQmx_Val_GroupByChannel,dataDO,ct.byref(samples_per_ch_ct_digital),None)
## ------ Start digital input and output tasks ----------
taskDO.StartTask()
taskDI.StartTask()
except daq.DAQError as err:
print("DAQmx Error %s"%err)
# set camera to external control
# DCAM sets the camera back to INTERNAL mode after each acquisition
core.set_config('Camera-TriggerSource','EXTERNAL')
core.wait_for_config('Camera-TriggerSource','EXTERNAL')
# verify that camera actually switched back to external trigger mode
trigger_state = core.get_property('OrcaFusionBT','TRIGGER SOURCE')
# if not in external control, keep trying until camera changes settings
while not(trigger_state =='EXTERNAL'):
time.sleep(2.0)
core.set_config('Camera-TriggerSource','EXTERNAL')
core.wait_for_config('Camera-TriggerSource','EXTERNAL')
trigger_state = core.get_property('OrcaFusionBT','TRIGGER SOURCE')
print('R: '+str(r_idx)+' Y: '+str(y_idx)+' Z: '+str(z_idx))
# run acquisition for this ryz combination
with Acquisition(directory=str(df_MM_setup['save_directory']), name=str(save_name_ryz),
post_camera_hook_fn=camera_hook_fn, show_display=False) as acq:
acq.acquire(events)
# stop DAQ and make sure it is at zero
try:
## Stop and clear both tasks
taskDI.StopTask()
taskDO.StopTask()
taskDI.ClearTask()
taskDO.ClearTask()
except daq.DAQError as err:
print("DAQmx Error %s"%err)
# save experimental info after first tile.
# we do it this way so that Pycromanager can manage directory creation
if (setup_metadata):
# save stage scan parameters
scan_param_data = [{'root_name': str(df_MM_setup['save_name']),
'scan_type': str('OPM-stage'),
'interleaved': bool(True),
'exposure': float(df_MM_setup['exposure_ms']),
'scan_axis_start': float(df_MM_setup['scan_axis_start_um']),
'scan_axis_end': float(df_MM_setup['scan_axis_end_um']),
'tile_axis_start': float(df_MM_setup['tile_axis_start_um']),
'tile_axis_end': float(df_MM_setup['tile_axis_end_um']),
'tile_axis_step': float(df_MM_setup['tile_axis_step_um']),
'height_axis_start': float(df_MM_setup['height_axis_start_um']),
'height_axis_end': float(df_MM_setup['height_axis_end_um']),
'height_axis_step': float(df_MM_setup['height_axis_step_um']),
'theta': float(30.0),
'scan_step': float(float(df_config['scan_axis_step_um'])*1000.),
'pixel_size': float(float(df_config['pixel_size'])*1000.),
'num_t': int(1),
'num_r': int(iterative_rounds),
'num_y': int(df_MM_setup['tile_axis_positions']),
'num_z': int(df_MM_setup['height_axis_positions']),
'num_ch': int(df_MM_setup['n_active_channels']),
'scan_axis_positions': int(df_MM_setup['scan_axis_positions']),
'excess_scan_positions': int(df_config['excess_scan_positions']),
'y_pixels': int(df_MM_setup['y_pixels']),
'x_pixels': int(df_MM_setup['x_pixels']),
'405_active': bool(channel_states[0]),
'488_active': bool(channel_states[1]),
'561_active': bool(channel_states[2]),
'635_active': bool(channel_states[3]),
'730_active': bool(channel_states[4]),
'405_power': float(channel_powers[0]),
'488_power': float(channel_powers[1]),
'561_power': float(channel_powers[2]),
'635_power': float(channel_powers[3]),
'730_power': float(channel_powers[4])}]
scan_metadata_path = Path(df_MM_setup['save_directory']) / Path('scan_metadata.csv')
write_metadata(scan_param_data[0], scan_metadata_path)
setup_metadata=False
# save stage scan positions after each tile
if (r_idx == resume_r_idx) and (y_idx == resume_y_tile_idx) and (z_idx == resume_z_tile_idx):
save_name_stage_positions = Path(str(df_MM_setup['save_name'])+'_r'+str(r_idx).zfill(4)+'_y'+str(y_idx).zfill(4)+'_z'+str(z_idx+z_offset).zfill(4)+'_a_stage_positions.csv')
else:
save_name_stage_positions = Path(str(df_MM_setup['save_name'])+'_r'+str(r_idx).zfill(4)+'_y'+str(y_idx).zfill(4)+'_z'+str(z_idx+z_offset).zfill(4)+'_stage_positions.csv')
save_name_stage_path = Path(df_MM_setup['save_directory']) / save_name_stage_positions
write_metadata(current_stage_data[0], save_name_stage_path)
# turn on 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','No')
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
core.set_property('TigerCommHub','SerialCommand',command)
ready = core.get_property('TigerCommHub','SerialResponse')
time.sleep(.500)
# turn off 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','Yes')
if copy_data:
# TO DO: rework this to run in a separate Python process so that we aren't launching a large number of threads off of this process.
# if first tile, make parent directory on NAS and start reconstruction script on the server
if setup_processing:
# make home directory on NAS
save_directory_path = Path(str(df_MM_setup['save_directory']))
remote_directory = Path('y:/') / Path(save_directory_path.parts[1])
cmd='mkdir ' + str(remote_directory)
status_mkdir = subprocess.run(cmd, shell=True)
# copy full experiment metadata to NAS
src= Path(df_MM_setup['save_directory']) / Path('scan_metadata.csv')
dst= Path(remote_directory) / Path('scan_metadata.csv')
Thread(target=shutil.copy, args=[str(src), str(dst)]).start()
setup_processing=False
# copy current ryzc metadata to NAS
save_directory_path = Path(df_MM_setup['save_directory'])
remote_directory = Path('y:/') / Path(save_directory_path.parts[1])
src= Path(df_MM_setup['save_directory']) / Path(save_name_stage_positions.name)
dst= Path(remote_directory) / Path(save_name_stage_positions.name)
Thread(target=shutil.copy, args=[str(src), str(dst)]).start()
# copy current ryzc data to NAS
save_directory_path = Path(df_MM_setup['save_directory'])
remote_directory = Path('y:/') / Path(save_directory_path.parts[1])
src= Path(df_MM_setup['save_directory']) / Path(str(save_name_ryz) + '_1')
dst= Path(remote_directory) / Path(str(save_name_ryz) + '_1')
Thread(target=shutil.copytree, args=[str(src), str(dst)]).start()
'''
if (maintain_03_focus == True):
# run O3 focus optimizer
reference_image, found_focus_position = manage_O3_focus(core,roi_alignment,shutter_controller,piezo_channel,initialize=False,reference_image=reference_image)
'''
core = None
del core
gc.collect()
resume_z_tile_idx = 0
with Bridge() as bridge:
core = bridge.get_core()
# set lasers to zero power and software control
channel_powers = [0.,0.,0.,0.,0.]
setup_obis_laser_boxx(core,channel_powers,state='software')
# set camera to internal control
core.set_config('Camera-TriggerSource','INTERNAL')
core.wait_for_config('Camera-TriggerSource','INTERNAL')
# enable joystick
core.set_property('XYStage:XY:31','JoystickEnabled','Yes')
core.set_property('ZStage:M:37','JoystickInput','22 - right wheel')
gc.collect()
# shut down python initialized hardware
if (run_fluidics):
# shutter_controller.close()
valve_controller.close()
pump_controller.close()
gc.collect()
#-----------------------------------------------------------------------------
if __name__ == "__main__":
main()
| 37,175
| 49.648501
| 196
|
py
|
OPM
|
OPM-master/reconstruction/recon_opm_stagescan.py
|
#!/usr/bin/env python
'''
Stage scanning OPM post-processing using numpy, numba, skimage, pyimagej, and npy2bdv.
Places all tiles in actual stage positions and places iterative rounds into the time axis of BDV H5 for alignment
Orthogonal interpolation method adapted from Vincent Maioli (http://doi.org/10.25560/68022)
Last updated: Shepherd 04/21
'''
# imports
import numpy as np
from pathlib import Path
from pycromanager import Dataset
import npy2bdv
import sys
import gc
import argparse
import time
from skimage.measure import block_reduce
from image_post_processing import deskew
from itertools import compress, product
import data_io
import zarr
import tifffile
# parse experimental directory, load data, perform orthogonal deskew, and save as BDV H5 file
def main(argv):
# parse directory name from command line argument
# parse command line arguments
parser = argparse.ArgumentParser(description="Process raw OPM data.")
parser.add_argument("-i", "--ipath", type=str, help="supply the directory to be processed")
parser.add_argument("-d", "--decon", type=int, default=0,
help="0: no deconvolution (DEFAULT), 1: deconvolution")
parser.add_argument("-f", "--flatfield", type=int, default=0, help="0: No flat field (DEFAULT), 1: flat field (FIJI) 2: flat field (python)")
parser.add_argument("-s", "--save_type", type=int, default=1, help="0: TIFF stack output, 1: BDV output (DEFAULT), 2: Zarr output")
parser.add_argument("-z", "--z_down_sample",type=int, default=1, help="1: No downsampling (DEFAULT), n: Nx downsampling")
args = parser.parse_args()
input_dir_string = args.ipath
decon_flag = args.decon
flatfield_flag = args.flatfield
save_type= args.save_type
z_down_sample = args.z_down_sample
# https://docs.python.org/3/library/pathlib.html
# Create Path object to directory
input_dir_path=Path(input_dir_string)
# create parameter array from scan parameters saved by acquisition code
df_metadata = data_io.read_metadata(input_dir_path / Path('scan_metadata.csv'))
root_name = df_metadata['root_name']
scan_type = df_metadata['scan_type']
theta = df_metadata['theta']
scan_step = df_metadata['scan_step']
pixel_size = df_metadata['pixel_size']
num_t = df_metadata['num_t']
num_y = df_metadata['num_y']
num_z = df_metadata['num_z']
num_ch = df_metadata['num_ch']
num_images = df_metadata['scan_axis_positions']
y_pixels = df_metadata['y_pixels']
x_pixels = df_metadata['x_pixels']
chan_405_active = df_metadata['405_active']
chan_488_active = df_metadata['488_active']
chan_561_active = df_metadata['561_active']
chan_635_active = df_metadata['635_active']
chan_730_active = df_metadata['730_active']
active_channels = [chan_405_active,chan_488_active,chan_561_active,chan_635_active,chan_730_active]
channel_idxs = [0,1,2,3,4]
channels_in_data = list(compress(channel_idxs, active_channels))
n_active_channels = len(channels_in_data)
if not (num_ch == n_active_channels):
print('Channel setup error. Check metadata file and directory names.')
sys.exit()
# calculate pixel sizes of deskewed image in microns
deskewed_x_pixel = pixel_size / 1000.
deskewed_y_pixel = pixel_size / 1000.
deskewed_z_pixel = pixel_size / 1000.
print('Deskewed pixel sizes before downsampling (um). x='+str(deskewed_x_pixel)+', y='+str(deskewed_y_pixel)+', z='+str(deskewed_z_pixel)+'.')
# create output directory
if decon_flag == 0 and flatfield_flag == 0:
output_dir_path = input_dir_path / 'deskew_output'
elif decon_flag == 0 and flatfield_flag > 0 :
output_dir_path = input_dir_path / 'deskew_flatfield_output'
elif decon_flag == 1 and flatfield_flag == 0:
output_dir_path = input_dir_path / 'deskew_decon_output'
elif decon_flag == 1 and flatfield_flag > 1:
output_dir_path = input_dir_path / 'deskew_flatfield_decon_output'
output_dir_path.mkdir(parents=True, exist_ok=True)
# Create TIFF if requested
if (save_type==0):
# create directory for data type
tiff_output_dir_path = output_dir_path / Path('tiff')
tiff_output_dir_path.mkdir(parents=True, exist_ok=True)
# Create BDV if requested
elif (save_type == 1):
# create directory for data type
bdv_output_dir_path = output_dir_path / Path('bdv')
bdv_output_dir_path.mkdir(parents=True, exist_ok=True)
# https://github.com/nvladimus/npy2bdv
# create BDV H5 file with sub-sampling for BigStitcher
bdv_output_path = bdv_output_dir_path / Path(root_name+'_bdv.h5')
bdv_writer = npy2bdv.BdvWriter(str(bdv_output_path),
nchannels=num_ch,
ntiles=num_y*num_z,
subsamp=((1,1,1),(4,8,8),(8,16,16)),
blockdim=((32, 128, 128),),
compression=None)
# create blank affine transformation to use for stage translation
unit_matrix = np.array(((1.0, 0.0, 0.0, 0.0), # change the 4th value for x_translation (px)
(0.0, 1.0, 0.0, 0.0), # change the 4th value for y_translation (px)
(0.0, 0.0, 1.0, 0.0)))# change the 4th value for z_translation (px)
# Create Zarr if requested
elif (save_type == 2):
# create directory for data type
zarr_output_dir_path = output_dir_path / Path('zarr')
zarr_output_dir_path.mkdir(parents=True, exist_ok=True)
# create name for zarr directory
zarr_output_path = zarr_output_dir_path / Path(root_name + '_zarr.zarr')
# calculate size of one volume
# change step size from physical space (nm) to camera space (pixels)
pixel_step = scan_step/pixel_size # (pixels)
# calculate the number of pixels scanned during stage scan
scan_end = num_images * pixel_step # (pixels)
# calculate properties for final image
ny = np.int64(np.ceil(scan_end+y_pixels*np.cos(theta*np.pi/180))) # (pixels)
nz = np.int64(np.ceil(y_pixels*np.sin(theta*np.pi/180))) # (pixels)
nx = np.int64(x_pixels) # (pixels)
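# Worked example (sketch with assumed values): for theta = 30 deg,
# scan_step = 400 nm, pixel_size = 115 nm, y_pixels = 512, num_images = 1000:
# pixel_step ~ 3.48 px, scan_end ~ 3478 px,
# ny = ceil(3478 + 512*cos(30 deg)) = 3922 px, nz = ceil(512*sin(30 deg)) = 256 px.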
# create and open zarr file
root = zarr.open(str(zarr_output_path), mode="w")
opm_data = root.zeros("opm_data", shape=(num_t, num_y*num_z, num_ch, nz, ny, nx), chunks=(1, 1, 1, 32, 128, 128), dtype=np.uint16)
root = zarr.open(str(zarr_output_path), mode="rw")
opm_data = root["opm_data"]
# if retrospective flatfield is requested, import and open pyimagej in interactive mode
# because BaSiC flat-fielding plugin cannot run in headless mode
if flatfield_flag == 1:
from image_post_processing import manage_flat_field
import imagej
import scyjava
scyjava.config.add_option('-Xmx12g')
plugins_dir = Path('/home/dps/Fiji.app/plugins')
scyjava.config.add_option(f'-Dplugins.dir={str(plugins_dir)}')
ij_path = Path('/home/dps/Fiji.app')
ij = imagej.init(str(ij_path), headless=False)
ij.ui().showUI()
print('PyimageJ approach to flat fielding will be removed soon. Switch to GPU accelerated python BASIC code (-f 2).')
elif flatfield_flag == 2:
from image_post_processing import manage_flat_field_py
# if decon is requested, import microvolution wrapper
if decon_flag == 1:
from image_post_processing import mv_decon
# initialize counters
timepoints_in_data = list(range(num_t))
y_tile_in_data = list(range(num_y))
z_tile_in_data = list(range(num_z))
ch_in_BDV = list(range(n_active_channels))
tile_idx=0
# loop over all directories. Each directory will be placed as a "tile" into the BigStitcher file
for (y_idx, z_idx) in product(y_tile_in_data,z_tile_in_data):
for (t_idx, ch_BDV_idx) in product(timepoints_in_data, ch_in_BDV):
ch_idx = channels_in_data[ch_BDV_idx]
# open stage positions file
stage_position_filename = Path('t'+str(t_idx).zfill(4)+'_y'+str(y_idx).zfill(4)+'_z'+str(z_idx).zfill(4)+'_ch'+str(ch_idx).zfill(4)+'_stage_positions.csv')
stage_position_path = input_dir_path / stage_position_filename
# check to see if stage position file exists yet
while(not(stage_position_path.exists())):
time.sleep(60)
df_stage_positions = data_io.read_metadata(stage_position_path)
stage_x = np.round(float(df_stage_positions['stage_x']),2)
stage_y = np.round(float(df_stage_positions['stage_y']),2)
stage_z = np.round(float(df_stage_positions['stage_z']),2)
print('y tile '+str(y_idx+1)+' of '+str(num_y)+'; z tile '+str(z_idx+1)+' of '+str(num_z)+'; channel '+str(ch_BDV_idx+1)+' of '+str(n_active_channels))
print('Stage location (um): x='+str(stage_x)+', y='+str(stage_y)+', z='+str(stage_z)+'.')
# construct directory name
current_tile_dir_path = Path(root_name+'_t'+str(t_idx).zfill(4)+'_y'+str(y_idx).zfill(4)+'_z'+str(z_idx).zfill(4)+'_ch'+str(ch_idx).zfill(4)+'_1')
tile_dir_path_to_load = input_dir_path / current_tile_dir_path
# https://pycro-manager.readthedocs.io/en/latest/read_data.html
dataset = Dataset(str(tile_dir_path_to_load))
raw_data = data_io.return_data_numpy(dataset=dataset, time_axis=None, channel_axis=None, num_images=num_images, y_pixels=y_pixels,x_pixels=x_pixels)
# perform flat-fielding
if flatfield_flag == 1:
print('Flatfield.')
corrected_stack = manage_flat_field(raw_data,ij)
elif flatfield_flag == 2:
corrected_stack = manage_flat_field_py(raw_data)
else:
corrected_stack = raw_data
del raw_data
# deskew
print('Deskew.')
deskewed = deskew(data=np.flipud(corrected_stack),theta=theta,distance=scan_step,pixel_size=pixel_size)
del corrected_stack
# downsample in z due to oversampling when going from OPM to coverslip geometry
if z_down_sample > 1:
print('Downsample.')
deskewed_downsample = block_reduce(deskewed, block_size=(z_down_sample,1,1), func=np.mean)
else:
deskewed_downsample = deskewed
del deskewed
# run deconvolution on deskewed image
if decon_flag == 1:
print('Deconvolve.')
deskewed_downsample_decon = mv_decon(deskewed_downsample,ch_idx,deskewed_y_pixel,z_down_sample*deskewed_z_pixel)
else:
deskewed_downsample_decon = deskewed_downsample
del deskewed_downsample
# save deskewed image into TIFF stack
if (save_type==0):
print('Write TIFF stack')
tiff_filename= root_name+'_t'+str(t_idx).zfill(3)+'_p'+str(tile_idx).zfill(4)+'_c'+str(ch_idx).zfill(3)+'.tiff'
tiff_output_path = tiff_output_dir_path / Path(tiff_filename)
tifffile.imwrite(str(tiff_output_path), deskewed_downsample_decon, imagej=True, resolution=(1/deskewed_x_pixel, 1/deskewed_y_pixel),
metadata={'spacing': (z_down_sample*deskewed_z_pixel), 'unit': 'um', 'axes': 'ZYX'})
metadata_filename = root_name+'_t'+str(t_idx).zfill(3)+'_p'+str(tile_idx).zfill(4)+'_c'+str(ch_idx).zfill(3)+'.csv'
metadata_output_path = tiff_output_dir_path / Path(metadata_filename)
tiff_stage_metadata = [{'stage_x': float(stage_x),
'stage_y': float(stage_y),
'stage_z': float(stage_z)}]
data_io.write_metadata(tiff_stage_metadata[0], metadata_output_path)
elif (save_type==1):
# create affine transformation for stage translation
# swap x & y from instrument to BDV
affine_matrix = unit_matrix.copy()
affine_matrix[0,3] = (stage_y)/(deskewed_y_pixel) # x-translation
affine_matrix[1,3] = (stage_x)/(deskewed_x_pixel) # y-translation
affine_matrix[2,3] = (-1*stage_z) / (z_down_sample*deskewed_z_pixel) # z-translation
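# Worked example (sketch): with deskewed_y_pixel = 0.115 um, a stage_y of
# 115.0 um becomes an x-translation of 1000 px in the BDV affine; the factor
# of -1 on stage_z maps the stage convention onto the BDV z axis.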
# save tile in BDV H5 with actual stage positions
print('Write into BDV H5.')
bdv_writer.append_view(deskewed_downsample_decon, time=0, channel=ch_BDV_idx,
tile=tile_idx,
voxel_size_xyz=(deskewed_x_pixel, deskewed_y_pixel, z_down_sample*deskewed_z_pixel),
voxel_units='um',
calibration=(1,1,(z_down_sample*deskewed_z_pixel)/deskewed_y_pixel),
m_affine=affine_matrix,
name_affine = 'tile '+str(tile_idx)+' translation')
elif (save_type==2):
print('Write data into Zarr container')
opm_data[t_idx, tile_idx, ch_BDV_idx, :, :, :] = deskewed_downsample_decon
metadata_filename = root_name+'_t'+str(t_idx).zfill(3)+'_p'+str(tile_idx).zfill(4)+'_c'+str(ch_idx).zfill(3)+'.csv'
metadata_output_path = zarr_output_dir_path / Path(metadata_filename)
zarr_stage_metadata = [{'stage_x': float(stage_x),
'stage_y': float(stage_y),
'stage_z': float(stage_z)}]
data_io.write_metadata(zarr_stage_metadata[0], metadata_output_path)
# free up memory
del deskewed_downsample_decon
gc.collect()
tile_idx=tile_idx+1
if (save_type==1):
# write BDV xml file
# https://github.com/nvladimus/npy2bdv
# bdv_writer.write_xml(ntimes=num_t)
bdv_writer.write_xml()
bdv_writer.close()
# shut down pyimagej
if (flatfield_flag == 1):
ij.getContext().dispose()
# exit
print('Finished.')
sys.exit()
# run
if __name__ == "__main__":
main(sys.argv[1:])
| 14,548
| 47.016502
| 167
|
py
|
OPM
|
OPM-master/reconstruction/run_opm_stagescan.py
|
#!/usr/bin/env python
'''
OPM stage control without fluidics.
Shepherd 04/21 - large-scale changes for new metadata and on-the-fly uploading to server for simultaneous reconstruction
'''
# imports
from pycromanager import Bridge, Acquisition
from pathlib import Path
import numpy as np
import time
import sys
import msvcrt
import pandas as pd
import subprocess
import PyDAQmx as daq
import ctypes as ct
from itertools import compress
import shutil
from threading import Thread
import data_io
import gc
def camera_hook_fn(event,bridge,event_queue):
core = bridge.get_core()
command='1SCAN'
core.set_property('TigerCommHub','SerialCommand',command)
return event
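# Hedged note: this hook is registered via
#   Acquisition(..., post_camera_hook_fn=camera_hook_fn)
# so it fires after the camera is armed for each hardware-triggered sequence;
# issuing '1SCAN' then starts the Tiger stage scan, which triggers the camera.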
def main():
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------Begin setup of scan parameters--------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
# set up lasers
channel_labels = ["405", "488", "561", "635", "730"]
channel_states = [False, False, True, False, False] # true -> active, false -> inactive
channel_powers = [50, 10, 90, 100, 95] # (0 -> 100%)
do_ind = [0, 1, 2, 3, 4] # digital output line corresponding to each channel
# parse which channels are active
active_channel_indices = [ind for ind, st in zip(do_ind, channel_states) if st]
n_active_channels = len(active_channel_indices)
print("%d active channels: " % n_active_channels, end="")
for ind in active_channel_indices:
print("%s " % channel_labels[ind], end="")
print("")
# exposure time
exposure_ms = 50.0
# excess scan positions
excess_scan_positions = 10
# galvo voltage at neutral
galvo_neutral_volt = -0.15 # unit: volts
# scan axis limits. Use stage positions reported by MM
scan_axis_start_um = 8680. #unit: um
scan_axis_end_um = 8800. #unit: um
# tile axis limits. Use stage positions reported by MM
tile_axis_start_um = -3841.28 #unit: um
tile_axis_end_um = -3841.28 #unit: um
# height axis limits. Use stage positions reported by MM
height_axis_start_um = 13128.63 #unit: um
height_axis_end_um = 13128.63 #unit: um
# number of timepoints to execute
# TO DO: add in control for rate of experiment
timepoints = 1
# FOV parameters
# ONLY MODIFY IF NECESSARY
# ROI = [0, 1152, 2304, 512] #unit: pixels
# setup file name
save_directory=Path('D:/20210831')
save_name = 'stage_scan'
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------End setup of scan parameters----------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
# connect to Micromanager instance
bridge = Bridge()
core = bridge.get_core()
# turn off lasers
core.set_config('Laser','Off')
core.wait_for_config('Laser','Off')
# set camera to fast readout mode
core.set_config('Camera-Setup','ScanMode3')
core.wait_for_config('Camera-Setup','ScanMode3')
# set camera to START mode upon input trigger
core.set_config('Camera-TriggerType','START')
core.wait_for_config('Camera-TriggerType','START')
# set camera to positive input trigger
core.set_config('Camera-TriggerPolarity','POSITIVE')
core.wait_for_config('Camera-TriggerPolarity','POSITIVE')
# set camera to internal control
core.set_config('Camera-TriggerSource','INTERNAL')
core.wait_for_config('Camera-TriggerSource','INTERNAL')
# set camera to output positive triggers on all lines for exposure
core.set_property('OrcaFusionBT','OUTPUT TRIGGER KIND[0]','EXPOSURE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER KIND[1]','EXPOSURE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER KIND[2]','EXPOSURE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER POLARITY[0]','POSITIVE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER POLARITY[1]','POSITIVE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER POLARITY[2]','POSITIVE')
# change core timeout for long stage moves
core.set_property('Core','TimeoutMs',100000)
time.sleep(1)
# set exposure
core.set_exposure(exposure_ms)
# determine image size
core.snap_image()
y_pixels = core.get_image_height()
x_pixels = core.get_image_width()
# grab exposure
true_exposure = core.get_exposure()
# get actual framerate from micromanager properties
actual_readout_ms = true_exposure+float(core.get_property('OrcaFusionBT','ReadoutTime')) #unit: ms
# camera pixel size
pixel_size_um = .115 # unit: um
# scan axis setup
scan_axis_step_um = 0.4 # unit: um
scan_axis_step_mm = scan_axis_step_um / 1000. #unit: mm
scan_axis_start_mm = scan_axis_start_um / 1000. #unit: mm
scan_axis_end_mm = scan_axis_end_um / 1000. #unit: mm
scan_axis_range_um = np.abs(scan_axis_end_um-scan_axis_start_um) # unit: um
scan_axis_range_mm = scan_axis_range_um / 1000 #unit: mm
actual_exposure_s = actual_readout_ms / 1000. #unit: s
scan_axis_speed = np.round(scan_axis_step_mm / actual_exposure_s,4) #unit: mm/s
scan_axis_positions = np.rint(scan_axis_range_mm / scan_axis_step_mm).astype(int) #unit: number of positions
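    # worked example (illustrative numbers, not measured): with the 0.4 um step
    # above and an actual readout of ~52 ms per frame,
    #   scan_axis_speed = 0.0004 mm / 0.052 s ~= 0.0077 mm/s,
    # and the 120 um range gives 0.120 mm / 0.0004 mm = 300 scan positions.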
# tile axis setup
tile_axis_overlap=0.2 #unit: percentage
tile_axis_range_um = np.abs(tile_axis_end_um - tile_axis_start_um) #unit: um
tile_axis_range_mm = tile_axis_range_um / 1000 #unit: mm
tile_axis_ROI = x_pixels*pixel_size_um #unit: um
tile_axis_step_um = np.round((tile_axis_ROI) * (1-tile_axis_overlap),2) #unit: um
tile_axis_step_mm = tile_axis_step_um / 1000 #unit: mm
tile_axis_positions = np.rint(tile_axis_range_mm / tile_axis_step_mm).astype(int)+1 #unit: number of positions
# if tile_axis_positions rounded to zero, make sure we acquire at least one position
if tile_axis_positions == 0:
tile_axis_positions=1
# height axis setup
height_axis_overlap=0.2 #unit: percentage
height_axis_range_um = np.abs(height_axis_end_um-height_axis_start_um) #unit: um
height_axis_range_mm = height_axis_range_um / 1000 #unit: mm
height_axis_ROI = y_pixels*pixel_size_um*np.sin(30.*np.pi/180.) #unit: um
height_axis_step_um = np.round((height_axis_ROI)*(1-height_axis_overlap),2) #unit: um
height_axis_step_mm = height_axis_step_um / 1000 #unit: mm
height_axis_positions = np.rint(height_axis_range_mm / height_axis_step_mm).astype(int)+1 #unit: number of positions
# if height_axis_positions rounded to zero, make sure we acquire at least one position
if height_axis_positions==0:
height_axis_positions=1
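    # worked example (assuming, e.g., x_pixels = 2304 and y_pixels = 512 as in the
    # commented ROI above):
    #   tile step   = (2304 * 0.115 um) * (1 - 0.2)              ~= 211.97 um
    #   height step = (512 * 0.115 um * sin(30 deg)) * (1 - 0.2) ~= 23.55 um
    # with identical start/end positions above, both axes resolve to 1 position.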
# get handle to xy and z stages
xy_stage = core.get_xy_stage_device()
z_stage = core.get_focus_device()
# galvo voltage at neutral
galvo_neutral_volt = -.15 # unit: volts
# set the galvo to the neutral position if it is not already
try:
taskAO_first = daq.Task()
taskAO_first.CreateAOVoltageChan("/Dev1/ao0","",-4.0,4.0,daq.DAQmx_Val_Volts,None)
taskAO_first.WriteAnalogScalarF64(True, -1, galvo_neutral_volt, None)
taskAO_first.StopTask()
taskAO_first.ClearTask()
    except daq.DAQError as err:
        print("DAQmx Error %s"%err)
    # Set up the Tiger controller to pass a signal to the PLC when the scan stage crosses the start position
plcName = 'PLogic:E:36'
propPosition = 'PointerPosition'
propCellConfig = 'EditCellConfig'
#addrOutputBNC3 = 35 # BNC3 on the PLC front panel
addrOutputBNC1 = 33 # BNC1 on the PLC front panel
addrStageSync = 46 # TTL5 on Tiger backplane = stage sync signal
# connect stage sync signal to BNC output
core.set_property(plcName, propPosition, addrOutputBNC1)
core.set_property(plcName, propCellConfig, addrStageSync)
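    # The PLC is programmed in two steps: 'PointerPosition' selects the cell to
    # edit (here the BNC1 front-panel output), and 'EditCellConfig' writes the
    # signal source into that cell (address 46 = backplane stage sync signal).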
# turn on 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','No')
# set tile axis speed for all moves
command = 'SPEED Y=.1'
core.set_property('TigerCommHub','SerialCommand',command)
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
core.set_property('TigerCommHub','SerialCommand',command)
ready = core.get_property('TigerCommHub','SerialResponse')
time.sleep(.500)
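    # This STATUS polling idiom recurs throughout the script; a small helper could
    # consolidate it (a sketch only, not used by the original code):
    #
    # def wait_for_tiger(core, poll_s=0.5):
    #     """Poll the Tiger controller until it reports 'N' (not busy)."""
    #     ready = 'B'
    #     while ready != 'N':
    #         core.set_property('TigerCommHub','SerialCommand','STATUS')
    #         ready = core.get_property('TigerCommHub','SerialResponse')
    #         time.sleep(poll_s)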
# set scan axis speed for large move to initial position
command = 'SPEED X=.1'
core.set_property('TigerCommHub','SerialCommand',command)
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
core.set_property('TigerCommHub','SerialCommand',command)
ready = core.get_property('TigerCommHub','SerialResponse')
time.sleep(.500)
# turn off 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','Yes')
    # move scan stage to initial position
core.set_xy_position(scan_axis_start_um,tile_axis_start_um)
core.wait_for_device(xy_stage)
core.set_position(height_axis_start_um)
core.wait_for_device(z_stage)
# turn on 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','No')
# set scan axis speed to correct speed for continuous stage scan
# expects mm/s
command = 'SPEED X='+str(scan_axis_speed)
core.set_property('TigerCommHub','SerialCommand',command)
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
core.set_property('TigerCommHub','SerialCommand',command)
ready = core.get_property('TigerCommHub','SerialResponse')
time.sleep(.500)
# set scan axis to true 1D scan with no backlash
command = '1SCAN X? Y=0 Z=9 F=0'
core.set_property('TigerCommHub','SerialCommand',command)
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
core.set_property('TigerCommHub','SerialCommand',command)
ready = core.get_property('TigerCommHub','SerialResponse')
time.sleep(.500)
# set range and return speed (5% of max) for scan axis
# expects mm
command = '1SCANR X='+str(scan_axis_start_mm)+' Y='+str(scan_axis_end_mm)+' R=10'
core.set_property('TigerCommHub','SerialCommand',command)
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
core.set_property('TigerCommHub','SerialCommand',command)
ready = core.get_property('TigerCommHub','SerialResponse')
time.sleep(.500)
# turn off 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','Yes')
    # set all lasers to external triggering
core.set_config('Modulation-405','External-Digital')
core.wait_for_config('Modulation-405','External-Digital')
core.set_config('Modulation-488','External-Digital')
core.wait_for_config('Modulation-488','External-Digital')
core.set_config('Modulation-561','External-Digital')
core.wait_for_config('Modulation-561','External-Digital')
core.set_config('Modulation-637','External-Digital')
core.wait_for_config('Modulation-637','External-Digital')
core.set_config('Modulation-730','External-Digital')
core.wait_for_config('Modulation-730','External-Digital')
# turn all lasers on
core.set_config('Laser','AllOn')
core.wait_for_config('Laser','AllOn')
# set lasers to user defined power
core.set_property('Coherent-Scientific Remote','Laser 405-100C - PowerSetpoint (%)',channel_powers[0])
core.set_property('Coherent-Scientific Remote','Laser 488-150C - PowerSetpoint (%)',channel_powers[1])
core.set_property('Coherent-Scientific Remote','Laser OBIS LS 561-150 - PowerSetpoint (%)',channel_powers[2])
core.set_property('Coherent-Scientific Remote','Laser 637-140C - PowerSetpoint (%)',channel_powers[3])
core.set_property('Coherent-Scientific Remote','Laser 730-30C - PowerSetpoint (%)',channel_powers[4])
# setup DAQ
samples_per_ch = 2
DAQ_sample_rate_Hz = 10000
num_DI_channels = 8
# set the galvo to neutral
taskAO_last = daq.Task()
taskAO_last.CreateAOVoltageChan("/Dev1/ao0","",-4.0,4.0,daq.DAQmx_Val_Volts,None)
taskAO_last.WriteAnalogScalarF64(True, -1, galvo_neutral_volt, None)
taskAO_last.StopTask()
taskAO_last.ClearTask()
# output experiment info
print('Number of X positions: '+str(scan_axis_positions))
print('Number of Y tiles: '+str(tile_axis_positions))
print('Number of Z slabs: '+str(height_axis_positions))
print('Number of channels: '+str(n_active_channels))
# flags for metadata and processing
setup_processing=True
setup_metadata=True
# create events to execute scan
events = []
for x in range(scan_axis_positions+excess_scan_positions):
evt = { 'axes': {'z': x}}
events.append(evt)
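    # each event is one camera frame along the stage scan; the extra
    # excess_scan_positions frames cover the stage ramp-up and are discarded
    # during reconstruction.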
for t_idx in range(timepoints):
for y_idx in range(tile_axis_positions):
# calculate tile axis position
tile_position_um = tile_axis_start_um+(tile_axis_step_um*y_idx)
# move XY stage to new tile axis position
core.set_xy_position(scan_axis_start_um,tile_position_um)
core.wait_for_device(xy_stage)
for z_idx in range(height_axis_positions):
# calculate height axis position
height_position_um = height_axis_start_um+(height_axis_step_um*z_idx)
# move Z stage to new height axis position
core.set_position(height_position_um)
core.wait_for_device(z_stage)
for ch_idx in active_channel_indices:
# create DAQ pattern for laser strobing controlled via rolling shutter
dataDO = np.zeros((samples_per_ch,num_DI_channels),dtype=np.uint8)
dataDO[0,ch_idx]=1
dataDO[1,ch_idx]=0
#print(dataDO)
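                    # e.g. for ch_idx = 2 (561 nm) the two-sample pattern is
                    #   [[0, 0, 1, 0, 0, 0, 0, 0],
                    #    [0, 0, 0, 0, 0, 0, 0, 0]]
                    # so the DO task raises line 2 on one camera exposure edge and
                    # lowers it on the next, strobing the laser once per frame.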
# update save_name with current tile information
save_name_tyzc = save_name +'_t'+str(t_idx).zfill(4)+'_y'+str(y_idx).zfill(4)+'_z'+str(z_idx).zfill(4)+'_ch'+str(ch_idx).zfill(4)
# turn on 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','No')
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
core.set_property('TigerCommHub','SerialCommand',command)
ready = core.get_property('TigerCommHub','SerialResponse')
time.sleep(.500)
# turn off 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','Yes')
# save actual stage positions
xy_pos = core.get_xy_stage_position()
stage_x = xy_pos.x
stage_y = xy_pos.y
stage_z = core.get_position()
current_stage_data = [{'stage_x': stage_x, 'stage_y': stage_y, 'stage_z': stage_z}]
df_current_stage = pd.DataFrame(current_stage_data)
# setup DAQ for laser strobing
try:
# ----- DIGITAL input -------
taskDI = daq.Task()
taskDI.CreateDIChan("/Dev1/PFI0","",daq.DAQmx_Val_ChanForAllLines)
## Configure change detection timing (from wave generator)
                        taskDI.CfgInputBuffer(0) # must be enforced for change-detection timing, i.e. no buffer
taskDI.CfgChangeDetectionTiming("/Dev1/PFI0","/Dev1/PFI0",daq.DAQmx_Val_ContSamps,0)
                        ## Set the starting trigger
taskDI.CfgDigEdgeStartTrig("/Dev1/PFI0",daq.DAQmx_Val_Rising)
## Export DI signal to unused PFI pins, for clock and start
taskDI.ExportSignal(daq.DAQmx_Val_ChangeDetectionEvent, "/Dev1/PFI2")
taskDI.ExportSignal(daq.DAQmx_Val_StartTrigger,"/Dev1/PFI1")
# ----- DIGITAL output ------
taskDO = daq.Task()
taskDO.CreateDOChan("/Dev1/port0/line0:7","",daq.DAQmx_Val_ChanForAllLines)
## Configure timing (from DI task)
taskDO.CfgSampClkTiming("/Dev1/PFI2",DAQ_sample_rate_Hz,daq.DAQmx_Val_Rising,daq.DAQmx_Val_ContSamps,samples_per_ch)
## Write the output waveform
samples_per_ch_ct_digital = ct.c_int32()
taskDO.WriteDigitalLines(samples_per_ch,False,10.0,daq.DAQmx_Val_GroupByChannel,dataDO,ct.byref(samples_per_ch_ct_digital),None)
## ------ Start digital input and output tasks ----------
taskDO.StartTask()
taskDI.StartTask()
except daq.DAQError as err:
print("DAQmx Error %s"%err)
# set camera to external control
# DCAM sets the camera back to INTERNAL mode after each acquisition
core.set_config('Camera-TriggerSource','EXTERNAL')
core.wait_for_config('Camera-TriggerSource','EXTERNAL')
# verify that camera actually switched back to external trigger mode
trigger_state = core.get_property('OrcaFusionBT','TRIGGER SOURCE')
# if not in external control, keep trying until camera changes settings
while not(trigger_state =='EXTERNAL'):
time.sleep(2.0)
core.set_config('Camera-TriggerSource','EXTERNAL')
core.wait_for_config('Camera-TriggerSource','EXTERNAL')
trigger_state = core.get_property('OrcaFusionBT','TRIGGER SOURCE')
print('T: '+str(t_idx)+' Y: '+str(y_idx)+' Z: '+str(z_idx)+' C: '+str(ch_idx))
# run acquisition for this tyzc combination
with Acquisition(directory=save_directory, name=save_name_tyzc,
post_camera_hook_fn=camera_hook_fn, show_display=False, max_multi_res_index=0,
saving_queue_size=5000) as acq:
acq.acquire(events)
# clean up acquisition so that AcqEngJ releases directory.
# NOTE: This currently does not work.
acq = None
acq_deleted = False
while not(acq_deleted):
try:
del acq
                        except NameError:
time.sleep(0.1)
acq_deleted = False
else:
gc.collect()
acq_deleted = True
# stop DAQ and make sure it is at zero
try:
## Stop and clear both tasks
taskDI.StopTask()
taskDO.StopTask()
taskDI.ClearTask()
taskDO.ClearTask()
except daq.DAQError as err:
print("DAQmx Error %s"%err)
# save experimental info after first tile.
# we do it this way so that Pycromanager can manage the directories.
if (setup_metadata):
# save stage scan parameters
scan_param_data = [{'root_name': str(save_name),
'scan_type': str('stage'),
'theta': float(30.0),
'scan_step': float(scan_axis_step_um*1000.),
'pixel_size': float(pixel_size_um*1000.),
'num_t': int(timepoints),
'num_y': int(tile_axis_positions),
'num_z': int(height_axis_positions),
'num_ch': int(n_active_channels),
'scan_axis_positions': int(scan_axis_positions),
'excess_scan_positions': int(excess_scan_positions),
'y_pixels': int(y_pixels),
'x_pixels': int(x_pixels),
'405_active': bool(channel_states[0]),
'488_active': bool(channel_states[1]),
'561_active': bool(channel_states[2]),
'635_active': bool(channel_states[3]),
'730_active': bool(channel_states[4])}]
# df_stage_scan_params = pd.DataFrame(scan_param_data)
# save_name_stage_params = save_directory / 'scan_metadata.csv'
# df_stage_scan_params.to_csv(save_name_stage_params)
data_io.write_metadata(scan_param_data[0], save_directory / Path('scan_metadata.csv'))
setup_metadata=False
# save stage scan positions after each tile
save_name_stage_positions = Path('t'+str(t_idx).zfill(4)+'_y'+str(y_idx).zfill(4)+'_z'+str(z_idx).zfill(4)+'_ch'+str(ch_idx).zfill(4)+'_stage_positions.csv')
save_name_stage_positions = save_directory / save_name_stage_positions
# todo: use data_io instead
df_current_stage.to_csv(save_name_stage_positions)
# turn on 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','No')
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
core.set_property('TigerCommHub','SerialCommand',command)
ready = core.get_property('TigerCommHub','SerialResponse')
time.sleep(.500)
# turn off 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','Yes')
'''
# if first tile, make parent directory on NAS and start reconstruction script on the server
if setup_processing:
# make home directory on NAS
save_directory_path = Path(save_directory)
remote_directory = Path('y:/') / Path(save_directory_path.parts[1])
cmd='mkdir ' + str(remote_directory)
status_mkdir = subprocess.run(cmd, shell=True)
# copy full experiment metadata to NAS
src= Path(save_directory) / Path('scan_metadata.csv')
dst= Path(remote_directory) / Path('scan_metadata.csv')
Thread(target=shutil.copy, args=[str(src), str(dst)]).start()
setup_processing=False
# copy current tyzc metadata to NAS
save_directory_path = Path(save_directory)
remote_directory = Path('y:/') / Path(save_directory_path.parts[1])
src= Path(save_directory) / Path(save_name_stage_positions.parts[2])
dst= Path(remote_directory) / Path(save_name_stage_positions.parts[2])
Thread(target=shutil.copy, args=[str(src), str(dst)]).start()
# copy current tyzc data to NAS
save_directory_path = Path(save_directory)
remote_directory = Path('y:/') / Path(save_directory_path.parts[1])
src= Path(save_directory) / Path(save_name_tyzc+ '_1')
dst= Path(remote_directory) / Path(save_name_tyzc+ '_1')
Thread(target=shutil.copytree, args=[str(src), str(dst)]).start()
'''
# set lasers to zero power
channel_powers = [0.,0.,0.,0.,0.]
core.set_property('Coherent-Scientific Remote','Laser 405-100C - PowerSetpoint (%)',channel_powers[0])
core.set_property('Coherent-Scientific Remote','Laser 488-150C - PowerSetpoint (%)',channel_powers[1])
core.set_property('Coherent-Scientific Remote','Laser OBIS LS 561-150 - PowerSetpoint (%)',channel_powers[2])
core.set_property('Coherent-Scientific Remote','Laser 637-140C - PowerSetpoint (%)',channel_powers[3])
core.set_property('Coherent-Scientific Remote','Laser 730-30C - PowerSetpoint (%)',channel_powers[4])
# turn all lasers off
core.set_config('Laser','Off')
core.wait_for_config('Laser','Off')
# set all lasers back to software control
core.set_config('Modulation-405','CW (constant power)')
core.wait_for_config('Modulation-405','CW (constant power)')
core.set_config('Modulation-488','CW (constant power)')
core.wait_for_config('Modulation-488','CW (constant power)')
core.set_config('Modulation-561','CW (constant power)')
core.wait_for_config('Modulation-561','CW (constant power)')
core.set_config('Modulation-637','CW (constant power)')
core.wait_for_config('Modulation-637','CW (constant power)')
core.set_config('Modulation-730','CW (constant power)')
core.wait_for_config('Modulation-730','CW (constant power)')
# set camera to internal control
core.set_config('Camera-TriggerSource','INTERNAL')
core.wait_for_config('Camera-TriggerSource','INTERNAL')
bridge.close()
#-----------------------------------------------------------------------------
if __name__ == "__main__":
main()
| 26,737
| 44.318644
| 177
|
py
|
OPM
|
OPM-master/reconstruction/run_opm_galvoscan.py
|
#!/usr/bin/env python
'''
OPM galvo scan using Pycromanager.
D. Shepherd 09/21 - bring metadata in line with new reconstruction code. Attempt timelapse with pause using event structure.
D. Shepherd 04/21 - streamline code for fast acquisition and immediate upload to server
P. Brown 03/21 - multiline digital and analog NI DAQ control using camera as master
D. Shepherd 01/21 - initial pycromanager work, ported from stage control code
'''
# imports
from pycromanager import Bridge, Acquisition
from pathlib import Path
import numpy as np
import PyDAQmx as daq
import ctypes as ct
import subprocess
import shutil
from threading import Thread
import data_io
def main():
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------Begin setup of scan parameters--------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
# set up lasers
channel_labels = ["405", "488", "561", "635", "730"]
channel_states = [False, False, True, False, False] # true -> active, false -> inactive
channel_powers = [30, 30, 100, 100, 0] # (0 -> 100%)
do_ind = [0, 1, 2, 3, 4] # digital output line corresponding to each channel
# parse which channels are active
active_channel_indices = [ind for ind, st in zip(do_ind, channel_states) if st]
n_active_channels = len(active_channel_indices)
print("%d active channels: " % n_active_channels, end="")
for ind in active_channel_indices:
print("%s " % channel_labels[ind], end="")
print("")
# exposure time
exposure_ms = 2.0 #unit: ms
# scan axis range
scan_axis_range_um = 20.0 # unit: microns
# galvo voltage at neutral
#galvo_neutral_volt = 0 # unit: volts
    galvo_neutral_volt = -.150 # unit: volts
scan_axis_step_um = 0.4 # unit: um
# timepoints
timepoints = 400
# timepoint interval (s)
timing_interval = 0
# setup file name
save_directory=Path('D:/20211204')
save_name = 'fla-suc40'
# automatically transfer files to NAS at end of dataset
transfer_files = False
# display data
display_flag = False
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------End setup of scan parameters----------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
with Bridge() as bridge:
core = bridge.get_core()
        # set camera to fast readout mode; wait_for_config ensures the mode change completes
core.set_config('Camera-Setup','ScanMode3')
core.wait_for_config('Camera-Setup','ScanMode3')
# set camera to internal trigger
core.set_config('Camera-TriggerSource','INTERNAL')
core.wait_for_config('Camera-TriggerSource','INTERNAL')
        # set camera to output positive exposure triggers on all lines
core.set_property('OrcaFusionBT','OUTPUT TRIGGER KIND[0]','EXPOSURE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER KIND[1]','EXPOSURE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER KIND[2]','EXPOSURE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER POLARITY[0]','POSITIVE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER POLARITY[1]','POSITIVE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER POLARITY[2]','POSITIVE')
# set exposure time
core.set_exposure(exposure_ms)
# determine image size
core.snap_image()
y_pixels = core.get_image_height()
x_pixels = core.get_image_width()
        # turn all lasers off
core.set_config('Laser','Off')
core.wait_for_config('Laser','Off')
        # set all lasers to external triggering
core.set_config('Modulation-405','External-Digital')
core.wait_for_config('Modulation-405','External-Digital')
core.set_config('Modulation-488','External-Digital')
core.wait_for_config('Modulation-488','External-Digital')
core.set_config('Modulation-561','External-Digital')
core.wait_for_config('Modulation-561','External-Digital')
core.set_config('Modulation-637','External-Digital')
core.wait_for_config('Modulation-637','External-Digital')
core.set_config('Modulation-730','External-Digital')
core.wait_for_config('Modulation-730','External-Digital')
# turn all lasers on
core.set_config('Laser','AllOn')
core.wait_for_config('Laser','AllOn')
core.set_property('Coherent-Scientific Remote','Laser 405-100C - PowerSetpoint (%)',channel_powers[0])
core.set_property('Coherent-Scientific Remote','Laser 488-150C - PowerSetpoint (%)',channel_powers[1])
core.set_property('Coherent-Scientific Remote','Laser OBIS LS 561-150 - PowerSetpoint (%)',channel_powers[2])
core.set_property('Coherent-Scientific Remote','Laser 637-140C - PowerSetpoint (%)',channel_powers[3])
core.set_property('Coherent-Scientific Remote','Laser 730-30C - PowerSetpoint (%)',channel_powers[4])
# camera pixel size
pixel_size_um = .115 # unit: um
# galvo scan setup
scan_axis_calibration = 0.043 # unit: V / um
min_volt = -(scan_axis_range_um * scan_axis_calibration / 2.) + galvo_neutral_volt # unit: volts
scan_axis_step_volts = scan_axis_step_um * scan_axis_calibration # unit: V
scan_axis_range_volts = scan_axis_range_um * scan_axis_calibration # unit: V
scan_steps = np.rint(scan_axis_range_volts / scan_axis_step_volts).astype(np.int16) # galvo steps
# handle case where no scan steps
if scan_steps == 0:
scan_steps = 1
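        # worked example with the values above: range 20.0 um * 0.043 V/um = 0.86 V,
        # step 0.4 um * 0.043 V/um = 0.0172 V, so scan_steps = rint(0.86 / 0.0172) = 50;
        # min_volt = -(0.86 / 2) + (-0.15) = -0.58 V.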
# output experiment info
print("Scan axis range: %.1f um = %0.3fV, Scan axis step: %.1f nm = %0.3fV , Number of galvo positions: %d" %
(scan_axis_range_um, scan_axis_range_volts, scan_axis_step_um * 1000, scan_axis_step_volts, scan_steps))
print('Galvo neutral (Volt): ' + str(galvo_neutral_volt)+', Min voltage (volt): '+str(min_volt))
print('Time points: ' + str(timepoints))
# create events to execute scan
events = []
# Changes to event structure motivated by Henry's notes that pycromanager struggles to read "non-standard" axes.
# https://github.com/micro-manager/pycro-manager/issues/220
for t in range(timepoints):
for x in range(scan_steps):
ch_idx = 0
for c in range(len(do_ind)):
if channel_states[c]:
if timing_interval == 0:
evt = { 'axes': {'t': t, 'z': x, 'c': ch_idx }}
else:
evt = { 'axes': {'t': t, 'z': x, 'c': ch_idx },
'min_start_time': t*timing_interval}
ch_idx = ch_idx+1
events.append(evt)
print("Generated %d events" % len(events))
# setup DAQ
nvoltage_steps = scan_steps
# 2 time steps per frame, except for first frame plus one final frame to reset voltage
#samples_per_ch = (nvoltage_steps * 2 - 1) + 1
samples_per_ch = (nvoltage_steps * 2 * n_active_channels - 1) + 1
DAQ_sample_rate_Hz = 10000
#retriggerable = True
num_DI_channels = 8
# Generate values for DO
dataDO = np.zeros((samples_per_ch, num_DI_channels), dtype=np.uint8)
for ii, ind in enumerate(active_channel_indices):
dataDO[2*ii::2*n_active_channels, ind] = 1
dataDO[-1, :] = 0
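        # e.g. with two active channels on lines 1 and 2, each frame occupies
        # 2 * n_active_channels = 4 samples and the pattern repeats as
        #   line 1: 1 0 0 0 | 1 0 0 0 | ...
        #   line 2: 0 0 1 0 | 0 0 1 0 | ...
        # so the lasers strobe on alternating camera frames; the final sample
        # forces all lines low.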
# generate voltage steps
        max_volt = min_volt + scan_axis_range_volts # unit: volts
voltage_values = np.linspace(min_volt, max_volt, nvoltage_steps)
# Generate values for AO
waveform = np.zeros(samples_per_ch)
# one less voltage value for first frame
waveform[0:2*n_active_channels - 1] = voltage_values[0]
if len(voltage_values) > 1:
# (2 * # active channels) voltage values for all other frames
waveform[2*n_active_channels - 1:-1] = np.kron(voltage_values[1:], np.ones(2 * n_active_channels))
# set back to initial value at end
waveform[-1] = voltage_values[0]
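        # worked example with nvoltage_steps = 3 and one active channel:
        #   samples_per_ch = (3 * 2 * 1 - 1) + 1 = 6
        #   waveform = [v0, v1, v1, v2, v2, v0]
        # i.e. the first frame holds v0 for one sample, every later galvo position
        # is held for two samples (one per exposure edge), and the final sample
        # returns the galvo to the starting voltage.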
try:
# ----- DIGITAL input -------
taskDI = daq.Task()
taskDI.CreateDIChan("/Dev1/PFI0", "", daq.DAQmx_Val_ChanForAllLines)
            ## Configure change detection timing (from wave generator)
            taskDI.CfgInputBuffer(0) # must be enforced for change-detection timing, i.e. no buffer
taskDI.CfgChangeDetectionTiming("/Dev1/PFI0", "/Dev1/PFI0", daq.DAQmx_Val_ContSamps, 0)
            ## Set the starting trigger
taskDI.CfgDigEdgeStartTrig("/Dev1/PFI0", daq.DAQmx_Val_Rising)
## Export DI signal to unused PFI pins, for clock and start
taskDI.ExportSignal(daq.DAQmx_Val_ChangeDetectionEvent, "/Dev1/PFI2")
taskDI.ExportSignal(daq.DAQmx_Val_StartTrigger, "/Dev1/PFI1")
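            # Signal chain sketch: the camera exposure output arrives on PFI0;
            # change detection fires on every edge and is re-exported on PFI2,
            # which clocks both the DO (laser strobing) and AO (galvo ramp) tasks
            # below, so the galvo steps and lasers switch in lockstep with frames.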
# ----- DIGITAL output ------
taskDO = daq.Task()
# TO DO: Write each laser line separately!
taskDO.CreateDOChan("/Dev1/port0/line0:7", "", daq.DAQmx_Val_ChanForAllLines)
## Configure timing (from DI task)
taskDO.CfgSampClkTiming("/Dev1/PFI2", DAQ_sample_rate_Hz, daq.DAQmx_Val_Rising, daq.DAQmx_Val_ContSamps, samples_per_ch)
## Write the output waveform
samples_per_ch_ct_digital = ct.c_int32()
taskDO.WriteDigitalLines(samples_per_ch, False, 10.0, daq.DAQmx_Val_GroupByChannel, dataDO, ct.byref(samples_per_ch_ct_digital), None)
# ------- ANALOG output -----------
# first, set the galvo to the initial point if it is not already
taskAO_first = daq.Task()
taskAO_first.CreateAOVoltageChan("/Dev1/ao0", "", -6.0, 6.0, daq.DAQmx_Val_Volts, None)
taskAO_first.WriteAnalogScalarF64(True, -1, waveform[0], None)
taskAO_first.StopTask()
taskAO_first.ClearTask()
# now set up the task to ramp the galvo
taskAO = daq.Task()
taskAO.CreateAOVoltageChan("/Dev1/ao0", "", -6.0, 6.0, daq.DAQmx_Val_Volts, None)
## Configure timing (from DI task)
taskAO.CfgSampClkTiming("/Dev1/PFI2", DAQ_sample_rate_Hz, daq.DAQmx_Val_Rising, daq.DAQmx_Val_ContSamps, samples_per_ch)
## Write the output waveform
samples_per_ch_ct = ct.c_int32()
taskAO.WriteAnalogF64(samples_per_ch, False, 10.0, daq.DAQmx_Val_GroupByScanNumber, waveform, ct.byref(samples_per_ch_ct), None)
## ------ Start both tasks ----------
taskAO.StartTask()
taskDO.StartTask()
taskDI.StartTask()
except daq.DAQError as err:
print("DAQmx Error %s"%err)
# run acquisition
with Acquisition(directory=save_directory, name=save_name, show_display=display_flag, max_multi_res_index=0, saving_queue_size=5000) as acq:
acq.acquire(events)
acq = None
# stop DAQ
try:
## Stop and clear both tasks
taskDI.StopTask()
taskDO.StopTask()
taskAO.StopTask()
taskDI.ClearTask()
taskAO.ClearTask()
taskDO.ClearTask()
except daq.DAQError as err:
print("DAQmx Error %s"%err)
# save galvo scan parameters
scan_param_data = [{'root_name': str(save_name),
'scan_type': 'galvo',
'theta': 30.0,
'exposure_ms': exposure_ms,
'scan_step': scan_axis_step_um*1000.,
'pixel_size': pixel_size_um*1000.,
'galvo_scan_range_um': scan_axis_range_um,
'galvo_volts_per_um': scan_axis_calibration,
'num_t': int(timepoints),
'num_y': 1, # might need to change this eventually
'num_z': 1, # might need to change this eventually
'num_ch': int(n_active_channels),
'scan_axis_positions': int(scan_steps),
'y_pixels': y_pixels,
'x_pixels': x_pixels,
'405_active': channel_states[0],
'488_active': channel_states[1],
'561_active': channel_states[2],
'635_active': channel_states[3],
'730_active': channel_states[4]}]
data_io.write_metadata(scan_param_data[0], save_directory / 'scan_metadata.csv')
with Bridge() as bridge:
core = bridge.get_core()
# turn all lasers off
core.set_config('Laser','Off')
core.wait_for_config('Laser','Off')
# set all lasers back to software control
core.set_config('Modulation-405','CW (constant power)')
core.wait_for_config('Modulation-405','CW (constant power)')
core.set_config('Modulation-488','CW (constant power)')
core.wait_for_config('Modulation-488','CW (constant power)')
core.set_config('Modulation-561','CW (constant power)')
core.wait_for_config('Modulation-561','CW (constant power)')
core.set_config('Modulation-637','CW (constant power)')
core.wait_for_config('Modulation-637','CW (constant power)')
core.set_config('Modulation-730','CW (constant power)')
core.wait_for_config('Modulation-730','CW (constant power)')
# set all laser to zero power
channel_powers=[0,0,0,0,0]
core.set_property('Coherent-Scientific Remote','Laser 405-100C - PowerSetpoint (%)',channel_powers[0])
core.set_property('Coherent-Scientific Remote','Laser 488-150C - PowerSetpoint (%)',channel_powers[1])
core.set_property('Coherent-Scientific Remote','Laser OBIS LS 561-150 - PowerSetpoint (%)',channel_powers[2])
core.set_property('Coherent-Scientific Remote','Laser 637-140C - PowerSetpoint (%)',channel_powers[3])
core.set_property('Coherent-Scientific Remote','Laser 730-30C - PowerSetpoint (%)',channel_powers[4])
# put the galvo back to neutral
# first, set the galvo to the initial point if it is not already
taskAO_last = daq.Task()
taskAO_last.CreateAOVoltageChan("/Dev1/ao0","",-6.0,6.0,daq.DAQmx_Val_Volts,None)
taskAO_last.WriteAnalogScalarF64(True, -1, galvo_neutral_volt, None)
taskAO_last.StopTask()
taskAO_last.ClearTask()
if transfer_files:
# make parent directory on NAS and start reconstruction script on the server
# make home directory on NAS
save_directory_path = Path(save_directory)
remote_directory = Path('y:/') / Path(save_directory_path.parts[1])
cmd='mkdir ' + str(remote_directory)
status_mkdir = subprocess.run(cmd, shell=True)
# copy full experiment metadata to NAS
src= Path(save_directory) / Path('scan_metadata.csv')
dst= Path(remote_directory) / Path('scan_metadata.csv')
Thread(target=shutil.copy, args=[str(src), str(dst)]).start()
# copy data to NAS
save_directory_path = Path(save_directory)
remote_directory = Path('y:/') / Path(save_directory_path.parts[1])
src= Path(save_directory) / Path(save_name+ '_1')
dst= Path(remote_directory) / Path(save_name+ '_1')
Thread(target=shutil.copytree, args=[str(src), str(dst)]).start()
# run
if __name__ == "__main__":
main()
| 15,693
| 42.715877
| 144
|
py
|
OPM
|
OPM-master/reconstruction/image_post_processing.py
|
#!/usr/bin/env python
'''
QI2lab OPM suite
Reconstruction tools
Image processing tools for OPM reconstruction
Last updated: Shepherd 01/22 - changes to include dexp deconvolution and recent other changes.
'''
import sys
import numpy as np
from pathlib import Path
from tifffile import tifffile
from numba import njit, prange
from flat_field import calc_flatfield
from functools import partial
import dask.array as da
from dask.diagnostics import ProgressBar
import gc
import cupy as cp
try:
import microvolution_py as mv
DECON_LIBRARY = 'mv'
except ImportError:
from dexp.utils.backends import Backend, CupyBackend, NumpyBackend
from dexp.processing.deconvolution.lr_deconvolution import lucy_richardson_deconvolution
from dexp.processing.restoration.dehazing import dehaze
DECON_LIBRARY = 'dexp'
# http://numba.pydata.org/numba-doc/latest/user/parallel.html#numba-parallel
@njit(parallel=True)
def deskew(data,theta,distance,pixel_size):
"""
Perform parallelized orthogonal interpolation into a uniform pixel size grid.
:param data: ndarray
image stack of uniformly spaced OPM planes
:param theta: float
angle relative to coverslip
:param distance: float
step between image planes along coverslip
    :param pixel_size: float
in-plane camera pixel size in OPM coordinates
:return output: ndarray
image stack of deskewed OPM planes on uniform grid
"""
# unwrap parameters
[num_images,ny,nx]=data.shape # (pixels)
# change step size from physical space (nm) to camera space (pixels)
pixel_step = distance/pixel_size # (pixels)
# calculate the number of pixels scanned during stage scan
scan_end = num_images * pixel_step # (pixels)
# calculate properties for final image
final_ny = np.int64(np.ceil(scan_end+ny*np.cos(theta*np.pi/180))) # (pixels)
final_nz = np.int64(np.ceil(ny*np.sin(theta*np.pi/180))) # (pixels)
final_nx = np.int64(nx) # (pixels)
# create final image
    output = np.zeros((final_nz, final_ny, final_nx),dtype=np.float32) # (pixels, pixels, pixels) - data is float32
# precalculate trig functions for scan angle
tantheta = np.float32(np.tan(theta * np.pi/180)) # (float32)
sintheta = np.float32(np.sin(theta * np.pi/180)) # (float32)
costheta = np.float32(np.cos(theta * np.pi/180)) # (float32)
# perform orthogonal interpolation
# loop through output z planes
# defined as parallel loop in numba
# http://numba.pydata.org/numba-doc/latest/user/parallel.html#numba-parallel
for z in prange(0,final_nz):
# calculate range of output y pixels to populate
y_range_min=np.minimum(0,np.int64(np.floor(np.float32(z)/tantheta)))
y_range_max=np.maximum(final_ny,np.int64(np.ceil(scan_end+np.float32(z)/tantheta+1)))
# loop through final y pixels
# defined as parallel loop in numba
# http://numba.pydata.org/numba-doc/latest/user/parallel.html#numba-parallel
for y in prange(y_range_min,y_range_max):
# find the virtual tilted plane that intersects the interpolated plane
virtual_plane = y - z/tantheta
# find raw data planes that surround the virtual plane
plane_before = np.int64(np.floor(virtual_plane/pixel_step))
plane_after = np.int64(plane_before+1)
# continue if raw data planes are within the data range
if ((plane_before>=0) and (plane_after<num_images)):
# find distance of a point on the interpolated plane to plane_before and plane_after
l_before = virtual_plane - plane_before * pixel_step
l_after = pixel_step - l_before
# determine location of a point along the interpolated plane
za = z/sintheta
virtual_pos_before = za + l_before*costheta
virtual_pos_after = za - l_after*costheta
                    # determine nearest data points to interpolated point in raw data
pos_before = np.int64(np.floor(virtual_pos_before))
pos_after = np.int64(np.floor(virtual_pos_after))
# continue if within data bounds
if ((pos_before>=0) and (pos_after >= 0) and (pos_before<ny-1) and (pos_after<ny-1)):
# determine points surrounding interpolated point on the virtual plane
dz_before = virtual_pos_before - pos_before
dz_after = virtual_pos_after - pos_after
# compute final image plane using orthogonal interpolation
output[z,y,:] = (l_before * dz_after * data[plane_after,pos_after+1,:] +
l_before * (1-dz_after) * data[plane_after,pos_after,:] +
l_after * dz_before * data[plane_before,pos_before+1,:] +
l_after * (1-dz_before) * data[plane_before,pos_before,:]) /pixel_step
# return output
return output
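# usage sketch for deskew() (illustrative values; `distance` and `pixel_size`
# only need to share units, here nm):
#
#   stack = np.random.randint(0, 2**16, (200, 256, 1024)).astype(np.float32)
#   deskewed = deskew(stack, theta=30.0, distance=400., pixel_size=115.)
#   # -> uniform-grid volume of shape (final_nz, final_ny, final_nx)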
def manage_flat_field_py(stack):
"""
    Manage flat- and dark-field correction using a Python adaptation of the BaSiC algorithm.
Returns flat- and darkfield corrected image
:param stack: ndarray
matrix of OPM planes
:return corrected_stack: ndarray of deskewed OPM planes on uniform grid
"""
num_images = 500
if stack.shape[0] > num_images:
stack_for_flat_field = stack[np.random.choice(stack.shape[0], num_images, replace=False)]
else:
stack_for_flat_field = stack
flat_field, dark_field = calc_flatfield(images=stack_for_flat_field)
corrected_stack = perform_flat_field(flat_field,dark_field,stack)
return corrected_stack, flat_field, dark_field
def perform_flat_field(flat_field,dark_field,stack):
"""
Calculate flat- and darkfield corrected image. Returns corrected image.
:param flat_field: ndarray
flatfield correction
:param dark_field: ndarray
darkfield correction
:param stack: dask.array
matrix of OPM planes
:return corrected_stack: ndarray
corrected OPM image planes
"""
#dark_field[dark_field>50]=50
#corrected_stack = stack.astype(np.float32) - dark_field
stack[stack<0] = 0
corrected_stack = stack/flat_field
return corrected_stack
def lr_deconvolution(image,psf,iterations=50):
"""
Tiled Lucy-Richardson deconvolution using DECON_LIBRARY
:param image: ndarray
raw data
:param psf: ndarray
theoretical PSF
:param iterations: int
number of iterations to run
:return deconvolved: ndarray
deconvolved image
"""
# create dask array
scan_chunk_size = 512
if image.shape[0]<scan_chunk_size:
dask_raw = da.from_array(image,chunks=(image.shape[0],image.shape[1],image.shape[2]))
overlap_depth = (0,2*psf.shape[1],2*psf.shape[1])
else:
dask_raw = da.from_array(image,chunks=(scan_chunk_size,image.shape[1],image.shape[2]))
overlap_depth = 2*psf.shape[0]
del image
gc.collect()
if DECON_LIBRARY=='dexp':
# define dask dexp partial function for GPU LR deconvolution
lr_dask = partial(dexp_lr_decon,psf=psf,num_iterations=iterations,padding=2*psf.shape[0],internal_dtype=np.float16)
else:
        lr_dask = partial(mv_lr_decon,psf=psf,iterations=iterations)
# create dask plan for overlapped blocks
dask_decon = da.map_overlap(lr_dask,dask_raw,depth=overlap_depth,boundary=None,trim=True,meta=np.array((), dtype=np.uint16))
# perform LR deconvolution in blocks
if DECON_LIBRARY=='dexp':
with CupyBackend(enable_cutensor=True,enable_cub=True,enable_fft_planning=True):
with ProgressBar():
decon_data = dask_decon.compute(scheduler='single-threaded')
else:
with ProgressBar():
decon_data = dask_decon.compute(scheduler='single-threaded')
# clean up memory
cp.clear_memo()
del dask_decon
gc.collect()
return decon_data.astype(np.uint16)
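# usage sketch (hypothetical arrays): tiles of up to 512 scan planes are
# deconvolved independently with a 2*psf overlap to suppress seam artifacts:
#
#   decon = lr_deconvolution(raw_stack, skewed_psf, iterations=50)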
def dexp_lr_decon(image,psf,num_iterations,padding,internal_dtype):
"""
Lucy-Richardson deconvolution using dexp library
:param image: ndarray
data tile generated by dask
    :param psf: ndarray
        theoretical PSF
    :param num_iterations: int
        number of iterations to run
    :param padding: int
internal padding for deconvolution
:param internal_dtype: dtype
data type to use on GPU
:return result: ndarray
deconvolved data tile
"""
# LR deconvolution on GPU
deconvolved = lucy_richardson_deconvolution(image.astype(np.float16), psf.astype(np.float16), num_iterations=num_iterations, padding=padding, blind_spot=3, internal_dtype=internal_dtype)
deconvolved = _c(dehaze(deconvolved, in_place=True, internal_dtype=internal_dtype))
# clean up memory
del image, psf
cp.clear_memo()
gc.collect()
return deconvolved.astype(np.uint16)
def mv_lr_decon(image,psf,iterations):
'''
    Lucy-Richardson deconvolution using commercial Microvolution library.
:param image: ndarray
raw image
    :param psf: ndarray
        theoretical PSF
:param iterations: int
number of iterations
:return image: ndarray
deconvolved image
'''
params = mv.DeconParameters()
params.generatePsf = False
params.nx = image.shape[2]
params.ny = image.shape[1]
params.nz = image.shape[0]
params.blind = False
params.psfNx = psf.shape[2]
params.psfNy = psf.shape[1]
params.psfNz = psf.shape[0]
params.dr = 115.0
params.dz = 400.0
params.psfDr = 115.0
params.psfDz = 400.0
params.iterations = iterations
params.background = 50
params.regularizationType=mv.RegularizationType_TV
params.scaling = mv.Scaling_U16
try:
launcher = mv.DeconvolutionLauncher()
image = image.astype(np.float16)
launcher.SetParameters(params)
for z in range(params.nz):
launcher.SetImageSlice(z, image[z,:])
psf_image = psf.astype(np.float16)
for z in range(params.psfNz):
launcher.SetPsfSlice(z,psf_image[z,:])
new_image = np.zeros(image.shape,dtype=np.uint16)
del image
launcher.Run()
for z in range(params.nz):
launcher.RetrieveImageSlice(z, new_image[z,:])
except:
err = sys.exc_info()
print("Unexpected error:", err[0])
print(err[1])
print(err[2])
new_image = np.zeros(image.shape,dtype=np.uint16)
return new_image.astype(np.uint16)
def _c(array):
"""
Transfer dexp image from GPU to CPU
:param array: dexp
array on dexp backend
:return array: ndarray
array converted to numpy
"""
return Backend.to_numpy(array)
| 10,957
| 32.716923
| 190
|
py
|
OPM
|
OPM-master/reconstruction/data_io.py
|
#!/usr/bin/env python
'''
QI2lab OPM suite
Reconstruction tools
Read and write metadata; read raw data; read pre-generated OPM psfs
'''
import re
from npy2bdv.npy2bdv import BdvEditor
import pandas as pd
import numpy as np
from pathlib import Path
from tifffile import tifffile
def read_metadata(fname):
"""
    Read data from a CSV file consisting of one line of titles and one line of values. Return as a dictionary.
:param fname: str
filename
:return metadata: dict
metadata dictionary
"""
scan_data_raw_lines = []
with open(fname, "r") as f:
for line in f:
scan_data_raw_lines.append(line.replace("\n", ""))
titles = scan_data_raw_lines[0].split(",")
# convert values to appropriate datatypes
vals = scan_data_raw_lines[1].split(",")
for ii in range(len(vals)):
if re.fullmatch("\d+", vals[ii]):
vals[ii] = int(vals[ii])
elif re.fullmatch("\d*.\d+", vals[ii]):
vals[ii] = float(vals[ii])
elif vals[ii].lower() == "False".lower():
vals[ii] = False
elif vals[ii].lower() == "True".lower():
vals[ii] = True
else:
# otherwise, leave as string
pass
# convert to dictionary
metadata = {}
for t, v in zip(titles, vals):
metadata[t] = v
return metadata
def write_metadata(data_dict, save_path):
"""
Write dictionary as CSV file
:param data_dict: dict
metadata dictionary
:param save_path: Path
path for file
:return: None
"""
pd.DataFrame([data_dict]).to_csv(save_path)
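# roundtrip sketch (hypothetical values): write_metadata() stores one title row
# and one value row, which read_metadata() parses back into typed values:
#
#   write_metadata({'num_t': 3, 'scan_type': 'galvo'}, Path('scan_metadata.csv'))
#   md = read_metadata('scan_metadata.csv')  # md['num_t'] == 3 (int)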
def return_data_numpy(dataset, time_axis, channel_axis, num_images, excess_images, y_pixels,x_pixels):
"""
:param dataset: dataset
pycromanager dataset object
:param channel_axis: int
channel axis index
:param time_axis: int
time axis index
:param num_images: int
number of images in scan direction (TO DO: change to tuple to load range)
:param excess_images: int
number of excess images acquired during stage warmup
:param y_pixels: int
y pixels
:param x_pixels: int
x pixels
:return data_numpy: ndarray
        3D numpy array of OPM data. First axis is scan axis
"""
data_numpy = np.empty([(num_images-excess_images),y_pixels,x_pixels]).astype(np.uint16)
j = 0
for i in range(excess_images,num_images):
if (time_axis is None):
if (channel_axis is None):
data_numpy[j,:,:] = dataset.read_image(z=i)
else:
data_numpy[j,:,:] = dataset.read_image(z=i, c=channel_axis)
else:
if (channel_axis is None):
data_numpy[j,:,:] = dataset.read_image(z=i, t=time_axis)
else:
data_numpy[j,:,:] = dataset.read_image(z=i, t=time_axis, c=channel_axis)
j = j + 1
return data_numpy
def return_data_numpy_widefield(dataset, channel_axis, ch_BDV_idx, num_z, y_pixels,x_pixels):
"""
:param dataset: dataset
pycromanager dataset object
:param channel_axis: int
channel axis index
    :param ch_BDV_idx: int
        BDV channel index
    :param num_z: int
        number of images in z stack
:param y_pixels: int
y pixels
:param x_pixels: int
x pixels
:return data_numpy: ndarray
3D numpy array of requested data
"""
data_numpy = np.empty([num_z,y_pixels,x_pixels]).astype(np.uint16)
for i in range(num_z):
if (channel_axis is None):
data_numpy[i,:,:] = dataset.read_image(z=i)
else:
data_numpy[i,:,:] = dataset.read_image(z=i, c=channel_axis, channel=ch_BDV_idx)
return data_numpy
def stitch_data(path_to_xml,iterative_flag):
"""
Call BigStitcher via Python to calculate rigid stitching transformations across tiles and rounds
:param path_to_xml: Path
path to BDV XML. BDV H5 must be present for loading
:param iterative_flag: Bool
flag if multiple rounds need to be aligned
"""
# TO DO: 1. write either pyimagej bridge + macro OR call FIJI/BigStitcher in headless mode.
# 2. fix flipped x-axis between Python and FIJI. Easier to flip data in Python than deal with
# annoying affine that flips data.
def return_affine_xform(path_to_xml,r_idx,y_idx,z_idx,total_z_pos):
"""
Return affine transformation for a given tile from BDV XML
:param path_to_xml: Path
path to BDV XML. BDV H5 must be present for loading
:param r_idx: integer
round index
:param y_idx: integer
y tile index
:param z_idx: integer
z tile index
:param total_z_pos: integer
total number of z tiles in data
    :return affine_xforms: list
        list of affine transforms read from the BDV XML
"""
bdv_editor = BdvEditor(str(path_to_xml))
tile_idx = (y_idx+z_idx)+(y_idx*(total_z_pos-1))
affine_xforms = []
read_affine_success = True
affine_idx = 0
while read_affine_success:
try:
affine_xform = bdv_editor.read_affine(time=r_idx,illumination=0,channel=0,tile=tile_idx,angle=0,index=affine_idx)
        except Exception:
read_affine_success = False
else:
affine_xforms.append(affine_xform)
affine_idx = affine_idx + 1
read_affine_success = True
return affine_xforms
def return_opm_psf(wavelength_um):
"""
Load pre-generated OPM psf
TO DO: write checks and generate PSF if it does not exist on disk
    :param wavelength_um: float
        wavelength in um
:return psf: ndarray
pre-generated skewed PSF
"""
wavelength_nm = int(np.round(wavelength_um*1000,0))
psf_path = Path('opm_psf_'+str(wavelength_nm).zfill(0)+'_nm.tif')
opm_psf = tifffile.imread(psf_path)
return opm_psf
| 5,989
| 27.388626
| 125
|
py
|
OPM
|
OPM-master/reconstruction/flat_field.py
|
#!/usr/bin/env python
'''
Python and cupy implementation of BaSiC flat-field correction (doi: 10.1038/ncomms14836)
Adapted from code found at: https://github.com/peng-lab/PyBasicCellprofilerPlugin
TO DO: Tons of optimization opportunities with cupy, numba, and cucim. Maybe need to write our own DCT operator for use on GPU?
Licensing of python code unclear, sent an email to clarify if we can reproduce here since we don't need full
CellProfiler plugin framework
Last updated: Shepherd 06/21
'''
import numpy as np
from typing import List
from skimage.transform import resize as skresize
from scipy.fftpack import dct, idct
import cupy as cp
RESIZE_ORDER = 1
RESIZE_MODE = "symmetric"
PRESERVE_RANGE = True
OUTPUT_IMAGE = "OutputImage"
FIRST_CYCLE = "First Cycle"
LAST_CYCLE = "Last Cycle"
def calc_flatfield(images,if_darkfield=True,if_baseline_drift=False,lambda_flatfield=0,lambda_darkfield=0,max_iterations=100,optimization_tolerance=1.0e-6,
max_reweight_iterations=10,eplson=0.1,varying_coeff=True,reweight_tolerance=1.0e-3):
"""
Function to calculate darkfield and brightfield correction from an image stack
:param images: ndarray
    :param if_darkfield: boolean
    :param if_baseline_drift: boolean
:param lambda_flatfield: float
:param lambda_darkfield: float
:param max_iterations: int
:param optimization_tolerance: float
:param max_reweight_iterations: int
    :param eplson: float
:param varying_coeff: float
:param reweight_tolerance: float
:return darkfield: ndarray
:return flatfield: ndarray
"""
_saved_size = images[0].shape
nrows = _saved_size[0]//16
ncols = _saved_size[1]//16
D = np.zeros((images.shape[0],nrows,ncols), dtype=np.uint16)
for i in range(images.shape[0]):
D[i,:,:] = _resize_image(image=images[i,:], y_side_size=ncols,x_side_size=nrows)
meanD = np.mean(D, axis=2)
meanD = meanD / np.mean(meanD)
W_meanD = _dct2d(meanD.T)
# setting lambda_flatfield and lambda_darkfield if they are not set by the user
if lambda_flatfield <= 0:
lambda_flatfield = np.sum(np.abs(W_meanD)) / 400 * 0.5
if lambda_darkfield <= 0:
lambda_darkfield = lambda_flatfield * 0.2
D = np.sort(D, axis=2)
XAoffset = np.zeros((nrows, ncols))
weight = np.ones(D.shape)
reweighting_iter = 0
flag_reweighting = True
flatfield_last = np.ones((nrows, ncols))
darkfield_last = np.random.randn(nrows, ncols)
while flag_reweighting:
reweighting_iter += 1
initial_flatfield = False
if initial_flatfield:
raise IOError('Initial flatfield option not implemented yet!')
else:
X_k_A, X_k_E, X_k_Aoffset = _inexact_alm_rspca_l1(
images = D,
lambda_flatfield = lambda_flatfield,
if_darkfield = if_darkfield,
lambda_darkfield = lambda_darkfield,
optimization_tolerance = optimization_tolerance,
max_iterations = max_iterations,
weight=weight
)
XA = np.reshape(X_k_A, [nrows, ncols, -1], order='F')
XE = np.reshape(X_k_E, [nrows, ncols, -1], order='F')
XAoffset = np.reshape(X_k_Aoffset, [nrows, ncols], order='F')
XE_norm = XE / np.mean(XA, axis=(0, 1))
weight = np.ones_like(XE_norm) / (np.abs(XE_norm) + eplson)
weight = weight * weight.size / np.sum(weight)
temp = np.mean(XA, axis=2) - XAoffset
flatfield_current = temp / np.mean(temp)
darkfield_current = XAoffset
mad_flatfield = np.sum(np.abs(flatfield_current - flatfield_last)) / np.sum(np.abs(flatfield_last))
temp_diff = np.sum(np.abs(darkfield_current - darkfield_last))
if temp_diff < 1e-7:
mad_darkfield = 0
else:
mad_darkfield = temp_diff / np.maximum(np.sum(np.abs(darkfield_last)), 1e-6)
flatfield_last = flatfield_current
darkfield_last = darkfield_current
if np.maximum(mad_flatfield,
mad_darkfield) <= reweight_tolerance or \
reweighting_iter >= max_reweight_iterations:
flag_reweighting = False
shading = np.mean(XA, 2) - XAoffset
flatfield = _resize_image(
image = shading,
x_side_size = _saved_size[0],
y_side_size = _saved_size[1]
)
flatfield = flatfield / np.mean(flatfield)
if if_darkfield:
darkfield = _resize_image(
image = XAoffset,
x_side_size = _saved_size[0],
y_side_size = _saved_size[1]
)
else:
darkfield = np.zeros_like(flatfield)
return flatfield.astype(np.float32), darkfield.astype(np.float32)
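# _resize_images_list() is referenced by baseline_drift() below but is not defined
# in this file; a minimal reconstruction is sketched here (an assumption: each
# frame is resized to a square (side_size, side_size) grid via _resize_image()).
def _resize_images_list(images_list, side_size):
    return [_resize_image(image=img, x_side_size=side_size, y_side_size=side_size)
            for img in images_list]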
def baseline_drift(images_list,working_size = 128, flatfield: np.ndarray = None, darkfield: np.ndarray = None, **kwargs):
#TODO: Rename s.t. fluorescence is included? E.g. background_fluorescence?
"""
    Estimation of background fluorescence signal for time-lapse movie.
Used in conjunction with BaSiC.
"""
nrows = ncols = working_size
# Preparing input images
resized_images = np.stack(_resize_images_list(images_list = images_list, side_size = working_size))
resized_images = resized_images.reshape([-1, nrows * nrows], order = 'F')
    # Resizing flat- and dark-field
    resized_flatfield = _resize_image(image = flatfield, x_side_size = working_size, y_side_size = working_size)
    resized_darkfield = _resize_image(image = darkfield, x_side_size = working_size, y_side_size = working_size)
# reweighting
_weights = np.ones(resized_images.shape)
eplson = 0.1
tol = 1e-6
for reweighting_iter in range(1,6):
W_idct_hat = np.reshape(resized_flatfield, (1,-1), order='F')
A_offset = np.reshape(resized_darkfield, (1,-1), order='F')
A1_coeff = np.mean(resized_images, 1).reshape([-1,1])
# main iteration loop starts:
# The first element of the second array of np.linalg.svd
_temp = np.linalg.svd(resized_images, full_matrices=False)[1]
norm_two = _temp[0]
mu = 12.5/norm_two # this one can be tuned
mu_bar = mu * 1e7
rho = 1.5 # this one can be tuned
d_norm = np.linalg.norm(resized_images, ord = 'fro')
ent1 = 1
_iter = 0
total_svd = 0
        converged = False
A1_hat = np.zeros(resized_images.shape)
E1_hat = np.zeros(resized_images.shape)
Y1 = 0
while not converged:
            _iter = _iter + 1
A1_hat = W_idct_hat * A1_coeff + A_offset
# update E1 using l0 norm
E1_hat = E1_hat + np.divide((resized_images - A1_hat - E1_hat + (1/mu)*Y1), ent1)
E1_hat = np.maximum(E1_hat - _weights/(ent1*mu), 0) +\
np.minimum(E1_hat + _weights/(ent1*mu), 0)
# update A1_coeff, A2_coeff and A_offset
#if coeff_flag
R1 = resized_images - E1_hat
A1_coeff = np.mean(R1,1).reshape(-1,1) - np.mean(A_offset,1)
A1_coeff[A1_coeff<0] = 0
Z1 = resized_images - A1_hat - E1_hat
Y1 = Y1 + mu*Z1
mu = min(mu*rho, mu_bar)
# stop Criterion
stopCriterion = np.linalg.norm(Z1, ord = 'fro') / d_norm
if stopCriterion < tol:
converged = True
# updating weight
# XE_norm = E1_hat / np.mean(A1_hat)
XE_norm = E1_hat
mean_vec = np.mean(A1_hat, axis=1)
            XE_norm = np.transpose(np.tile(mean_vec, (nrows * ncols, 1))) / XE_norm # nrows * ncols = working_size**2
_weights = 1./(abs(XE_norm)+eplson)
_weights = np.divide( np.multiply(_weights, _weights.shape[0] * _weights.shape[1]), np.sum(_weights))
return A1_coeff
def _inexact_alm_rspca_l1(images,lambda_flatfield,if_darkfield,lambda_darkfield,optimization_tolerance,max_iterations,weight=None):
if weight is not None and weight.size != images.size:
raise IOError('weight matrix has different size than input sequence')
# if
# Initialization and given default variables
p = images.shape[2]
q = images.shape[1]
m = p*q
n = images.shape[0]
images = np.reshape(images, (m, n), order='F')
if weight is not None:
weight = np.reshape(weight, (m, n), order='F')
else:
weight = np.ones_like(images)
#_, svd, _ = np.linalg.svd(images, full_matrices=False) #TODO: Is there a more efficient implementation of SVD?
c_images = cp.asarray(images)
_, c_svd, _ = cp.linalg.svd(c_images,full_matrices=False)
svd = cp.asnumpy(c_svd)
norm_two = svd[0]
Y1 = 0
#Y2 = 0
ent1 = 1
ent2 = 10
A1_hat = np.zeros_like(images)
A1_coeff = np.ones((1, images.shape[1]))
E1_hat = np.zeros_like(images)
W_hat = _dct2d(np.zeros((p, q)).T)
mu = 12.5 / norm_two
mu_bar = mu * 1e7
rho = 1.5
d_norm = np.linalg.norm(images, ord='fro')
A_offset = np.zeros((m, 1))
B1_uplimit = np.min(images)
B1_offset = 0
#A_uplimit = np.expand_dims(np.min(images, axis=1), 1)
A_inmask = np.zeros((p, q))
A_inmask[int(np.round(p / 6) - 1): int(np.round(p*5 / 6)), int(np.round(q / 6) - 1): int(np.round(q * 5 / 6))] = 1
# main iteration loop starts
iter = 0
total_svd = 0
converged = False
#time_zero = time.time()
#time_zero_it = time.time()
while not converged:
# time_zero_it = time.time()
iter += 1
if len(A1_coeff.shape) == 1:
A1_coeff = np.expand_dims(A1_coeff, 0)
if len(A_offset.shape) == 1:
A_offset = np.expand_dims(A_offset, 1)
W_idct_hat = _idct2d(W_hat.T)
A1_hat = np.dot(np.reshape(W_idct_hat, (-1,1), order='F'), A1_coeff) + A_offset
temp_W = (images - A1_hat - E1_hat + (1 / mu) * Y1) / ent1
temp_W = np.reshape(temp_W, (p, q, n), order='F')
temp_W = np.mean(temp_W, axis=2)
W_hat = W_hat + _dct2d(temp_W.T)
W_hat = np.maximum(W_hat - lambda_flatfield / (ent1 * mu), 0) + np.minimum(W_hat + lambda_flatfield / (ent1 * mu), 0)
W_idct_hat = _idct2d(W_hat.T)
if len(A1_coeff.shape) == 1:
A1_coeff = np.expand_dims(A1_coeff, 0)
if len(A_offset.shape) == 1:
A_offset = np.expand_dims(A_offset, 1)
A1_hat = np.dot(np.reshape(W_idct_hat, (-1,1), order='F'), A1_coeff) + A_offset
E1_hat = images - A1_hat + (1 / mu) * Y1 / ent1
E1_hat = _shrinkageOperator(E1_hat, weight / (ent1 * mu))
R1 = images - E1_hat
A1_coeff = np.mean(R1, 0) / np.mean(R1)
A1_coeff[A1_coeff < 0] = 0
if if_darkfield:
validA1coeff_idx = np.where(A1_coeff < 1)
B1_coeff = (np.mean(R1[np.reshape(W_idct_hat, -1, order='F') > np.mean(W_idct_hat) - 1e-6][:, validA1coeff_idx[0]], 0) - \
np.mean(R1[np.reshape(W_idct_hat, -1, order='F') < np.mean(W_idct_hat) + 1e-6][:, validA1coeff_idx[0]], 0)) / np.mean(R1)
k = np.array(validA1coeff_idx).shape[1]
temp1 = np.sum(A1_coeff[validA1coeff_idx[0]]**2)
temp2 = np.sum(A1_coeff[validA1coeff_idx[0]])
temp3 = np.sum(B1_coeff)
temp4 = np.sum(A1_coeff[validA1coeff_idx[0]] * B1_coeff)
temp5 = temp2 * temp3 - temp4 * k
if temp5 == 0:
B1_offset = 0
else:
B1_offset = (temp1 * temp3 - temp2 * temp4) / temp5
# limit B1_offset: 0<B1_offset<B1_uplimit
B1_offset = np.maximum(B1_offset, 0)
B1_offset = np.minimum(B1_offset, B1_uplimit / (np.mean(W_idct_hat)+1e-5))
B_offset = B1_offset * np.reshape(W_idct_hat, -1, order='F') * (-1)
B_offset = B_offset + np.ones_like(B_offset) * B1_offset * np.mean(W_idct_hat)
A1_offset = np.mean(R1[:, validA1coeff_idx[0]], axis=1) - np.mean(A1_coeff[validA1coeff_idx[0]]) * np.reshape(W_idct_hat, -1, order='F')
A1_offset = A1_offset - np.mean(A1_offset)
A_offset = A1_offset - np.mean(A1_offset) - B_offset
# smooth A_offset
W_offset = _dct2d(np.reshape(A_offset, (p,q), order='F').T)
W_offset = np.maximum(W_offset - lambda_darkfield / (ent2 * mu), 0) + \
np.minimum(W_offset + lambda_darkfield / (ent2 * mu), 0)
A_offset = _idct2d(W_offset.T)
A_offset = np.reshape(A_offset, -1, order='F')
# encourage sparse A_offset
A_offset = np.maximum(A_offset - lambda_darkfield / (ent2 * mu), 0) + \
np.minimum(A_offset + lambda_darkfield / (ent2 * mu), 0)
A_offset = A_offset + B_offset
Z1 = images - A1_hat - E1_hat
Y1 = Y1 + mu * Z1
mu = np.minimum(mu * rho, mu_bar)
# Stop Criterion
stopCriterion = np.linalg.norm(Z1, ord='fro') / d_norm
if stopCriterion < optimization_tolerance:
converged = True
if not converged and iter >= max_iterations:
converged = True
A_offset = np.squeeze(A_offset)
A_offset = A_offset + B1_offset * np.reshape(W_idct_hat, -1, order='F')
return A1_hat, E1_hat, A_offset
def _resize_image(image: np.ndarray, x_side_size: float = None, y_side_size: float = None):
if image.shape[0] != x_side_size or image.shape[1] != y_side_size:
return skresize(
image,
(x_side_size, y_side_size),
order = RESIZE_ORDER,
mode = RESIZE_MODE,
preserve_range = PRESERVE_RANGE
)
else:
return image
def _shrinkageOperator(matrix, epsilon):
temp1 = matrix - epsilon
temp1[temp1 < 0] = 0
temp2 = matrix + epsilon
temp2[temp2 > 0] = 0
res = temp1 + temp2
return res
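# Illustrative check (hypothetical helper, for exposition only): the shrinkage
# operator above is elementwise soft-thresholding, the proximal operator of the
# L1 norm, so it must agree with sign(x) * max(|x| - epsilon, 0):
def _demo_shrinkage_equivalence():
    rng = np.random.default_rng(0)
    x = rng.normal(size=(3, 4))
    eps = 0.5
    assert np.allclose(_shrinkageOperator(x, eps),
                       np.sign(x) * np.maximum(np.abs(x) - eps, 0))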
def _dct2d(mtrx: np.ndarray):
"""
Calculates 2D discrete cosine transform.
Parameters
----------
mtrx
Input matrix.
Returns
-------
Discrete cosine transform of the input matrix.
"""
# Check if input object is 2D.
if mtrx.ndim != 2:
raise ValueError("Passed object should be a matrix or a numpy array with dimension of two.")
return dct(dct(mtrx.T, norm='ortho').T, norm='ortho')
def _idct2d(mtrx: np.ndarray):
"""
Calculates 2D inverse discrete cosine transform.
Parameters
----------
mtrx
Input matrix.
Returns
-------
Inverse of discrete cosine transform of the input matrix.
"""
# Check if input object is 2D.
if mtrx.ndim != 2:
raise ValueError("Passed object should be a matrix or a numpy array with dimension of two.")
return idct(idct(mtrx.T, norm='ortho').T, norm='ortho')
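# Illustrative check (hypothetical helper, for exposition only): with
# norm='ortho' the forward and inverse transforms are exact inverses along each
# axis, so the 2D pair above round-trips any matrix up to floating-point error:
def _demo_dct2d_roundtrip():
    m = np.random.rand(5, 7)
    assert np.allclose(_idct2d(_dct2d(m)), m)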
| 14,796
| 34.569712
| 155
|
py
|
OPM
|
OPM-master/reconstruction/recon_opm_galvoscan.py
|
#!/usr/bin/env python
'''
Galvo scanning OPM post-processing using numpy, numba, skimage, pyimagej, and npy2bdv.
Orthogonal interpolation method adapted from original description by Vincent Maioli (http://doi.org/10.25560/68022)
Last updated: Shepherd 06/21
'''
# imports
import numpy as np
from pathlib import Path
from pycromanager import Dataset
import npy2bdv
import sys
import argparse
from skimage.measure import block_reduce
from image_post_processing import deskew
from itertools import compress
from itertools import product
import data_io
import tifffile
import gc
import zarr
# parse experimental directory, load data, perform orthogonal deskew, and save as BDV H5 file
def main(argv):
# parse command line arguments
parser = argparse.ArgumentParser(description="Process raw OPM data.")
parser.add_argument("-i", "--ipath", type=str, nargs="+", help="supply the directories to be processed")
parser.add_argument("-d", "--decon", type=int, default=0, help="0: no deconvolution (DEFAULT), 1: deconvolution")
parser.add_argument("-f", "--flatfield", type=int, default=0, help="0: No flat field (DEFAULT), 1: flat field")
parser.add_argument("-k", "--deskew", type=int, default=1, help="0: no deskewing, 1: deskewing (DEFAULT)")
parser.add_argument("-s", "--save_type", type=int, default=0, help="0: TIFF stack output (DEFAULT), 1: BDV output, 2: Zarr output")
parser.add_argument("-t", "--tilt_orientation",type=str, default='new', help="new: new orientation (DEFAULT), prev: previous orientation")
parser.add_argument("--time_steps", nargs='+', type=int, default=-1, help="-1: all time steps (DEFAULT), else list of time steps")
parser.add_argument("--channels", nargs='+', type=int, default=-1, help="-1: all channels (DEFAULT), else list of all channels")
parser.add_argument("--overwrite", type=int, default=0, help="0: do not overwrite existing folder (DEFAULT), 1: overwrite")
args = parser.parse_args()
input_dir_strings = args.ipath
decon_flag = args.decon
flatfield_flag = args.flatfield
deskew_flag = args.deskew
save_type= args.save_type
tilt_orientation = args.tilt_orientation
overwrite_flag = args.overwrite == 1
# Loop over all user supplied directories for batch reconstruction
for ii, input_dir_string in enumerate(input_dir_strings):
print("Processing directory %d/%d" % (ii + 1, len(input_dir_strings)))
# https://docs.python.org/3/library/pathlib.html
# Create Path object to directory
input_dir_path=Path(input_dir_string)
# create parameter array from scan parameters saved by acquisition code
df_metadata = data_io.read_metadata(input_dir_path.resolve().parents[0] / 'scan_metadata.csv')
root_name = df_metadata['root_name']
scan_type = df_metadata['scan_type']
theta = df_metadata['theta']
scan_step = df_metadata['scan_step']
pixel_size = df_metadata['pixel_size']
num_t = df_metadata['num_t']
num_y = df_metadata['num_y']
num_z = df_metadata['num_z']
num_ch = df_metadata['num_ch']
num_images = df_metadata['scan_axis_positions']
excess_images = 0
y_pixels = df_metadata['y_pixels']
x_pixels = df_metadata['x_pixels']
chan_405_active = df_metadata['405_active']
chan_488_active = df_metadata['488_active']
chan_561_active = df_metadata['561_active']
chan_635_active = df_metadata['635_active']
chan_730_active = df_metadata['730_active']
active_channels = [chan_405_active,chan_488_active,chan_561_active,chan_635_active,chan_730_active]
channel_idxs = [0,1,2,3,4]
channels_in_data = list(compress(channel_idxs, active_channels))
n_active_channels = len(channels_in_data)
        if num_ch != n_active_channels:
            print('Channel setup error. Check metadata file and directory names.')
sys.exit()
# calculate pixel sizes of deskewed image in microns
deskewed_x_pixel = pixel_size / 1000.
deskewed_y_pixel = pixel_size / 1000.
deskewed_z_pixel = pixel_size / 1000.
print('Deskewed pixel sizes before downsampling (um). x='+str(deskewed_x_pixel)+', y='+str(deskewed_y_pixel)+', z='+str(deskewed_z_pixel)+'.')
# amount of down sampling in z
z_down_sample = 1
# load dataset
if str(input_dir_path).endswith('zarr'):
dataset = zarr.open(input_dir_path, mode='r')
im_type = 'zarr'
else:
dataset = Dataset(str(input_dir_path))
im_type = 'pycro'
# create output directory
im_processes = []
if decon_flag == 1:
im_processes.append('decon')
if flatfield_flag == 1:
im_processes.append('flatfield')
if deskew_flag == 1:
im_processes.append('deskew')
if len(im_processes) == 0:
str_processes = 'original_output'
else:
str_processes = '_'.join(im_processes) + '_output'
input_dir_path=Path(input_dir_string)
output_dir_path = input_dir_path.resolve().parents[0] / str_processes
output_dir_path.mkdir(parents=True, exist_ok=True)
# initialize counters
timepoints_in_data = list(range(num_t))
ch_in_BDV = list(range(n_active_channels))
em_wavelengths=[.450,.520,.580,.670,.780]
# if specific time steps or channels are provided, we use them only
# by default -1, list of int if provided by user
if not isinstance(args.time_steps, int):
timepoints_in_data = args.time_steps
num_t = len(timepoints_in_data)
if not isinstance(args.channels, int):
ch_in_BDV = args.channels
num_ch = len(ch_in_BDV)
# Create TIFF if requested
if (save_type==0):
# create directory for data type
tiff_output_dir_path = output_dir_path / Path('tiff')
tiff_output_dir_path.mkdir(parents=True, exist_ok=overwrite_flag)
# Create BDV if requested
elif (save_type == 1):
# create directory for data type
bdv_output_dir_path = output_dir_path / Path('bdv')
bdv_output_dir_path.mkdir(parents=True, exist_ok=overwrite_flag)
# https://github.com/nvladimus/npy2bdv
# create BDV H5 file with sub-sampling for BigStitcher
bdv_output_path = bdv_output_dir_path / Path(root_name+'_bdv.h5')
bdv_writer = npy2bdv.BdvWriter(str(bdv_output_path), nchannels=num_ch, ntiles=1, subsamp=((1,1,1),),blockdim=((16, 16, 16),))
# create blank affine transformation to use for stage translation
unit_matrix = np.array(((1.0, 0.0, 0.0, 0.0), # change the 4. value for x_translation (px)
(0.0, 1.0, 0.0, 0.0), # change the 4. value for y_translation (px)
(0.0, 0.0, 1.0, 0.0)))# change the 4. value for z_translation (px)
# Create Zarr if requested
elif (save_type == 2):
# create directory for data type
zarr_output_dir_path = output_dir_path / Path('zarr')
zarr_output_dir_path.mkdir(parents=True, exist_ok=overwrite_flag)
# create name for zarr directory
zarr_output_path = zarr_output_dir_path / Path(root_name + '_zarr.zarr')
# calculate size of one volume
# change step size from physical space (nm) to camera space (pixels)
pixel_step = scan_step/pixel_size # (pixels)
# calculate the number of pixels scanned during stage scan
scan_end = num_images * pixel_step # (pixels)
# calculate properties for final image
ny = np.int64(np.ceil(scan_end+y_pixels*np.cos(theta*np.pi/180))) # (pixels)
nz = np.int64(np.ceil(y_pixels*np.sin(theta*np.pi/180))) # (pixels)
nx = np.int64(x_pixels) # (pixels)
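            # worked example (hypothetical values): with theta = 30 deg,
            # pixel_size = 115 nm and scan_step = 400 nm, pixel_step ~= 3.48 px;
            # for num_images = 1000 and y_pixels = 256 this gives
            # scan_end ~= 3478.3 px, ny = ceil(3478.3 + 256 * cos(30 deg)) = 3700,
            # and nz = ceil(256 * sin(30 deg)) = 128.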
# create and open zarr file
root = zarr.open(str(zarr_output_path), mode="w")
opm_data = root.zeros("opm_data", shape=(num_t, num_ch, nz, ny, nx), chunks=(1, 1, 32, 256, 256), dtype=np.uint16)
root = zarr.open(str(zarr_output_path), mode="rw")
opm_data = root["opm_data"]
        # if retrospective flatfield is requested, import and open pyimagej in interactive mode
        # TO DO: need to fix for new call; note that the pyimagej gateway `ij` used by
        # manage_flat_field() below is never initialized in this script, so the flatfield
        # path requires wiring up pyimagej before use
if flatfield_flag == 1:
from image_post_processing import manage_flat_field
# if decon is requested, import microvolution wrapper
if decon_flag == 1:
from image_post_processing import lr_deconvolution
# loop over all timepoints and channels
for (t_idx, ch_BDV_idx) in product(timepoints_in_data,ch_in_BDV):
ch_idx = channels_in_data[ch_BDV_idx]
# pull data stack into memory
print('Process timepoint '+str(t_idx)+'; channel '+str(ch_BDV_idx) +'.')
if im_type == 'pycro':
raw_data = data_io.return_data_numpy(dataset, t_idx, ch_BDV_idx, num_images, excess_images, y_pixels, x_pixels)
elif im_type == 'zarr':
raw_data = dataset[t_idx, ch_BDV_idx, :, :, :]
# run deconvolution on skewed image
if decon_flag == 1:
print('Deconvolve.')
em_wvl = em_wavelengths[ch_idx]
channel_opm_psf = data_io.return_opm_psf(em_wvl)
if tilt_orientation == 'new':
channel_opm_psf = np.flip(channel_opm_psf, axis=1)
#decon = mv_lr_decon(image=raw_data,psf=channel_opm_psf,iterations=50)
decon = lr_deconvolution(image=raw_data,psf=channel_opm_psf,iterations=50)
else:
decon = raw_data
del raw_data
gc.collect()
# perform flat-fielding
if flatfield_flag == 0:
corrected_stack = decon
else:
print('Flatfield.')
corrected_stack, flat_field, dark_field = manage_flat_field(decon, ij)
del decon
gc.collect()
# deskew raw_data
if deskew_flag == 1:
print('Deskew.')
if tilt_orientation == 'new':
deskewed = deskew(data=np.flip(corrected_stack, axis=1),theta=theta,distance=scan_step,pixel_size=pixel_size)
else:
deskewed = deskew(data=np.flip(corrected_stack, axis=0),theta=theta,distance=scan_step,pixel_size=pixel_size)
else:
if tilt_orientation == 'new':
deskewed = np.flip(corrected_stack, axis=1)
else:
deskewed = np.flip(corrected_stack, axis=0)
del corrected_stack
gc.collect()
# downsample in z due to oversampling when going from OPM to coverslip geometry
if z_down_sample==1:
downsampled = deskewed
else:
print('Downsample.')
downsampled = block_reduce(deskewed, block_size=(z_down_sample,1,1), func=np.mean)
del deskewed
gc.collect()
# save deskewed image into TIFF stack
if (save_type==0):
print('Write TIFF stack')
tiff_filename= 'f_'+root_name+'_c'+str(ch_idx).zfill(3)+'_t'+str(t_idx).zfill(5)+'.tiff'
tiff_output_path = tiff_output_dir_path / Path(tiff_filename)
                tifffile.imwrite(str(tiff_output_path), downsampled.astype(np.uint16), imagej=True, resolution=(1/deskewed_x_pixel, 1/deskewed_y_pixel),
metadata={'unit': 'um', 'axes': 'ZYX'})
# save tile in BDV H5 with actual stage positions
elif (save_type==1):
print('Write data into BDV H5.')
bdv_writer.append_view(downsampled, time=t_idx, channel=ch_BDV_idx,
tile=0,
voxel_size_xyz=(deskewed_y_pixel, deskewed_y_pixel, z_down_sample*deskewed_z_pixel),
voxel_units='um')
# save deskewed image into Zarr container
elif (save_type==2):
print('Write data into Zarr container')
opm_data[t_idx, ch_BDV_idx, :, :, :] = downsampled
# free up memory
del downsampled
gc.collect()
if (save_type==1):
# write BDV xml file
# https://github.com/nvladimus/npy2bdv
bdv_writer.write_xml()
bdv_writer.close()
# shut down pyimagej
if flatfield_flag==1:
ij.getContext().dispose()
# exit
print('Finished.')
sys.exit()
# run
if __name__ == "__main__":
main(sys.argv[1:])
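# example invocation (paths hypothetical; flags as defined in the argparse setup above):
#   python recon_opm_galvoscan.py -i /data/scan_0001 -d 1 -f 0 -k 1 -s 1 \
#       --time_steps 0 1 2 --channels 0 1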
| 13,051
| 44.006897
| 151
|
py
|
OPM
|
OPM-master/reconstruction/localize_gui.py
|
"""
Localize skewed data with GUI for setting different parameters
"""
import os
import time
import numpy as np
import localize
import localize_skewed
import image_post_processing as ipp
import pycromanager
import napari
from napari.qt.threading import thread_worker
from magicgui import magicgui
root_dir = r"C:\Users\ptbrown2\Desktop"
# root_dir = r"\\10.206.26.21\opm2\20210628\new area"
dir_format = "bDNA_stiff_gel_human_lung_r%04d_y%04d_z%04d_ch%04d_1"
chunk_size_planes = 200
chunk_size_x = 325
chunk_overlap = 5
round_idx = 1
tl = 14
iz = 0
ch = 1
data_fdir = os.path.join(root_dir, dir_format % (round_idx, tl, iz, ch + 1))
dset = pycromanager.Dataset(data_fdir)
md = dset.read_metadata(z=0, channel=0)
frame_time_ms = float(md["OrcaFusionBT-Exposure"])
# load all images
imgs = []
for kk in range(len(dset.axes["z"])):
imgs.append(dset.read_image(z=kk, channel=0))
imgs = np.flip(np.asarray(imgs), axis=0)
nplanes, nyp, nxp = imgs.shape
# object storing any data we want to access inside/outside of GUI
class LocObj:
def __init__(self, imgs):
self.raw_imgs = imgs
self.dc = None
self.dstep = None
self.theta = None
self.deskewed_data = None
# filtering params
self.filter_sigma_small = None
self.filter_sigma_large = None
# filtering data
self.filtered_chunks = None
self.chunk_coords = None
self.chunk_rois = None
self.filtered = None
self.filtered_deskewed = None
# localization params
self.min_spot_sep = None
self.threshold = None
self.roi_size = None
# localization data
self.init_params = None
self.fit_params = None
self.conditions = None
self.centers_pix = None
# fit filtering params
self.fit_dist_max_err = None
self.sigmas_max = None
self.sigmas_min = None
self.fit_threshold = None
self.dist_boundary_min = None
# filtered results
self.to_keep = None
self.conditions = None
self.condition_names = None
def update_deskew(self):
tstart = time.perf_counter()
deskewed_data = ipp.deskew(self.raw_imgs, self.theta, self.dstep, self.dc)
print("Deskewed images in %0.2fs" % (time.perf_counter() - tstart))
self.deskewed_data = deskewed_data
def update_filtered(self):
tstart = time.perf_counter()
nchunks = int(np.ceil(nplanes / (chunk_size_planes - chunk_overlap)) *
np.ceil(nxp / (chunk_size_x - chunk_overlap)))
filtered_chunks = []
chunk_coords = []
chunk_rois = []
more_chunks = True
ichunk = 0
chunk_counter_p = 0
chunk_counter_x = 0
while more_chunks:
print("Chunk %d/%d, x index = %d, step index = %d" % (ichunk + 1, nchunks, chunk_counter_x, chunk_counter_p))
ix_start = int(np.max([chunk_counter_x * chunk_size_x - chunk_overlap, 0]))
ix_end = int(np.min([ix_start + chunk_size_x, nxp]))
ip_start = int(np.max([chunk_counter_p * chunk_size_planes - chunk_overlap, 0]))
ip_end = int(np.min([ip_start + chunk_size_planes, nplanes]))
imgs_chunk = imgs[ip_start:ip_end, :, ix_start:ix_end]
ks = localize_skewed.get_filter_kernel_skewed(self.filter_sigma_small, self.dc,
self.theta * np.pi / 180,
self.dstep, sigma_cutoff=2)
kl = localize_skewed.get_filter_kernel_skewed(self.filter_sigma_large, self.dc,
self.theta * np.pi / 180,
self.dstep, sigma_cutoff=2)
imgs_hp = localize.filter_convolve(imgs_chunk, ks)
imgs_lp = localize.filter_convolve(imgs_chunk, kl, use_gpu=True)
filtered = imgs_hp - imgs_lp
# get image coordinates
npos, ny, nx = imgs_chunk.shape
y_offset = ip_start * self.dstep
x_offset = ix_start * self.dc
x, y, z = localize_skewed.get_skewed_coords((npos, ny, nx), self.dc, self.dstep, self.theta * np.pi/180)
x += x_offset
y += y_offset
# store information for this chunk
filtered_chunks.append(filtered)
chunk_coords.append((z, y, x))
chunk_rois.append([ip_start, ip_end, 0, filtered.shape[1], ix_start, ix_end])
# update chunk counters
if ix_end < nxp:
chunk_counter_x += 1
ichunk += 1
elif ip_end < nplanes:
chunk_counter_x = 0
chunk_counter_p += 1
ichunk += 1
else:
more_chunks = False
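        # note: chunks advance along x first, then wrap to the next block of scan
        # planes; e.g. with nxp = 650 and chunk_size_x = 325 the x-chunks are
        # [0, 325), [320, 645) and [645, 650) before chunk_counter_p increments.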
print("Filtered images in %0.2fs" % (time.perf_counter() - tstart))
self.filtered_chunks = filtered_chunks
self.chunk_coords = chunk_coords
self.chunk_rois = chunk_rois
filtered_all = np.zeros(self.raw_imgs.shape)
for ii in range(len(chunk_rois)):
roi = chunk_rois[ii]
filtered_all[roi[0]:roi[1], roi[2]:roi[3], roi[4]:roi[5]] = filtered_chunks[ii]
self.filtered = filtered_all
self.filtered_deskewed = ipp.deskew(self.filtered, self.theta, self.dstep, self.dc)
def update_localizations(self):
# ###################################################
# identify candidate beads
# ###################################################
tstart = time.perf_counter()
dz_min, dxy_min = self.min_spot_sep
footprint = localize_skewed.get_skewed_footprint((dz_min, dxy_min, dxy_min), self.dc, self.dstep, self.theta * np.pi/180)
fit_params_vol = []
init_params_vol = []
rois_vol = []
for ii, (imgs_filtered, coords, grand_roi) in enumerate(zip(self.filtered_chunks, self.chunk_coords, self.chunk_rois)):
print("localizing chunk %d/%d" % (ii + 1, len(self.filtered_chunks)))
z, y, x = coords
imgs_chunk = self.raw_imgs[grand_roi[0]:grand_roi[1], grand_roi[2]:grand_roi[3], grand_roi[4]:grand_roi[5]]
centers_guess_inds, amps = localize.find_peak_candidates(imgs_filtered, footprint, self.threshold)
# convert to xyz coordinates
xc = x[0, 0, centers_guess_inds[:, 2]]
yc = y[centers_guess_inds[:, 0], centers_guess_inds[:, 1], 0]
zc = z[0, centers_guess_inds[:, 1], 0] # z-position is determined by the y'-index in OPM image
centers_guess = np.stack((zc, yc, xc), axis=1)
print("Found %d points above threshold in %0.2fs" % (
len(centers_guess), time.perf_counter() - tstart))
if len(centers_guess) != 0:
# ###################################################
                # average multiple points that are too close together. Necessary because with a
                # naive threshold we may identify several points from the same spot. Particularly
                # important if spots have very different brightness levels.
# ###################################################
tstart = time.perf_counter()
inds = np.ravel_multi_index(centers_guess_inds.transpose(), imgs_filtered.shape)
weights = imgs_filtered.ravel()[inds]
centers_guess, inds_comb = localize.filter_nearby_peaks(centers_guess, dxy_min, dz_min, weights=weights,
mode="average")
amps = amps[inds_comb]
print("Found %d points separated by dxy > %0.5g and dz > %0.5g in %0.1fs" %
(len(centers_guess), dxy_min, dz_min, time.perf_counter() - tstart))
# ###################################################
# prepare ROIs
# ###################################################
tstart = time.perf_counter()
# cut rois out
roi_size_skew = localize_skewed.get_skewed_roi_size(self.roi_size, self.theta*np.pi/180, self.dc, self.dstep, ensure_odd=True)
rois, img_rois, xrois, yrois, zrois = zip(
*[localize_skewed.get_skewed_roi(c, imgs_chunk, x, y, z, roi_size_skew) for c in centers_guess])
rois = np.asarray(rois)
# exclude some regions of roi
roi_masks = [localize_skewed.get_roi_mask(c, (np.inf, 0.5 * self.roi_size[1]), (zrois[ii], yrois[ii], xrois[ii]))
for ii, c in enumerate(centers_guess)]
# mask regions
xrois, yrois, zrois, img_rois = zip(
*[(xr[rm][None, :], yr[rm][None, :], zr[rm][None, :], ir[rm][None, :])
for xr, yr, zr, ir, rm in zip(xrois, yrois, zrois, img_rois, roi_masks)])
# extract guess values
bgs = np.array([np.mean(r) for r in img_rois])
sxs = np.array([np.sqrt(np.sum(ir * (xr - cg[2]) ** 2) / np.sum(ir)) for ir, xr, cg in
zip(img_rois, xrois, centers_guess)])
sys = np.array([np.sqrt(np.sum(ir * (yr - cg[1]) ** 2) / np.sum(ir)) for ir, yr, cg in
zip(img_rois, yrois, centers_guess)])
sxys = np.expand_dims(0.5 * (sxs + sys), axis=1)
szs = np.expand_dims(np.array(
[np.sqrt(np.sum(ir * (zr - cg[0]) ** 2) / np.sum(ir)) for ir, zr, cg in
zip(img_rois, zrois, centers_guess)]), axis=1)
# get initial parameter guesses
init_params = np.concatenate((np.expand_dims(amps, axis=1),
centers_guess[:, 2][:, None],
centers_guess[:, 1][:, None],
centers_guess[:, 0][:, None],
sxys, szs,
np.expand_dims(bgs, axis=1)),
axis=1)
print("Prepared %d rois and estimated initial parameters in %0.2fs" % (
len(rois), time.perf_counter() - tstart))
# ###################################################
# localization
# ###################################################
print("starting fitting for %d rois" % centers_guess.shape[0])
tstart = time.perf_counter()
fit_params, fit_states, chi_sqrs, niters, fit_t = localize.fit_gauss_rois(img_rois, (zrois, yrois, xrois),
init_params, estimator="LSE",
model="gaussian",
sf=1, dc=self.dc,
angles=(0., self.theta * np.pi/180, 0.))
tend = time.perf_counter()
print("Localization took %0.2fs" % (tend - tstart))
# fitting
print("Fitting %d rois on GPU" % (len(rois)))
fit_results = np.concatenate((np.expand_dims(fit_states, axis=1),
np.expand_dims(chi_sqrs, axis=1),
np.expand_dims(niters, axis=1)), axis=1)
# ###################################################
# correct ROIs for full volume
# ###################################################
rois[:, :2] += grand_roi[0]
rois[:, 4:] += grand_roi[4]
# ###################################################
# store results
# ###################################################
fit_params_vol.append(fit_params)
init_params_vol.append(init_params)
rois_vol.append(rois)
self.fit_params = np.concatenate(fit_params_vol, axis=0)
self.init_params = np.concatenate(init_params_vol, axis=0)
self.rois = np.concatenate(rois_vol, axis=0)
self.centers_pix = np.stack((self.fit_params[:, 3], self.fit_params[:, 2], self.fit_params[:, 1]), axis=1) / self.dc
def update_fit_filters(self):
# ###################################################
# preliminary fitting of results
# ###################################################
tstart = time.perf_counter()
x, y, z = localize_skewed.get_skewed_coords(self.raw_imgs.shape, self.dc, self.dstep, self.theta * np.pi/180)
to_keep, conditions, condition_names, filter_settings = localize_skewed.filter_localizations(
self.fit_params, self.init_params, (z, y, x),
self.fit_dist_max_err, self.min_spot_sep,
(self.sigmas_min,self.sigmas_max),
self.fit_threshold,
dist_boundary_min=self.dist_boundary_min)
print("identified %d/%d localizations in %0.3f" % (
np.sum(to_keep), to_keep.size, time.perf_counter() - tstart))
self.to_keep = to_keep
self.conditions = conditions
self.condition_names = condition_names
obj = LocObj(imgs)
viewer = napari.Viewer(title=root_dir, ndisplay=3)
# viewer.dims.ndisplay = 3
viewer.camera.angles = (20, -77, -22)
# draw layers on napari
def update_img_layer(layers, names):
if not isinstance(layers, (list, tuple)):
layers = [layers]
if not isinstance(names, (list, tuple)):
names = [names]
for l, n in zip(layers, names):
if l is None:
continue
try:
viewer.layers[n].data = l
except KeyError:
viewer.add_image(l, name=n)
def update_point_layers(layers, names):
if not isinstance(layers, (list, tuple)):
layers = [layers]
if not isinstance(names, (list, tuple)):
names = [names]
for l, n in zip(layers, names):
if l is None:
continue
try:
viewer.layers[n].data = l
except KeyError:
viewer.add_points(l, size=2, face_color="red", name=n,
opacity=0.75, n_dimensional=True, visible=True)
def update_layers_helper(layers):
dimg, fimg, fcs = layers
update_img_layer((dimg, fimg), ("deskewed img", "filtered img"))
update_point_layers(fcs, "fit centers")
@thread_worker
def loc_thread(dc=None, theta=None, dstep=None, filter_sigma_small=None, filter_sigma_large=None,
min_spot_sep=None, threshold=None, roi_size=None,
sigmas_max=None, sigmas_min=None, fit_threshold=None, dist_boundary_min=None,
fit_dist_max_err=None):
# based on new parameters decide how much of the processing pipeline must be redone
update_deskew = dc != obj.dc or theta != obj.theta or dstep != obj.dstep
update_filter = update_deskew or obj.filter_sigma_small != filter_sigma_small or obj.filter_sigma_large != filter_sigma_large
update_locs = update_filter or obj.min_spot_sep != min_spot_sep or obj.threshold != threshold or obj.roi_size != roi_size
update_fit_filters = update_locs or obj.sigmas_max != sigmas_max or obj.sigmas_min != sigmas_min or \
obj.fit_threshold != fit_threshold or obj.dist_boundary_min != dist_boundary_min or \
obj.fit_dist_max_err != fit_dist_max_err
if update_deskew:
obj.dc = dc
obj.theta = theta
obj.dstep = dstep
obj.update_deskew()
if update_filter:
obj.filter_sigma_small = filter_sigma_small
obj.filter_sigma_large = filter_sigma_large
obj.update_filtered()
if update_locs:
obj.min_spot_sep = min_spot_sep
obj.threshold = threshold
obj.roi_size = roi_size
obj.update_localizations()
if update_fit_filters:
obj.fit_dist_max_err = fit_dist_max_err
obj.sigmas_max = sigmas_max
obj.sigmas_min = sigmas_min
obj.fit_threshold = fit_threshold
obj.dist_boundary_min = dist_boundary_min
obj.update_fit_filters()
yield (obj.deskewed_data, obj.filtered_deskewed, obj.centers_pix[obj.to_keep])
@magicgui(call_button="update")
def loc(dc=0.115, theta=30., dstep=0.4,
filter_sz_small=0.18, filter_sxy_small=0.025, filter_sz_large=1.8, filter_sxy_large=0.5,
min_sep_z=1.8, min_sep_xy=0.4, threshold=100, roi_size_z=1.8, roi_size_xy=1.2,
sz_max=1., sz_min=0.088, sxy_max=0.38, sxy_min=0.024, fit_threshold=100, dist_boundary_z=0.2, dist_boundary_xy=0.1,
z_err_fit_max=0.35, xy_fit_err_max=0.3):
worker_filter = loc_thread(dc=dc, theta=theta, dstep=dstep,
filter_sigma_small=(filter_sz_small, filter_sxy_small, filter_sxy_small),
filter_sigma_large=(filter_sz_large, filter_sxy_large, filter_sxy_large),
min_spot_sep=(min_sep_z, min_sep_xy), threshold=threshold,
roi_size=(roi_size_z, roi_size_xy, roi_size_xy),
sigmas_max=(sz_max, sxy_max), sigmas_min=(sz_min, sxy_min), fit_threshold=fit_threshold,
dist_boundary_min=(dist_boundary_z, dist_boundary_xy), fit_dist_max_err=(z_err_fit_max, xy_fit_err_max))
worker_filter.yielded.connect(update_layers_helper)
worker_filter.start()
viewer.window.add_dock_widget(loc)
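# usage sketch (assumes a Qt-capable environment with napari and magicgui
# installed): the "update" button re-runs only the pipeline stages whose
# parameters changed (deskew -> filter -> localize -> filter fits). If no Qt
# event loop is running yet, one can be started with:
#   napari.run()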
| 17,910
| 42.055288
| 142
|
py
|
OPM
|
OPM-master/reconstruction/localize_skewed.py
|
"""
Code for localization in native OPM frame
This file stores tools specific to the OPM skewed geometry. Most localization functions are found in localize.py
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
import warnings
import time
import joblib
import fit_psf
import rois
import localize
# geometry functions
def nearest_pt_line(pt, slope, pt_line):
"""
Get shortest distance between a point and a line.
:param pt: (xo, yo), point of interest
:param slope: slope of line
:param pt_line: (xl, yl), point the line passes through
:return pt: (x_near, y_near), nearest point on line
:return d: shortest distance from point to line
"""
xo, yo = pt
xl, yl = pt_line
b = yl - slope * xl
x_int = (xo + slope * (yo - b)) / (slope ** 2 + 1)
y_int = slope * x_int + b
d = np.sqrt((xo - x_int) ** 2 + (yo - y_int) ** 2)
return (x_int, y_int), d
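# Illustrative check (hypothetical helper, for exposition only): the foot of
# the perpendicular from (0, 1) onto the line y = x through the origin is
# (0.5, 0.5), at distance sqrt(2) / 2:
def _demo_nearest_pt_line():
    (x_near, y_near), d = nearest_pt_line((0, 1), 1.0, (0, 0))
    assert np.allclose((x_near, y_near), (0.5, 0.5))
    assert np.isclose(d, np.sqrt(2) / 2)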
def point_in_trapezoid(pts, x, y, z):
"""
    Test if a point is in the trapezoidal region described by x,y,z
:param pts: np.array([[cz0, cy0, cx0], [cz1, cy1, cx1], ...[czn, cyn, cxn]])
:param x:
:param y:
:param z:
:return:
"""
if pts.ndim == 1:
pts = pts[None, :]
# get theta
dc = x[0, 0, 1] - x[0, 0, 0]
dz = z[0, 1, 0] - z[0, 0, 0]
theta = np.arcsin(dz / dc)
# get edges
zstart = z.min()
ystart = y[0, 0, 0]
yend = y[-1, 0, 0]
# need to round near machine precision, or can get strange results when points right on boundary
decimals = 10
not_in_region_x = np.logical_or(np.round(pts[:, 2], decimals) < np.round(x.min(), decimals),
np.round(pts[:, 2], decimals) > np.round(x.max(), decimals))
not_in_region_z = np.logical_or(np.round(pts[:, 0], decimals) < np.round(z.min(), decimals),
np.round(pts[:, 0], decimals) > np.round(z.max(), decimals))
# tilted lines describing ends
not_in_region_yz = np.logical_or(np.round(pts[:, 0] - zstart, decimals) > np.round((pts[:, 1] - ystart) * np.tan(theta), decimals),
np.round(pts[:, 0] - zstart, decimals) < np.round((pts[:, 1] - yend) * np.tan(theta), decimals))
in_region = np.logical_not(np.logical_or(not_in_region_yz, np.logical_or(not_in_region_x, not_in_region_z)))
return in_region
# coordinate transformations between OPM and coverslip frames
def get_skewed_coords(sizes, dc, ds, theta, scan_direction="lateral"):
"""
    Get laboratory coordinates (i.e. coverslip coordinates) for a stage-scanning OPM setup
    :param sizes: (n0, n1, n2)
    :param dc: camera pixel size
    :param ds: stage step size
    :param theta: in radians
    :param scan_direction: "lateral" or "axial"
    :return x, y, z:
"""
nimgs, ny_cam, nx_cam = sizes
if scan_direction == "lateral":
x = dc * np.arange(nx_cam)[None, None, :]
# y = stage_pos[:, None, None] + dc * np.cos(theta) * np.arange(ny_cam)[None, :, None]
y = ds * np.arange(nimgs)[:, None, None] + dc * np.cos(theta) * np.arange(ny_cam)[None, :, None]
z = dc * np.sin(theta) * np.arange(ny_cam)[None, :, None]
elif scan_direction == "axial":
x = dc * np.arange(nx_cam)[None, None, :]
y = dc * np.cos(theta) * np.arange(ny_cam)[None, :, None]
z = ds * np.arange(nimgs)[:, None, None] + dc * np.sin(theta) * np.arange(ny_cam)[None, :, None]
else:
raise ValueError("scan_direction must be `lateral` or `axial` but was `%s`" % scan_direction)
return x, y, z
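# shape sketch (values hypothetical): the returned arrays broadcast to the full
# skewed volume, e.g.
#   x, y, z = get_skewed_coords((100, 256, 512), dc=0.115, ds=0.4, theta=30 * np.pi / 180)
# gives x.shape == (1, 1, 512), y.shape == (100, 256, 1) and z.shape == (1, 256, 1)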
def get_skewed_coords_deriv(sizes, dc, ds, theta):
"""
    Derivatives of the skewed coordinates with respect to theta, ds, and dc
:param sizes:
:param dc:
:param ds:
:param theta:
:return:
"""
nimgs, ny_cam, nx_cam = sizes
dxdt = 0 * dc * np.arange(nx_cam)[None, None, :]
dydt = 0 * ds * np.arange(nimgs)[:, None, None] - dc * np.sin(theta) * np.arange(ny_cam)[None, :, None]
dzdt = dc * np.cos(theta) * np.arange(ny_cam)[None, :, None]
dxds = 0 * dc * np.arange(nx_cam)[None, None, :]
dyds = np.arange(nimgs)[:, None, None] + 0 * dc * np.cos(theta) * np.arange(ny_cam)[None, :, None]
dzds = 0 * dc * np.sin(theta) * np.arange(ny_cam)[None, :, None]
dxdc = np.arange(nx_cam)[None, None, :]
dydc = 0 * ds * np.arange(nimgs)[:, None, None] + np.cos(theta) * np.arange(ny_cam)[None, :, None]
dzdc = np.sin(theta) * np.arange(ny_cam)[None, :, None]
return [dxdt, dydt, dzdt], [dxds, dyds, dzds], [dxdc, dydc, dzdc]
def lab2cam(x, y, z, theta):
"""
    Convert xyz coordinates to camera coordinate system: x', y', and stage position.
:param x:
:param y:
:param z:
:param theta:
:return xp:
:return yp: yp coordinate
:return stage_pos: distance of leading edge of camera frame from the y-axis
"""
xp = x
stage_pos = y - z / np.tan(theta)
yp = (y - stage_pos) / np.cos(theta)
return xp, yp, stage_pos
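# Illustrative check (hypothetical helper, for exposition only): a lab-frame
# point on camera row yp at stage position s satisfies y = s + yp * cos(theta)
# and z = yp * sin(theta), and lab2cam() recovers (yp, s) from it:
def _demo_lab2cam_roundtrip():
    theta = 30 * np.pi / 180
    s, yp = 12.0, 3.0
    y, z = s + yp * np.cos(theta), yp * np.sin(theta)
    _, yp_out, s_out = lab2cam(0.0, y, z, theta)
    assert np.isclose(yp_out, yp) and np.isclose(s_out, s)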
def xy_lab2cam(x, y, stage_pos, theta):
"""
Convert xy coordinates to x', y' coordinates at a certain stage position
:param x:
:param y:
:param stage_pos:
:param theta:
:return:
"""
xp = x
yp = (y - stage_pos) / np.cos(theta)
return xp, yp
def get_trapezoid_zbound(cy, coords):
"""
Find z-range of trapezoid for given center position cy
:param cy:
:param coords: (z, y, x)
:return zmax, zmin:
"""
cy = np.array(cy)
z, y, x = coords
slope = (z[:, -1, 0] - z[:, 0, 0]) / (y[0, -1, 0] - y[0, 0, 0])
# zmax
zmax = np.zeros(cy.shape)
cy_greater = cy > y[0, -1]
zmax[cy_greater] = z.max()
zmax[np.logical_not(cy_greater)] = slope * (cy[np.logical_not(cy_greater)] - y[0, 0])
# if cy > y[0, -1]:
# zmax = z.max()
# else:
# zmax = slope * (cy - y[0, 0])
# zmin
zmin = np.zeros(cy.shape)
cy_less = cy < y[-1, 0]
zmin[cy_less] = z.min()
zmin[np.logical_not(cy_less)] = slope * (cy[np.logical_not(cy_less)] - y[-1, 0])
# if cy < y[-1, 0]:
# zmin = z.min()
# else:
# zmin = slope * (cy - y[-1, 0])
return zmax, zmin
def get_trapezoid_ybound(cz, coords):
"""
Find y-range of trapezoid for given center position cz
:param cz:
:param coords: (z, y, x)
    :return ymax, ymin:
"""
cz = np.array(cz)
z, y, x = coords
slope = (z[:, -1, 0] - z[:, 0, 0]) / (y[0, -1, 0] - y[0, 0, 0])
ymin = cz / slope
ymax = cz / slope + y[-1, 0]
return ymax, ymin
# deskew
def interp_opm_data(imgs, dc, ds, theta, mode="ortho-interp"):
"""
Interpolate OPM stage-scan data to be equally spaced in coverslip frame
:param imgs: nz x ny x nx
    :param dc: image spacing in camera space, i.e. camera pixel size referenced to object space
:param ds: distance stage moves between frames
:param theta:
:return:
"""
# fix y-positions from raw images
nxp = imgs.shape[2]
nyp = imgs.shape[1]
nimgs = imgs.shape[0]
# set up interpolated coordinates
dx = dc
dy = dc * np.cos(theta)
dz = dc * np.sin(theta)
x = dx * np.arange(0, nxp)
y = dy * np.arange(0, nyp + int(ds / dy) * (nimgs - 1))
z = dz * np.arange(0, nyp)
nz = len(z)
ny = len(y)
img_unskew = np.nan * np.zeros((z.size, y.size, x.size))
# todo: using loops for a start ... optimize later
if mode == "row-interp": # interpolate using nearest two points on same row
for ii in range(nz):
for jj in range(ny):
# find coordinates of nearest two OPM images
jeff = (jj * dy - ii * dc * np.cos(theta)) / ds
jlow = int(np.floor(jeff))
if (jlow + 1) >= (imgs.shape[0]):
continue
# interpolate
img_unskew[ii, jj, :] = (imgs[jlow, ii, :] * (jj * dy - jlow * ds) + imgs[jlow + 1, ii, :] * ((jlow + 1) * ds - jj * dy)) / ds
# todo: this mode can be generalized to not use dy a multiple of dx
elif mode == "ortho-interp": # interpolate using nearest four points.
for ii in range(nz): # loop over z-positions
for jj in range(ny): # loop over large y-position steps (moving distance btw two real frames)
# find coordinates of nearest two OPM images
jeff = (jj * dy - ii * dc * np.cos(theta)) / ds
jlow = int(np.floor(jeff))
if (jlow + 1) >= (imgs.shape[0]) or jlow < 0:
continue
pt_now = (y[jj], z[ii])
# find nearest point to line along frame index jlow
# this line passes through the point (jlow * ds, 0)
pt_n1, dist_1 = nearest_pt_line(pt_now, np.tan(theta), (jlow * ds, 0))
dist_along_line1 = np.sqrt((pt_n1[0] - jlow * ds) ** 2 + pt_n1[1] ** 2) / dc
# as usual, need to round to avoid finite precision floor/ceiling issues if number is already an integer
i1_low = int(np.floor(np.round(dist_along_line1, 14)))
i1_high = int(np.ceil(np.round(dist_along_line1, 14)))
if i1_high >= (imgs.shape[1] - 1) or i1_low < 0:
continue
if np.round(dist_1, 14) == 0:
q1 = imgs[jlow, i1_low, :]
elif i1_low < 0 or i1_high >= nyp:
q1 = np.nan
else:
d1 = dist_along_line1 - i1_low
q1 = (1 - d1) * imgs[jlow, i1_low, :] + d1 * imgs[jlow, i1_high, :]
# find nearest point to line passing along frame index (jlow + 1)
# this line passes through the point ( (jlow + 1) * ds, 0)
pt_no, dist_o = nearest_pt_line(pt_now, np.tan(theta), ( (jlow + 1) * ds, 0))
dist_along_line0 = np.sqrt((pt_no[0] - (jlow + 1) * ds) ** 2 + pt_no[1] ** 2) / dc
io_low = int(np.floor(np.round(dist_along_line0, 14)))
io_high = int(np.ceil(np.round(dist_along_line0, 14)))
if io_high >= (imgs.shape[1] - 1) or io_low < 0:
continue
if np.round(dist_o, 14) == 0:
qo = imgs[jlow + 1, i1_low, :]
elif io_low < 0 or io_high >= nyp:
qo = np.nan
else:
do = dist_along_line0 - io_low
qo = (1 - do) * imgs[jlow + 1, io_low, :] + do * imgs[jlow + 1, io_high, :]
# weighted average of qo and q1 based on their distance
img_unskew[ii, jj, :] = (q1 * dist_o + qo * dist_1) / (dist_o + dist_1)
else:
        raise ValueError("mode must be 'row-interp' or 'ortho-interp' but was '%s'" % mode)
return x, y, z, img_unskew
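# usage sketch (parameter values hypothetical):
#   x, y, z, vol = interp_opm_data(imgs, dc=0.115, ds=0.4,
#                                  theta=30 * np.pi / 180, mode="ortho-interp")
# vol lies on an even (z, y, x) grid with spacings (dc*sin(theta), dc*cos(theta), dc).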
# point spread function model and fitting
def gaussian3d_angle(shape, dc, p):
"""
:param shape:
:param dc:
:param p: [A, cx, cy, cz, sxy, sz, bg, theta, ds]
:return:
"""
x, y, z = get_skewed_coords(shape, dc, p[8], p[7])
val = p[0] * np.exp(-(x - p[1])**2 / 2 / p[4]**2 - (y - p[2])**2 / 2 / p[4]**2 - (z - p[3])**2 / 2 / p[5]**2) + p[6]
return val
def gaussian3d_angle_jacobian(shape, dc, p):
x, y, z = get_skewed_coords(shape, dc, p[8], p[7])
[dxdt, dydt, dzdt], [dxds, dyds, dzds], _ = get_skewed_coords_deriv(shape, dc, p[8], p[7])
exp = np.exp(-(x - p[1])**2 / 2 / p[4]**2 - (y - p[2])**2 / 2 / p[4]**2 - (z - p[3])**2 / 2 / p[5]**2)
jac = [exp,
p[0] * exp * (x - p[1]) / p[4]**2,
p[0] * exp * (y - p[2]) / p[4]**2,
p[0] * exp * (z - p[3]) / p[5]**2,
p[0] * exp * ((x - p[1])**2 + (y - p[2])**2) / p[4]**3,
p[0] * exp * (z - p[3])**2 / p[5]**3,
np.ones(shape),
p[0] * exp * ((-1) * (x - p[1]) / p[4]**2 * dxdt +
(-1) * (y - p[2]) / p[4]**2 * dydt +
(-1) * (z - p[3]) / p[5]**2 * dzdt),
p[0] * exp * ((-1) * (x - p[1]) / p[4] ** 2 * dxds +
(-1) * (y - p[2]) / p[4] ** 2 * dyds +
(-1) * (z - p[3]) / p[5] ** 2 * dzds)
]
return jac
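# Illustrative check (hypothetical helper, for exposition only): spot-check the
# analytic Jacobian above against central finite differences, assuming the
# expressions in gaussian3d_angle_jacobian() are correct:
def _demo_jacobian_fd_check(shape=(4, 5, 6), dc=0.115):
    p = np.array([1.0, 0.3, 0.5, 0.2, 0.2, 0.5, 0.1, 30 * np.pi / 180, 0.4])
    jac = gaussian3d_angle_jacobian(shape, dc, p)
    for ii in range(len(p)):
        pp, pm = p.copy(), p.copy()
        pp[ii] += 1e-6
        pm[ii] -= 1e-6
        fd = (gaussian3d_angle(shape, dc, pp) - gaussian3d_angle(shape, dc, pm)) / 2e-6
        print("param %d: max |fd - analytic| = %.3g" % (ii, np.max(np.abs(fd - jac[ii]))))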
# generate synthetic image
def simulate_img(scan_params, physical_params, ncenters=1, centers=None, sf=3):
# size and pixel size
dc = scan_params["dc"]
theta = scan_params["theta"]
dstep = scan_params["dstep"]
npos, ny, nx = scan_params["shape"]
normal = np.array([0, -np.sin(theta), np.cos(theta)]) # normal of camera pixel
# physical data
na = physical_params["na"]
ni = physical_params["ni"]
emission_wavelength = physical_params["emission_wavelength"]
# ideal sigmas
sxy = 0.22 * emission_wavelength / na
sz = np.sqrt(6) / np.pi * ni * emission_wavelength / na ** 2
amp = physical_params["peak_photons"]
bg = physical_params["background"]
# coordinates
x, y, z = get_skewed_coords((npos, ny, nx), dc, dstep, theta)
# define centers
if centers is None:
centers = []
while len(centers) < ncenters:
xc = np.random.uniform(x.min(), x.max())
yc = np.random.uniform(y.min(), y.max())
zc = np.random.uniform(z.min(), z.max())
c_proposed = np.array([zc, yc, xc])
if point_in_trapezoid(c_proposed, x, y, z):
centers.append(c_proposed)
centers = np.asarray(centers)
img_gt = np.zeros((x+y+z).shape)
for c in centers:
params = [amp, c[2], c[1], c[0], sxy, sz, bg]
img_gt += fit_psf.gaussian3d_psf(x, y, z, dc, params, sf=sf, angles=np.array([0, theta, 0]))
return img_gt, centers
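# usage sketch (parameter values hypothetical):
#   scan_params = {"dc": 0.115, "theta": 30 * np.pi / 180, "dstep": 0.4,
#                  "shape": (100, 256, 512)}
#   physical_params = {"na": 1.0, "ni": 1.4, "emission_wavelength": 0.52,
#                      "peak_photons": 500, "background": 5}
#   img_gt, centers = simulate_img(scan_params, physical_params, ncenters=10)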
def simulate_img_noise(ground_truth, max_photons, cam_gains=2, cam_offsets=100, cam_readout_noise_sds=5, photon_shot_noise=True):
"""
Convert ground truth image (with values between 0-1) to simulated camera image, including the effects of
photon shot noise and camera readout noise.
    :param ground_truth: relative intensity values of the image, expected in the range [0, 1]
    :param max_photons: mean number of photons emitted at the brightest point of the image. Due to photon
    shot noise, the realized number of photons will be different than expected. Furthermore, due to the
    "blurring" of the point spread function, no point in the image may actually realize "max_photons"
    :param cam_gains: gains at each camera pixel
    :param cam_offsets: offsets of each camera pixel
    :param cam_readout_noise_sds: standard deviation characterizing readout noise at each camera pixel
    :param photon_shot_noise: turn on/off photon shot-noise
:return img:
:return snr:
:return max_photons_real:
"""
if np.any(ground_truth > 1) or np.any(ground_truth < 0):
warnings.warn('ground_truth image values should be in the range [0, 1] for max_photons to be correct')
img = max_photons * ground_truth
max_photons_real = img.max()
# signal, used later to get SNR
sig = cam_gains * img
# add shot noise
if photon_shot_noise:
img = np.random.poisson(img)
# add camera noise and convert from photons to ADU
readout_noise = np.random.standard_normal(img.shape) * cam_readout_noise_sds
img = cam_gains * img + readout_noise + cam_offsets
# calculate SNR
# assuming photon number large enough ~gaussian
noise = np.sqrt(cam_readout_noise_sds ** 2 + cam_gains ** 2 * img)
snr = sig / noise
return img, snr, max_photons_real
# ROI tools
def get_skewed_roi_size(sizes, theta, dc, dstep, ensure_odd=True):
"""
Get ROI size in OPM matrix that includes sufficient xy and z points
:param sizes: [z-size, y-size, x-size] in same units as dc, dstep
:param theta: angle in radians
:param dc: camera pixel size
:param dstep: step size
:param bool ensure_odd:
    :return [n0, n1, n2]: integer size of roi in skewed coordinates
"""
# x-size determines n2 size
n2 = int(np.ceil(sizes[2] / dc))
# z-size determines n1
n1 = int(np.ceil(sizes[0] / dc / np.sin(theta)))
# set so that @ top and bottom z-points, ROI includes the full y-size
    n0 = int(np.ceil(((0.5 * (n1 + 1)) * dc * np.cos(theta) + sizes[1]) / dstep))
if ensure_odd:
if np.mod(n2, 2) == 0:
n2 += 1
if np.mod(n1, 2) == 0:
n1 += 1
if np.mod(n0, 2) == 0:
n0 += 1
return [n0, n1, n2]
def get_skewed_roi(center, imgs, coords, sizes):
"""
Given a center value (not necessarily aligned to the coordinates), find the closest region of interest (ROI)
centered around that point.
    :param center: [cz, cy, cx] in same units as x, y, z
:param coords: a tuple of coordinates (z, y, x), where z, y, and x are broadcastable to the same shape as imgs.
These coordinates are supplied as produced by get_skewed_coords()
:param list[int] sizes: [n0, n1, n2], the size of the desired ROI in number of pixels along the skewed coordinate
directions. This can be calculated with the help of get_skewed_roi_size()
:return roi, img_roi, x_roi, y_roi, z_roi:
"""
z, y, x = coords
shape = imgs.shape
i2 = np.argmin(np.abs(x[0, 0, :].ravel() - center[2]))
i1 = np.argmin(np.abs(z[0, :, 0] - center[0]))
i0 = np.argmin(np.abs(y[:, i1, 0] - center[1]))
# i0, i1, _ = np.unravel_index(np.argmin((y - center[1]) ** 2 + (z - center[0]) ** 2), y.shape)
roi = np.array(rois.get_centered_roi([i0, i1, i2], sizes, min_vals=[0, 0, 0], max_vals=np.array(shape)))
img_roi = rois.cut_roi(roi, imgs)
x_roi = x[:, :, roi[4]:roi[5]] # only roi on last one because x has only one entry on first two dims
y_roi = y[roi[0]:roi[1], roi[2]:roi[3], :]
z_roi = z[:, roi[2]:roi[3], :]
z_roi, y_roi, x_roi = np.broadcast_arrays(z_roi, y_roi, x_roi)
# x_roi = rois.cut_roi(roi, x)
# y_roi = rois.cut_roi(roi, y)
# z_roi = rois.cut_roi(roi, z)
return roi, img_roi, x_roi, y_roi, z_roi
def get_roi_mask(center, max_seps, coords):
"""
Get mask to exclude points in the ROI that are far from the center. We do not want to include regions at the edges
of the trapezoidal ROI in processing.
:param center: (cz, cy, cx)
:param max_seps: (dz, dxy)
:param coords: (z, y, x) sizes must be broadcastable
    :return mask: same size as the roi; True where a point is allowed and False otherwise
"""
z_roi, y_roi, x_roi = coords
x_roi_full, y_roi_full, z_roi_full = np.broadcast_arrays(x_roi, y_roi, z_roi)
mask = np.ones(x_roi_full.shape, dtype=bool)
# roi is parallelogram, so still want to cut out points which are too far from center
too_far_xy = np.sqrt((x_roi - center[2]) ** 2 + (y_roi - center[1]) ** 2) > max_seps[1]
too_far_z = np.abs(z_roi - center[0]) > max_seps[0]
too_far = np.logical_or(too_far_xy, too_far_z)
mask[too_far] = False
return mask
# filtering
def get_filter_kernel_skewed(sigmas, dc, theta, dstage, sigma_cutoff=2):
"""
Get gaussian filter convolution kernel in skewed coordinates
:param sigmas: (sz, sy, sx) in the same units as dc and stage
:param dc: pixel size
:param theta: angle in radians
:param dstage: stage step
    :param sigma_cutoff: number of standard deviations to include in the filter. This parameter determines the filter size
:return kernel:
"""
# normalize everything to camera pixel size
sigma_x_pix = sigmas[2] / dc
sigma_y_pix = sigmas[2] / dc
sigma_z_pix = sigmas[0] / dc
nk_x = 2 * int(np.round(sigma_x_pix * sigma_cutoff)) + 1
nk_y = 2 * int(np.round(sigma_y_pix * sigma_cutoff)) + 1
nk_z = 2 * int(np.round(sigma_z_pix * sigma_cutoff)) + 1
# determine how large the OPM geometry ROI needs to be to fit the desired filter
roi_sizes = get_skewed_roi_size([nk_z, nk_y, nk_x], theta, 1, dstage / dc, ensure_odd=True)
# get coordinates to evaluate kernel at
xk, yk, zk = get_skewed_coords(roi_sizes, 1, dstage / dc, theta)
xk = xk - np.mean(xk)
yk = yk - np.mean(yk)
zk = zk - np.mean(zk)
kernel = np.exp(-xk ** 2 / 2 / sigma_x_pix ** 2 - yk ** 2 / 2 / sigma_y_pix ** 2 - zk ** 2 / 2 / sigma_z_pix ** 2)
kernel = kernel / np.sum(kernel)
return kernel
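# usage sketch: the difference-of-gaussian filtering in localize_skewed() below
# builds two of these kernels and subtracts the convolved results, e.g.
#   ks = get_filter_kernel_skewed((0.18, 0.025, 0.025), dc, theta, dstage)
#   kl = get_filter_kernel_skewed((1.8, 0.5, 0.5), dc, theta, dstage)
#   imgs_dog = localize.filter_convolve(imgs, ks) - localize.filter_convolve(imgs, kl)
# (the sigma values here mirror the defaults used in localize_gui.py)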
def get_lapl_gauss_filter_kernel_skewed(sigmas_small, sigmas_large, dc, theta, dstage, sigma_cutoff=2):
"""
Get Laplacian of Gaussian filter convolution kernel in skewed coordinates
:param sigmas_small: (sz, sy, sx) in the same units as dc and stage
:param sigmas_large: (sz, sy, sx) in the same units as dc and stage
:param dc: pixel size
:param theta: angle in radians
:param dstage: stage step
    :param sigma_cutoff: number of standard deviations to include in the filter. This parameter determines the filter size
:return kernel:
"""
# normalize everything to camera pixel size
sxs_pix = sigmas_small[2] / dc
sys_pix = sigmas_small[2] / dc
szs_pix = sigmas_small[0] / dc
sxl_pix = sigmas_large[2] / dc
syl_pix = sigmas_large[2] / dc
szl_pix = sigmas_large[0] / dc
nk_x = 2 * int(np.round(sxl_pix * sigma_cutoff)) + 1
nk_y = 2 * int(np.round(syl_pix * sigma_cutoff)) + 1
nk_z = 2 * int(np.round(szl_pix * sigma_cutoff)) + 1
# determine how large the OPM geometry ROI needs to be to fit the desired filter
roi_sizes = get_skewed_roi_size([nk_z, nk_y, nk_x], theta, 1, dstage / dc, ensure_odd=True)
# get coordinates to evaluate kernel at
xk, yk, zk = get_skewed_coords(roi_sizes, 1, dstage / dc, theta)
xk = xk - np.mean(xk)
yk = yk - np.mean(yk)
zk = zk - np.mean(zk)
kl = np.exp(-xk ** 2 / 2 / sxl_pix ** 2 - yk ** 2 / 2 / syl_pix ** 2 - zk ** 2 / 2 / szl_pix ** 2)
kl = kl / np.sum(kl)
ks = np.exp(-xk ** 2 / 2 / sxs_pix ** 2 - yk ** 2 / 2 / sys_pix ** 2 - zk ** 2 / 2 / szs_pix ** 2)
ks = ks / np.sum(ks)
kernel = ks - kl
return kernel
# identify peaks
def get_skewed_footprint(min_sep_allowed, dc, ds, theta):
"""
Get footprint for maximum filter in skewed coordinates
:param min_sep_allowed: (dz, dy, dx)
:param dc: pixel size
:param ds: stage step
:param theta: angle in radians
:return footprint:
"""
footprint_roi_size = get_skewed_roi_size(min_sep_allowed, theta, dc, ds, ensure_odd=True)
footprint_form = np.ones(footprint_roi_size, dtype=bool)
xf, yf, zf = get_skewed_coords(footprint_form.shape, dc, ds, theta)
xf = xf - xf.mean()
yf = yf - yf.mean()
zf = zf - zf.mean()
footprint_mask = get_roi_mask((0, 0, 0), min_sep_allowed, (zf, yf, xf))
    # get_roi_mask() returns a boolean array, so no NaN replacement or dtype cast is needed
return footprint_form * footprint_mask
# localization functions
def localize_radial_symm(img, coords, mode="radial-symmetry"):
"""
"""
# todo: check quality of localizations
if img.ndim != 3:
raise ValueError("img must be a 3D array, but was %dD" % img.ndim)
nstep, ni1, ni2 = img.shape
z, y, x = coords
if mode == "centroid":
w = np.nansum(img)
xc = np.nansum(img * x) / w
yc = np.nansum(img * y) / w
zc = np.nansum(img * z) / w
elif mode == "radial-symmetry":
yk = 0.5 * (y[:-1, :-1, :] + y[1:, 1:, :])
xk = 0.5 * (x[:, :, :-1] + x[:, :, 1:])
zk = 0.5 * (z[:, :-1] + z[:, 1:])
coords = (zk, yk, xk)
# take a cube of 8 voxels, and compute gradients at the center, using the four pixel diagonals that pass
# through the center
grad_n1 = img[1:, 1:, 1:] - img[:-1, :-1, :-1]
# vectors go [nz, ny, nx]
n1 = np.array([zk[0, 1, 0] - zk[0, 0, 0], yk[1, 1, 0] - yk[0, 0, 0], xk[0, 0, 1] - xk[0, 0, 0]])
n1 = n1 / np.linalg.norm(n1)
grad_n2 = img[1:, :-1, 1:] - img[:-1, 1:, :-1]
n2 = np.array([zk[0, 0, 0] - zk[0, 1, 0], yk[1, 0, 0] - yk[0, 1, 0], xk[0, 0, 1]- xk[0, 0, 0]])
n2 = n2 / np.linalg.norm(n2)
grad_n3 = img[1:, :-1, :-1] - img[:-1, 1:, 1:]
n3 = np.array([zk[0, 0, 0] - zk[0, 1, 0], yk[1, 0, 0] - yk[0, 1, 0], xk[0, 0, 0] - xk[0, 0, 1]])
n3 = n3 / np.linalg.norm(n3)
grad_n4 = img[1:, 1:, :-1] - img[:-1, :-1, 1:]
n4 = np.array([zk[0, 1, 0] - zk[0, 0, 0], yk[1, 1, 0] - yk[0, 0, 0], xk[0, 0, 0] - xk[0, 0, 1]])
n4 = n4 / np.linalg.norm(n4)
# compute the gradient xyz components
# 3 unknowns and 4 eqns, so use pseudo-inverse to optimize overdetermined system
uvec_mat = np.concatenate((n1[None, :], n2[None, :], n3[None, :], n4[None, :]), axis=0)
dat_mat = np.concatenate((grad_n1.ravel()[None, :], grad_n2.ravel()[None, :],
grad_n3.ravel()[None, :], grad_n4.ravel()[None, :]), axis=0)
gradk = np.linalg.pinv(uvec_mat).dot(dat_mat)
gradk = np.reshape(gradk, [3, nstep - 1, ni1 - 1, ni2 - 1])
# compute weights by (1) increasing weight where gradient is large and (2) decreasing weight for points far away
        # from the centroid (as small slope errors can become large as the line is extended to the centroid)
# approximate distance between (xk, yk) and (xc, yc) by assuming (xc, yc) is centroid of the gradient
grad_norm = np.sqrt(np.sum(gradk ** 2, axis=0))
centroid_gns = np.array([np.nansum(zk * grad_norm), np.nansum(yk * grad_norm), np.nansum(xk * grad_norm)]) / \
np.nansum(grad_norm)
dk_centroid = np.sqrt((zk - centroid_gns[0]) ** 2 + (yk - centroid_gns[1]) ** 2 + (xk - centroid_gns[2]) ** 2)
# weights
wk = grad_norm ** 2 / dk_centroid
        # in 3D, parameterize a line passing through point Pk along normal n by
# V(t) = Pk + n * t
# distance between line and point Pc minimized at
        # tmin = -\sum_{i=1}^3 n_i * (Pk_i - Pc_i) / \sum_i n_i^2
# dk^2 = \sum_k \sum_i (Pk + n * tmin - Pc)^2
# again, we want to minimize the quantity
# chi^2 = \sum_k dk^2 * wk
# so we take the derivatives of chi^2 with respect to Pc_x, Pc_y, and Pc_z, which gives a system of linear
# equations, which we can recast into a matrix equation
# np.array([[A, B, C], [D, E, F], [G, H, I]]) * np.array([[Pc_z], [Pc_y], [Pc_x]]) = np.array([[J], [K], [L]])
with np.errstate(invalid="ignore"):
nk = gradk / np.linalg.norm(gradk, axis=0)
# def chi_sqr(xc, yc, zc):
# cs = (zc, yc, xc)
# chi = 0
# for ii in range(3):
# chi += np.sum((coords[ii] + nk[ii] * (cs[jj] - coords[jj]) - cs[ii]) ** 2 * wk)
# return chi
# build 3x3 matrix from above
mat = np.zeros((3, 3))
for ll in range(3): # rows of matrix
for ii in range(3): # columns of matrix
if ii == ll:
mat[ll, ii] += np.nansum(-wk * (nk[ii] * nk[ll] - 1))
else:
mat[ll, ii] += np.nansum(-wk * nk[ii] * nk[ll])
for jj in range(3): # internal sum
if jj == ll:
mat[ll, ii] += np.nansum(wk * nk[ii] * nk[jj] * (nk[jj] * nk[ll] - 1))
else:
mat[ll, ii] += np.nansum(wk * nk[ii] * nk[jj] * nk[jj] * nk[ll])
# build vector from above
vec = np.zeros((3, 1))
coord_sum = zk * nk[0] + yk * nk[1] + xk * nk[2]
for ll in range(3): # sum over J, K, L
for ii in range(3): # internal sum
if ii == ll:
vec[ll] += -np.nansum((coords[ii] - nk[ii] * coord_sum) * (nk[ii] * nk[ll] - 1) * wk)
else:
vec[ll] += -np.nansum((coords[ii] - nk[ii] * coord_sum) * nk[ii] * nk[ll] * wk)
# invert matrix
zc, yc, xc = np.linalg.inv(mat).dot(vec)
        zc, yc, xc = float(zc), float(yc), float(xc)
else:
raise ValueError("mode must be 'centroid' or 'radial-symmetry', but was '%s'" % mode)
# compute useful parameters
# amplitude
amp = np.nanmax(img)
# compute standard devs to estimate sizes
w = np.nansum(img)
sx = np.sqrt(np.nansum((x - xc) ** 2 * img) / w)
sy = np.sqrt(np.nansum((y - yc) ** 2 * img) / w)
sigma_xy = np.sqrt(sx * sy)
sz = np.sqrt(np.nansum((z - zc) ** 2 * img) / w)
return np.array([amp, xc, yc, zc, sigma_xy, sz, np.nan])
# plotting functions
def plot_skewed_roi(fit_params, roi, imgs, theta, x, y, z, init_params=None, same_color_scale=True,
fit_fn=fit_psf.gaussian3d_psf,
figsize=(16, 8), prefix="", save_dir=None):
"""
plot results from fit_roi()
:param fit_params:
:param roi:
:param imgs:
:param dc:
:param theta:
:param x:
:param y:
:param z:
:param figsize:
:return:
"""
# extract useful coordinate info
dstage = y[1, 0] - y[0, 0]
dc = x[0, 0, 1] - x[0, 0, 0]
stage_pos = y[:, 0]
center_fit = np.array([fit_params[3], fit_params[2], fit_params[1]])
if init_params is not None:
center_guess = np.array([init_params[3], init_params[2], init_params[1]])
# get ROI and coordinates
img_roi = rois.cut_roi(roi, imgs)
x_roi = x[:, :, roi[4]:roi[5]] # only roi on last one because x has only one entry on first two dims
y_roi = y[roi[0]:roi[1], roi[2]:roi[3], :]
z_roi = z[:, roi[2]:roi[3], :]
vmin_roi = np.percentile(img_roi[np.logical_not(np.isnan(img_roi))], 1)
vmax_roi = np.percentile(img_roi[np.logical_not(np.isnan(img_roi))], 99.9)
# get fit
fit_volume = fit_fn(x_roi, y_roi, z_roi, dc, fit_params, sf=3, angles=np.array([0., theta, 0.]))
if same_color_scale:
vmin_fit = vmin_roi
vmax_fit = vmax_roi
else:
vmin_fit = np.percentile(fit_volume, 1)
vmax_fit = np.percentile(fit_volume, 99.9)
# interpolate on regular grid
xi_roi, yi_roi, zi_roi, img_roi_unskew = interp_opm_data(img_roi, dc, dstage, theta, mode="ortho-interp")
xi_roi += x_roi.min()
dxi_roi = xi_roi[1] - xi_roi[0]
yi_roi += y_roi.min()
dyi_roi = yi_roi[1] - yi_roi[0]
zi_roi += z_roi.min()
dzi_roi = zi_roi[1] - zi_roi[0]
# fit on regular grid
fit_roi_unskew = fit_fn(xi_roi[None, None, :], yi_roi[None, :, None], zi_roi[:, None, None], dc, fit_params, sf=1)
# ################################
# plot results interpolated on regular grid
# ################################
figh_interp = plt.figure(figsize=figsize)
st_str = "fn = %s, " % fit_fn.__name__ + "ROI = [%d, %d, %d, %d, %d, %d]\n" % tuple(roi) + \
("params = (" + "%3.5f, " * (len(fit_params) - 1) + "%3.5f)") % tuple(fit_params)
if init_params is not None:
st_str += ("\nguess = (" + "%3.5f, " * (len(fit_params) - 1) + "%3.5f)") % tuple(init_params)
plt.suptitle("Fit, max projections, interpolated, " +st_str)
grid = plt.GridSpec(2, 3)
# ################################
# XY, data
# ################################
ax = plt.subplot(grid[0, 0])
extent = [yi_roi[0] - 0.5 * dyi_roi, yi_roi[-1] + 0.5 * dyi_roi,
xi_roi[0] - 0.5 * dxi_roi, xi_roi[-1] + 0.5 * dxi_roi]
plt.imshow(np.nanmax(img_roi_unskew, axis=0).transpose(), vmin=vmin_roi, vmax=vmax_roi, origin="lower",
extent=extent, cmap="bone")
plt.plot(center_fit[1], center_fit[2], 'mx')
if init_params is not None:
plt.plot(center_guess[1], center_guess[2], 'gx')
ax.set_xlim(extent[0:2])
ax.set_ylim(extent[2:4])
ax.set_xlabel("Y (um)")
ax.set_ylabel("X (um)")
ax.set_title("XY")
# ################################
# XZ, data
# ################################
ax = plt.subplot(grid[0, 1])
extent = [xi_roi[0] - 0.5 * dxi_roi, xi_roi[-1] + 0.5 * dxi_roi,
zi_roi[0] - 0.5 * dzi_roi, zi_roi[-1] + 0.5 * dzi_roi]
plt.imshow(np.nanmax(img_roi_unskew, axis=1), vmin=vmin_roi, vmax=vmax_roi, origin="lower",
extent=extent, cmap="bone")
plt.plot(center_fit[2], center_fit[0], 'mx')
if init_params is not None:
plt.plot(center_guess[2], center_guess[0], 'gx')
ax.set_xlim(extent[0:2])
ax.set_ylim(extent[2:4])
ax.set_xlabel("X (um)")
ax.set_ylabel("Z (um)")
ax.set_title("XZ")
# ################################
# YZ, data
# ################################
ax = plt.subplot(grid[0, 2])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
extent = [yi_roi[0] - 0.5 * dyi_roi, yi_roi[-1] + 0.5 * dyi_roi,
zi_roi[0] - 0.5 * dzi_roi, zi_roi[-1] + 0.5 * dzi_roi]
plt.imshow(np.nanmax(img_roi_unskew, axis=2), vmin=vmin_roi, vmax=vmax_roi, origin="lower",
extent=extent, cmap="bone")
plt.plot(center_fit[1], center_fit[0], 'mx')
if init_params is not None:
plt.plot(center_guess[1], center_guess[0], 'gx')
ax.set_xlim(extent[0:2])
ax.set_ylim(extent[2:4])
ax.set_xlabel("Y (um)")
ax.set_ylabel("Z (um)")
ax.set_title("YZ")
# ################################
# XY, fit
# ################################
ax = plt.subplot(grid[1, 0])
extent = [yi_roi[0] - 0.5 * dyi_roi, yi_roi[-1] + 0.5 * dyi_roi,
xi_roi[0] - 0.5 * dxi_roi, xi_roi[-1] + 0.5 * dxi_roi]
plt.imshow(np.nanmax(fit_roi_unskew, axis=0).transpose(), vmin=vmin_fit, vmax=vmax_fit, origin="lower",
extent=extent, cmap="bone")
plt.plot(center_fit[1], center_fit[2], 'mx')
if init_params is not None:
        plt.plot(center_guess[1], center_guess[2], 'gx')
ax.set_xlim(extent[0:2])
ax.set_ylim(extent[2:4])
plt.xlabel("Y (um)")
plt.ylabel("X (um)")
# ################################
# XZ, fit
# ################################
ax = plt.subplot(grid[1, 1])
extent = [xi_roi[0] - 0.5 * dxi_roi, xi_roi[-1] + 0.5 * dxi_roi,
zi_roi[0] - 0.5 * dzi_roi, zi_roi[-1] + 0.5 * dzi_roi]
plt.imshow(np.nanmax(fit_roi_unskew, axis=1), vmin=vmin_fit, vmax=vmax_fit, origin="lower",
extent=extent, cmap="bone")
plt.plot(center_fit[2], center_fit[0], 'mx')
if init_params is not None:
plt.plot(center_guess[2], center_guess[0], 'gx')
ax.set_xlim(extent[0:2])
ax.set_ylim(extent[2:4])
plt.xlabel("X (um)")
plt.ylabel("Z (um)")
# ################################
# YZ, fit
# ################################
ax = plt.subplot(grid[1, 2])
extent = [yi_roi[0] - 0.5 * dyi_roi, yi_roi[-1] + 0.5 * dyi_roi,
zi_roi[0] - 0.5 * dzi_roi, zi_roi[-1] + 0.5 * dzi_roi]
plt.imshow(np.nanmax(fit_roi_unskew, axis=2), vmin=vmin_fit, vmax=vmax_fit, origin="lower",
extent=extent, cmap="bone")
plt.plot(center_fit[1], center_fit[0], 'mx')
if init_params is not None:
plt.plot(center_guess[1], center_guess[0], 'gx')
ax.set_xlim(extent[0:2])
ax.set_ylim(extent[2:4])
plt.xlabel("Y (um)")
plt.ylabel("Z (um)")
if save_dir is not None:
figh_interp.savefig(os.path.join(save_dir, "%smax_projection.png" % prefix))
plt.close(figh_interp)
# ################################
# plot fits in raw OPM coords
# ################################
figh_raw = plt.figure(figsize=figsize)
plt.suptitle("ROI single PSF fit, " + st_str)
grid = plt.GridSpec(3, roi[1] - roi[0])
xp = np.arange(imgs.shape[2]) * dc + x.min()
yp = np.arange(imgs.shape[1]) * dc
extent_roi = [xp[roi[4]] - 0.5 * dc, xp[roi[5] - 1] + 0.5 * dc,
yp[roi[2]] - 0.5 * dc, yp[roi[3] - 1] + 0.5 * dc]
# stage positions contained in this ROI
stage_pos_roi = stage_pos[roi[0]:roi[1]]
# find the one closest to the center
_, _, closest_stage_pos = lab2cam(fit_params[1], fit_params[2], fit_params[3], theta)
jj_min = np.argmin(np.abs(closest_stage_pos - stage_pos_roi))
for jj in range(len(stage_pos_roi)):
xp, yp = xy_lab2cam(fit_params[1], fit_params[2], stage_pos_roi[jj], theta)
ax = plt.subplot(grid[0, jj])
plt.imshow(img_roi[jj], vmin=vmin_roi, vmax=vmax_roi, extent=extent_roi, origin="lower", cmap="bone")
if jj != jj_min:
plt.plot(xp, yp, 'mx')
else:
plt.plot(xp, yp, 'rx')
ax.set_xlim(extent_roi[0:2])
ax.set_ylim(extent_roi[2:4])
plt.title("%0.2fum" % stage_pos_roi[jj])
if jj == 0:
plt.ylabel("Data\ny' (um)")
else:
ax.axes.yaxis.set_ticks([])
ax = plt.subplot(grid[1, jj])
plt.imshow(fit_volume[jj], vmin=vmin_fit, vmax=vmax_fit, extent=extent_roi, origin="lower", cmap="bone")
if jj != jj_min:
plt.plot(xp, yp, 'mx')
else:
plt.plot(xp, yp, 'rx')
ax.set_xlim(extent_roi[0:2])
ax.set_ylim(extent_roi[2:4])
if jj == 0:
plt.ylabel("Fit\ny' (um)")
else:
ax.axes.yaxis.set_ticks([])
ax = plt.subplot(grid[2, jj])
plt.imshow(img_roi[jj] - fit_volume[jj], extent=extent_roi, origin="lower", cmap="bone")
if jj != jj_min:
plt.plot(xp, yp, 'mx')
else:
plt.plot(xp, yp, 'rx')
ax.set_xlim(extent_roi[0:2])
ax.set_ylim(extent_roi[2:4])
if jj == 0:
plt.ylabel("Data - fit\ny' (um)")
else:
ax.axes.yaxis.set_ticks([])
if save_dir is not None:
figh_raw.savefig(os.path.join(save_dir, "%sraw.png" % prefix))
plt.close(figh_raw)
return figh_interp, figh_raw
# orchestration functions
def localize_skewed(imgs, params, abs_threshold, roi_size, filter_sigma_small, filter_sigma_large,
min_spot_sep, offsets=(0, 0, 0), allowed_polygon=None, sf=3,
mode="fit", use_gpu_fit=True, use_gpu_filter=True):
"""
:param imgs: raw OPM data
:param params: {"dc", "dstage", "theta"}
:param abs_threshold:
:param roi_size: (sz, sy, sx) size to include in xyz directions for fit rois. Note: currently sy=sx and sx is unused
:param filter_sigma_small: (sz, sy, sx) sigmas for small size filter to be used in difference of gaussian filter
:param filter_sigma_large:
:param min_spot_sep: (dz, dxy) assume points separated by less than this distance come from one spot
:param offsets: offset to apply to y-coordinates. Useful for analyzing datasets in chunks
:return:
"""
dz_min, dxy_min = min_spot_sep
# ###################################################
# set up geometry
# ###################################################
npos, ny, nx = imgs.shape
dc = params["dc"]
theta = params["theta"]
stage_step = params["dstep"]
x, y, z = get_skewed_coords((npos, ny, nx), dc, stage_step, theta)
x += offsets[2]
y += offsets[1]
z += offsets[0]
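# note on geometry (derived from the indexing used below): get_skewed_coords returns broadcastable
# arrays in which x varies only along the fast camera axis (axis 2), z only along the camera row
# axis (axis 1), and y along both the stage axis (axis 0) and axis 1, which is why they are later
# indexed as x[0, 0, i], z[0, j, 0] and y[k, j, 0]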
# ###################################################
# smooth image and remove background with difference of gaussians filter
# ###################################################
tstart = time.perf_counter()
ks = get_filter_kernel_skewed(filter_sigma_small, dc, theta, stage_step, sigma_cutoff=2)
kl = get_filter_kernel_skewed(filter_sigma_large, dc, theta, stage_step, sigma_cutoff=2)
imgs_hp = localize.filter_convolve(imgs, ks, use_gpu=use_gpu_filter)
imgs_lp = localize.filter_convolve(imgs, kl, use_gpu=use_gpu_filter)
imgs_filtered = imgs_hp - imgs_lp
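# difference-of-gaussians band-pass: the small-sigma kernel smooths out pixel noise while the
# large-sigma kernel estimates the slowly varying background; their difference keeps features
# near the expected spot size and suppresses everything else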
print("Filtered images in %0.2fs" % (time.perf_counter() - tstart))
# ###################################################
# mask off region of each camera frame
# ###################################################
tstart = time.perf_counter()
if allowed_polygon is None:
mask = np.expand_dims(np.ones(imgs_filtered[0].shape, dtype=bool), axis=0)
else:
p = Path(allowed_polygon)
xx, yy = np.meshgrid(range(imgs.shape[2]), range(imgs.shape[1]))
rs = np.concatenate((xx.ravel()[:, None], yy.ravel()[:, None]), axis=1)
mask = p.contains_points(rs).reshape([1, imgs.shape[1], imgs.shape[2]])
print("Masked region in %0.2fs" % (time.perf_counter() - tstart))
# ###################################################
# identify candidate beads
# ###################################################
tstart = time.perf_counter()
footprint = get_skewed_footprint((dz_min, dxy_min, dxy_min), dc, stage_step, theta)
centers_guess_inds, amps = localize.find_peak_candidates(imgs_filtered * mask, footprint, abs_threshold, use_gpu_filter=use_gpu_filter)
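# the footprint defines the neighborhood over which a pixel must be the maximum to count as a
# candidate; it is sized from the minimum allowed spot separation so one spot ideally yields one peak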
# convert to xyz coordinates
xc = x[0, 0, centers_guess_inds[:, 2]]
yc = y[centers_guess_inds[:, 0], centers_guess_inds[:, 1], 0]
zc = z[0, centers_guess_inds[:, 1], 0] # z-position is determined by the y'-index in OPM image
centers_guess = np.concatenate((zc[:, None], yc[:, None], xc[:, None]), axis=1)
print("Found %d points above threshold in %0.2fs" % (len(centers_guess), time.perf_counter() - tstart))
# ###################################################
# average together multiple points that are too close. Necessary because a naive threshold may identify
# several points from the same spot. Particularly important if spots have very different brightness levels.
# ###################################################
tstart = time.perf_counter()
inds = np.ravel_multi_index(centers_guess_inds.transpose(), imgs_filtered.shape)
weights = imgs_filtered.ravel()[inds]
centers_guess, inds_comb = localize.filter_nearby_peaks(centers_guess, dxy_min, dz_min, weights=weights, mode="average")
amps = amps[inds_comb]
print("Found %d points separated by dxy > %0.5g and dz > %0.5g in %0.1fs" %
(len(centers_guess), dxy_min, dz_min, time.perf_counter() - tstart))
# ###################################################
# prepare ROIs
# ###################################################
tstart = time.perf_counter()
# cut rois out
roi_size_skew = get_skewed_roi_size(roi_size, theta, dc, stage_step, ensure_odd=True)
rois, img_rois, xrois, yrois, zrois = zip(*[get_skewed_roi(c, imgs, x, y, z, roi_size_skew) for c in centers_guess])
rois = np.asarray(rois)
# exclude some regions of roi
roi_masks = [get_roi_mask(c, (np.inf, 0.5 * roi_size[1]), (zrois[ii], yrois[ii], xrois[ii])) for ii, c in enumerate(centers_guess)]
# mask regions
xrois, yrois, zrois, img_rois = zip(*[(xr[rm][None, :], yr[rm][None, :], zr[rm][None, :], ir[rm][None, :])
for xr, yr, zr, ir, rm in zip(xrois, yrois, zrois, img_rois, roi_masks)])
# extract guess values
bgs = np.array([np.mean(r) for r in img_rois])
sxs = np.array([np.sqrt(np.sum(ir * (xr - cg[2]) ** 2) / np.sum(ir)) for ir, xr, cg in zip(img_rois, xrois, centers_guess)])
sys = np.array([np.sqrt(np.sum(ir * (yr - cg[1]) ** 2) / np.sum(ir)) for ir, yr, cg in zip(img_rois, yrois, centers_guess)])
sxys = np.expand_dims(0.5 * (sxs + sys), axis=1)
szs = np.expand_dims(np.array([np.sqrt(np.sum(ir * (zr - cg[0]) ** 2) / np.sum(ir)) for ir, zr, cg in zip(img_rois, zrois, centers_guess)]), axis=1)
# get initial parameter guesses
init_params = np.concatenate((np.expand_dims(amps, axis=1),
centers_guess[:, 2][:, None],
centers_guess[:, 1][:, None],
centers_guess[:, 0][:, None],
sxys, szs,
np.expand_dims(bgs, axis=1)),
axis=1)
print("Prepared %d rois in %0.2fs" % (len(rois), time.perf_counter() - tstart))
# ###################################################
# localization
# ###################################################
if mode == "fit":
print("starting fitting for %d rois" % centers_guess.shape[0])
tstart = time.perf_counter()
fit_params, fit_states, chi_sqrs, niters, fit_t = localize.fit_gauss_rois(img_rois, (zrois, yrois, xrois),
init_params, estimator="LSE",
sf=sf, dc=dc, angles=(0., theta, 0.),
use_gpu=use_gpu_fit)
elif mode == "radial-symmetry":
print("starting radial-symmetry localization for %d rois" % len(centers_guess))
img_roi_masked = [rois.cut_roi(rois[ii], imgs_filtered) * roi_masks[ii] for ii in range(len(centers_guess))]
for ii in range(len(img_roi_masked)):
img_roi_masked[ii][img_roi_masked[ii] < 0] = 0
results = joblib.Parallel(n_jobs=-1, verbose=1, timeout=None)(
joblib.delayed(localize_radial_symm)(img_roi_masked[ii], (zrois[ii], yrois[ii], xrois[ii]),
mode="radial-symmetry")
for ii in range(len(centers_guess)))
fit_params = np.asarray(results)
else:
raise ValueError("'mode' must be 'fit' or 'radial-symmetry' but was '%s'" % mode)
tend = time.perf_counter()
print("Localization took %0.2fs" % (tend - tstart))
fit_results = np.concatenate((np.expand_dims(fit_states, axis=1),
np.expand_dims(chi_sqrs, axis=1),
np.expand_dims(niters, axis=1)), axis=1)
return rois, fit_params, init_params, fit_results, imgs_filtered, (z, y, x)
def filter_localizations(fit_params, init_params, coords, fit_dist_max_err, min_spot_sep,
sigma_bounds, amp_min=0, dist_boundary_min=(0, 0), mode="skewed"):
"""
Given a collection of fits, determine which fits are plausible localizations based on the fit parameters.
:param fit_params: nfits x 7 array
:param init_params: nfits x 7 array
:param coords: (z, y, x)
:param fit_dist_max_err: (dz_max, dxy_max) maximum allowed distance between the fit center and the initial guess
:param min_spot_sep: (dz, dxy) assume points separated by less than this distance come from one spot
:param sigma_bounds: ((sz_min, sxy_min), (sz_max, sxy_max)) exclude fits with sigmas that fall outside
these ranges
:param amp_min: exclude fits with smaller amplitude
:param dist_boundary_min: (dz_min, dxy_min)
:return to_keep, conditions, condition_names, filter_settings:
"""
filter_settings = {"fit_dist_max_err": fit_dist_max_err, "min_spot_sep": min_spot_sep,
"sigma_bounds": sigma_bounds, "amp_min": amp_min, "dist_boundary_min": dist_boundary_min}
z, y, x = coords
centers_guess = np.concatenate((init_params[:, 3][:, None], init_params[:, 2][:, None], init_params[:, 1][:, None]), axis=1)
centers_fit = np.concatenate((fit_params[:, 3][:, None], fit_params[:, 2][:, None], fit_params[:, 1][:, None]), axis=1)
# ###################################################
# only keep points if size and position were reasonable
# ###################################################
dz_min, dxy_min = dist_boundary_min
if mode == "skewed":
in_bounds = point_in_trapezoid(centers_fit, x, y, z)
zmax, zmin = get_trapezoid_zbound(centers_fit[:, 1], coords)
far_from_boundary_z = np.logical_and(centers_fit[:, 0] > zmin + dz_min, centers_fit[:, 0] < zmax - dz_min)
ymax, ymin = get_trapezoid_ybound(centers_fit[:, 0], coords)
far_from_boundary_y = np.logical_and(centers_fit[:, 1] > ymin + dxy_min, centers_fit[:, 1] < ymax - dxy_min)
xmin = np.min(x)
xmax = np.max(x)
far_from_boundary_x = np.logical_and(centers_fit[:, 2] > xmin + dxy_min, centers_fit[:, 2] < xmax - dxy_min)
in_bounds = np.logical_and.reduce((in_bounds, far_from_boundary_x, far_from_boundary_y, far_from_boundary_z))
elif mode == "straight":
in_bounds = np.logical_and.reduce((fit_params[:, 1] >= x.min() + dxy_min,
fit_params[:, 1] <= x.max() - dxy_min,
fit_params[:, 2] >= y.min() + dxy_min,
fit_params[:, 2] <= y.max() - dxy_min,
fit_params[:, 3] >= z.min() + dz_min,
fit_params[:, 3] <= z.max() - dz_min))
else:
raise ValueError("mode must be 'skewed' or 'straight' but was '%s'" % mode)
# maximum distance between fit center and guess center
z_err_fit_max, xy_fit_err_max = fit_dist_max_err
center_close_to_guess_xy = np.sqrt((centers_guess[:, 2] - fit_params[:, 1])**2 +
(centers_guess[:, 1] - fit_params[:, 2])**2) <= xy_fit_err_max
center_close_to_guess_z = np.abs(centers_guess[:, 0] - fit_params[:, 3]) <= z_err_fit_max
# maximum/minimum sigmas AND combine all conditions
(sz_min, sxy_min), (sz_max, sxy_max) = sigma_bounds
conditions = np.stack((in_bounds, center_close_to_guess_xy, center_close_to_guess_z,
fit_params[:, 4] <= sxy_max, fit_params[:, 4] >= sxy_min,
fit_params[:, 5] <= sz_max, fit_params[:, 5] >= sz_min,
fit_params[:, 0] >= amp_min), axis=1)
condition_names = ["in_bounds", "center_close_to_guess_xy", "center_close_to_guess_z",
"xy_size_small_enough", "xy_size_big_enough", "z_size_small_enough",
"z_size_big_enough", "amp_ok"]
to_keep_temp = np.logical_and.reduce(conditions, axis=1)
# ###################################################
# check for unique points
# ###################################################
dz, dxy = min_spot_sep
if np.sum(to_keep_temp) > 0:
# only keep unique center if close enough
_, unique_inds = localize.filter_nearby_peaks(centers_fit[to_keep_temp], dxy, dz, mode="keep-one")
# unique mask for those in to_keep_temp
is_unique = np.zeros(np.sum(to_keep_temp), dtype=bool)
is_unique[unique_inds] = True
# get indices of non-unique points among all points
not_unique_inds_full = np.arange(len(to_keep_temp), dtype=int)[to_keep_temp][np.logical_not(is_unique)]
# get mask in full space
unique = np.ones(len(fit_params), dtype=bool)
unique[not_unique_inds_full] = False
else:
unique = np.ones(len(fit_params), dtype=bool)
conditions = np.concatenate((conditions, np.expand_dims(unique, axis=1)), axis=1)
condition_names += ["unique"]
to_keep = np.logical_and(to_keep_temp, unique)
return to_keep, conditions, condition_names, filter_settings
| 51,449 | 38.668466 | 152 | py |
| OPM | OPM-master/pycromanager-control/run_opm_stagescan.py |
#!/usr/bin/env python
'''
OPM stage control without fluidics.
Shepherd 04/21 - large-scale changes for new metadata and on-the-fly uploading to server for simultaneous reconstruction
'''
# imports
from pycromanager import Bridge, Acquisition
from pathlib import Path
import numpy as np
import time
import sys
import msvcrt
import pandas as pd
import subprocess
import PyDAQmx as daq
import ctypes as ct
from itertools import compress
import shutil
from threading import Thread
import data_io
import gc
def camera_hook_fn(event,bridge,event_queue):
core = bridge.get_core()
command='1SCAN'
core.set_property('TigerCommHub','SerialCommand',command)
return event
def main():
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------Begin setup of scan parameters--------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
# set up lasers
channel_labels = ["405", "488", "561", "635", "730"]
channel_states = [False, False, True, False, False] # true -> active, false -> inactive
channel_powers = [50, 10, 90, 100, 95] # (0 -> 100%)
do_ind = [0, 1, 2, 3, 4] # digital output line corresponding to each channel
# parse which channels are active
active_channel_indices = [ind for ind, st in zip(do_ind, channel_states) if st]
n_active_channels = len(active_channel_indices)
print("%d active channels: " % n_active_channels, end="")
for ind in active_channel_indices:
print("%s " % channel_labels[ind], end="")
print("")
# exposure time
exposure_ms = 50.0
# excess scan positions
excess_scan_positions = 10
# galvo voltage at neutral
galvo_neutral_volt = -0.15 # unit: volts
# scan axis limits. Use stage positions reported by MM
scan_axis_start_um = 8680. #unit: um
scan_axis_end_um = 8800. #unit: um
# tile axis limits. Use stage positions reported by MM
tile_axis_start_um = -3841.28 #unit: um
tile_axis_end_um = -3841.28 #unit: um
# height axis limits. Use stage positions reported by MM
height_axis_start_um = 13128.63 #unit: um
height_axis_end_um = 13128.63 #unit: um
# number of timepoints to execute
# TO DO: add in control for rate of experiment
timepoints = 1
# FOV parameters
# ONLY MODIFY IF NECESSARY
# ROI = [0, 1152, 2304, 512] #unit: pixels
# setup file name
save_directory=Path('D:/20210831')
save_name = 'stage_scan'
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------End setup of scan parameters----------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
# connect to Micromanager instance
bridge = Bridge()
core = bridge.get_core()
# turn off lasers
core.set_config('Laser','Off')
core.wait_for_config('Laser','Off')
# set camera to fast readout mode
core.set_config('Camera-Setup','ScanMode3')
core.wait_for_config('Camera-Setup','ScanMode3')
# set camera to START mode upon input trigger
core.set_config('Camera-TriggerType','START')
core.wait_for_config('Camera-TriggerType','START')
# set camera to positive input trigger
core.set_config('Camera-TriggerPolarity','POSITIVE')
core.wait_for_config('Camera-TriggerPolarity','POSITIVE')
# set camera to internal control
core.set_config('Camera-TriggerSource','INTERNAL')
core.wait_for_config('Camera-TriggerSource','INTERNAL')
# set camera to output positive triggers on all lines for exposure
core.set_property('OrcaFusionBT','OUTPUT TRIGGER KIND[0]','EXPOSURE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER KIND[1]','EXPOSURE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER KIND[2]','EXPOSURE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER POLARITY[0]','POSITIVE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER POLARITY[1]','POSITIVE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER POLARITY[2]','POSITIVE')
# change core timeout for long stage moves
core.set_property('Core','TimeoutMs',100000)
time.sleep(1)
# set exposure
core.set_exposure(exposure_ms)
# determine image size
core.snap_image()
y_pixels = core.get_image_height()
x_pixels = core.get_image_width()
# grab exposure
true_exposure = core.get_exposure()
# get actual framerate from micromanager properties
actual_readout_ms = true_exposure+float(core.get_property('OrcaFusionBT','ReadoutTime')) #unit: ms
# camera pixel size
pixel_size_um = .115 # unit: um
# scan axis setup
scan_axis_step_um = 0.4 # unit: um
scan_axis_step_mm = scan_axis_step_um / 1000. #unit: mm
scan_axis_start_mm = scan_axis_start_um / 1000. #unit: mm
scan_axis_end_mm = scan_axis_end_um / 1000. #unit: mm
scan_axis_range_um = np.abs(scan_axis_end_um-scan_axis_start_um) # unit: um
scan_axis_range_mm = scan_axis_range_um / 1000 #unit: mm
actual_exposure_s = actual_readout_ms / 1000. #unit: s
scan_axis_speed = np.round(scan_axis_step_mm / actual_exposure_s,4) #unit: mm/s
scan_axis_positions = np.rint(scan_axis_range_mm / scan_axis_step_mm).astype(int) #unit: number of positions
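# the stage speed is chosen so the camera takes exactly one frame per scan step: speed = step / frame time.
# illustrative numbers: a 0.4 um step with a ~52 ms effective frame time gives 0.0004 mm / 0.052 s ~ 0.0077 mm/s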
# tile axis setup
tile_axis_overlap=0.2 #unit: percentage
tile_axis_range_um = np.abs(tile_axis_end_um - tile_axis_start_um) #unit: um
tile_axis_range_mm = tile_axis_range_um / 1000 #unit: mm
tile_axis_ROI = x_pixels*pixel_size_um #unit: um
tile_axis_step_um = np.round((tile_axis_ROI) * (1-tile_axis_overlap),2) #unit: um
tile_axis_step_mm = tile_axis_step_um / 1000 #unit: mm
tile_axis_positions = np.rint(tile_axis_range_mm / tile_axis_step_mm).astype(int)+1 #unit: number of positions
# if tile_axis_positions rounded to zero, make sure we acquire at least one position
if tile_axis_positions == 0:
tile_axis_positions=1
# height axis setup
height_axis_overlap=0.2 #unit: percentage
height_axis_range_um = np.abs(height_axis_end_um-height_axis_start_um) #unit: um
height_axis_range_mm = height_axis_range_um / 1000 #unit: mm
height_axis_ROI = y_pixels*pixel_size_um*np.sin(30.*np.pi/180.) #unit: um
height_axis_step_um = np.round((height_axis_ROI)*(1-height_axis_overlap),2) #unit: um
height_axis_step_mm = height_axis_step_um / 1000 #unit: mm
height_axis_positions = np.rint(height_axis_range_mm / height_axis_step_mm).astype(int)+1 #unit: number of positions
# if height_axis_positions rounded to zero, make sure we acquire at least one position
if height_axis_positions==0:
height_axis_positions=1
# get handle to xy and z stages
xy_stage = core.get_xy_stage_device()
z_stage = core.get_focus_device()
# galvo voltage at neutral
galvo_neutral_volt = -.15 # unit: volts
# set the galvo to the neutral position if it is not already
try:
taskAO_first = daq.Task()
taskAO_first.CreateAOVoltageChan("/Dev1/ao0","",-4.0,4.0,daq.DAQmx_Val_Volts,None)
taskAO_first.WriteAnalogScalarF64(True, -1, galvo_neutral_volt, None)
taskAO_first.StopTask()
taskAO_first.ClearTask()
except daq.DAQError as err:
print("DAQmx Error %s"%err)
# Setup Tiger controller to pass signal when the scan stage cross the start position to the PLC
plcName = 'PLogic:E:36'
propPosition = 'PointerPosition'
propCellConfig = 'EditCellConfig'
#addrOutputBNC3 = 35 # BNC3 on the PLC front panel
addrOutputBNC1 = 33 # BNC1 on the PLC front panel
addrStageSync = 46 # TTL5 on Tiger backplane = stage sync signal
# connect stage sync signal to BNC output
core.set_property(plcName, propPosition, addrOutputBNC1)
core.set_property(plcName, propCellConfig, addrStageSync)
# turn on 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','No')
# set tile axis speed for all moves
command = 'SPEED Y=.1'
core.set_property('TigerCommHub','SerialCommand',command)
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
core.set_property('TigerCommHub','SerialCommand',command)
ready = core.get_property('TigerCommHub','SerialResponse')
time.sleep(.500)
# set scan axis speed for large move to initial position
command = 'SPEED X=.1'
core.set_property('TigerCommHub','SerialCommand',command)
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
core.set_property('TigerCommHub','SerialCommand',command)
ready = core.get_property('TigerCommHub','SerialResponse')
time.sleep(.500)
# turn off 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','Yes')
# move scan stage to initial position
core.set_xy_position(scan_axis_start_um,tile_axis_start_um)
core.wait_for_device(xy_stage)
core.set_position(height_axis_start_um)
core.wait_for_device(z_stage)
# turn on 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','No')
# set scan axis speed to correct speed for continuous stage scan
# expects mm/s
command = 'SPEED X='+str(scan_axis_speed)
core.set_property('TigerCommHub','SerialCommand',command)
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
core.set_property('TigerCommHub','SerialCommand',command)
ready = core.get_property('TigerCommHub','SerialResponse')
time.sleep(.500)
# set scan axis to true 1D scan with no backlash
command = '1SCAN X? Y=0 Z=9 F=0'
core.set_property('TigerCommHub','SerialCommand',command)
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
core.set_property('TigerCommHub','SerialCommand',command)
ready = core.get_property('TigerCommHub','SerialResponse')
time.sleep(.500)
# set range and return speed (5% of max) for scan axis
# expects mm
command = '1SCANR X='+str(scan_axis_start_mm)+' Y='+str(scan_axis_end_mm)+' R=10'
core.set_property('TigerCommHub','SerialCommand',command)
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
core.set_property('TigerCommHub','SerialCommand',command)
ready = core.get_property('TigerCommHub','SerialResponse')
time.sleep(.500)
# turn off 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','Yes')
# set all lasers to external triggering
core.set_config('Modulation-405','External-Digital')
core.wait_for_config('Modulation-405','External-Digital')
core.set_config('Modulation-488','External-Digital')
core.wait_for_config('Modulation-488','External-Digital')
core.set_config('Modulation-561','External-Digital')
core.wait_for_config('Modulation-561','External-Digital')
core.set_config('Modulation-637','External-Digital')
core.wait_for_config('Modulation-637','External-Digital')
core.set_config('Modulation-730','External-Digital')
core.wait_for_config('Modulation-730','External-Digital')
# turn all lasers on
core.set_config('Laser','AllOn')
core.wait_for_config('Laser','AllOn')
# set lasers to user defined power
core.set_property('Coherent-Scientific Remote','Laser 405-100C - PowerSetpoint (%)',channel_powers[0])
core.set_property('Coherent-Scientific Remote','Laser 488-150C - PowerSetpoint (%)',channel_powers[1])
core.set_property('Coherent-Scientific Remote','Laser OBIS LS 561-150 - PowerSetpoint (%)',channel_powers[2])
core.set_property('Coherent-Scientific Remote','Laser 637-140C - PowerSetpoint (%)',channel_powers[3])
core.set_property('Coherent-Scientific Remote','Laser 730-30C - PowerSetpoint (%)',channel_powers[4])
# setup DAQ
samples_per_ch = 2
DAQ_sample_rate_Hz = 10000
num_DI_channels = 8
# set the galvo to neutral
taskAO_last = daq.Task()
taskAO_last.CreateAOVoltageChan("/Dev1/ao0","",-4.0,4.0,daq.DAQmx_Val_Volts,None)
taskAO_last.WriteAnalogScalarF64(True, -1, galvo_neutral_volt, None)
taskAO_last.StopTask()
taskAO_last.ClearTask()
# output experiment info
print('Number of X positions: '+str(scan_axis_positions))
print('Number of Y tiles: '+str(tile_axis_positions))
print('Number of Z slabs: '+str(height_axis_positions))
print('Number of channels: '+str(n_active_channels))
# flags for metadata and processing
setup_processing=True
setup_metadata=True
# create events to execute scan
events = []
for x in range(scan_axis_positions+excess_scan_positions):
evt = { 'axes': {'z': x}}
events.append(evt)
for t_idx in range(timepoints):
for y_idx in range(tile_axis_positions):
# calculate tile axis position
tile_position_um = tile_axis_start_um+(tile_axis_step_um*y_idx)
# move XY stage to new tile axis position
core.set_xy_position(scan_axis_start_um,tile_position_um)
core.wait_for_device(xy_stage)
for z_idx in range(height_axis_positions):
# calculate height axis position
height_position_um = height_axis_start_um+(height_axis_step_um*z_idx)
# move Z stage to new height axis position
core.set_position(height_position_um)
core.wait_for_device(z_stage)
for ch_idx in active_channel_indices:
# create DAQ pattern for laser strobing controlled via rolling shutter
dataDO = np.zeros((samples_per_ch,num_DI_channels),dtype=np.uint8)
dataDO[0,ch_idx]=1
dataDO[1,ch_idx]=0
#print(dataDO)
# update save_name with current tile information
save_name_tyzc = save_name +'_t'+str(t_idx).zfill(4)+'_y'+str(y_idx).zfill(4)+'_z'+str(z_idx).zfill(4)+'_ch'+str(ch_idx).zfill(4)
# turn on 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','No')
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
core.set_property('TigerCommHub','SerialCommand',command)
ready = core.get_property('TigerCommHub','SerialResponse')
time.sleep(.500)
# turn off 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','Yes')
# save actual stage positions
xy_pos = core.get_xy_stage_position()
stage_x = xy_pos.x
stage_y = xy_pos.y
stage_z = core.get_position()
current_stage_data = [{'stage_x': stage_x, 'stage_y': stage_y, 'stage_z': stage_z}]
df_current_stage = pd.DataFrame(current_stage_data)
# setup DAQ for laser strobing
try:
# ----- DIGITAL input -------
taskDI = daq.Task()
taskDI.CreateDIChan("/Dev1/PFI0","",daq.DAQmx_Val_ChanForAllLines)
## Configure change detection timing (from wave generator)
taskDI.CfgInputBuffer(0) # must be enforced for change-detection timing, i.e no buffer
taskDI.CfgChangeDetectionTiming("/Dev1/PFI0","/Dev1/PFI0",daq.DAQmx_Val_ContSamps,0)
## Set where the starting trigger
taskDI.CfgDigEdgeStartTrig("/Dev1/PFI0",daq.DAQmx_Val_Rising)
## Export DI signal to unused PFI pins, for clock and start
taskDI.ExportSignal(daq.DAQmx_Val_ChangeDetectionEvent, "/Dev1/PFI2")
taskDI.ExportSignal(daq.DAQmx_Val_StartTrigger,"/Dev1/PFI1")
# ----- DIGITAL output ------
taskDO = daq.Task()
taskDO.CreateDOChan("/Dev1/port0/line0:7","",daq.DAQmx_Val_ChanForAllLines)
## Configure timing (from DI task)
taskDO.CfgSampClkTiming("/Dev1/PFI2",DAQ_sample_rate_Hz,daq.DAQmx_Val_Rising,daq.DAQmx_Val_ContSamps,samples_per_ch)
## Write the output waveform
samples_per_ch_ct_digital = ct.c_int32()
taskDO.WriteDigitalLines(samples_per_ch,False,10.0,daq.DAQmx_Val_GroupByChannel,dataDO,ct.byref(samples_per_ch_ct_digital),None)
## ------ Start digital input and output tasks ----------
taskDO.StartTask()
taskDI.StartTask()
except daq.DAQError as err:
print("DAQmx Error %s"%err)
# set camera to external control
# DCAM sets the camera back to INTERNAL mode after each acquisition
core.set_config('Camera-TriggerSource','EXTERNAL')
core.wait_for_config('Camera-TriggerSource','EXTERNAL')
# verify that camera actually switched back to external trigger mode
trigger_state = core.get_property('OrcaFusionBT','TRIGGER SOURCE')
# if not in external control, keep trying until camera changes settings
while not(trigger_state =='EXTERNAL'):
time.sleep(2.0)
core.set_config('Camera-TriggerSource','EXTERNAL')
core.wait_for_config('Camera-TriggerSource','EXTERNAL')
trigger_state = core.get_property('OrcaFusionBT','TRIGGER SOURCE')
print('T: '+str(t_idx)+' Y: '+str(y_idx)+' Z: '+str(z_idx)+' C: '+str(ch_idx))
# run acquisition for this tyzc combination
with Acquisition(directory=save_directory, name=save_name_tyzc,
post_camera_hook_fn=camera_hook_fn, show_display=False, max_multi_res_index=0,
saving_queue_size=5000) as acq:
acq.acquire(events)
# clean up acquisition so that AcqEngJ releases directory.
# NOTE: This currently does not work.
acq = None
acq_deleted = False
while not(acq_deleted):
try:
del acq
except:
time.sleep(0.1)
acq_deleted = False
else:
gc.collect()
acq_deleted = True
# stop DAQ and make sure it is at zero
try:
## Stop and clear both tasks
taskDI.StopTask()
taskDO.StopTask()
taskDI.ClearTask()
taskDO.ClearTask()
except daq.DAQError as err:
print("DAQmx Error %s"%err)
# save experimental info after first tile.
# we do it this way so that Pycromanager can manage the directories.
if (setup_metadata):
# save stage scan parameters
scan_param_data = [{'root_name': str(save_name),
'scan_type': str('stage'),
'theta': float(30.0),
'scan_step': float(scan_axis_step_um*1000.),
'pixel_size': float(pixel_size_um*1000.),
'num_t': int(timepoints),
'num_y': int(tile_axis_positions),
'num_z': int(height_axis_positions),
'num_ch': int(n_active_channels),
'scan_axis_positions': int(scan_axis_positions),
'excess_scan_positions': int(excess_scan_positions),
'y_pixels': int(y_pixels),
'x_pixels': int(x_pixels),
'405_active': bool(channel_states[0]),
'488_active': bool(channel_states[1]),
'561_active': bool(channel_states[2]),
'635_active': bool(channel_states[3]),
'730_active': bool(channel_states[4])}]
# df_stage_scan_params = pd.DataFrame(scan_param_data)
# save_name_stage_params = save_directory / 'scan_metadata.csv'
# df_stage_scan_params.to_csv(save_name_stage_params)
data_io.write_metadata(scan_param_data[0], save_directory / Path('scan_metadata.csv'))
setup_metadata=False
# save stage scan positions after each tile
save_name_stage_positions = Path('t'+str(t_idx).zfill(4)+'_y'+str(y_idx).zfill(4)+'_z'+str(z_idx).zfill(4)+'_ch'+str(ch_idx).zfill(4)+'_stage_positions.csv')
save_name_stage_positions = save_directory / save_name_stage_positions
# todo: use data_io instead
df_current_stage.to_csv(save_name_stage_positions)
# turn on 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','No')
# check to make sure Tiger is not busy
ready='B'
while(ready!='N'):
command = 'STATUS'
core.set_property('TigerCommHub','SerialCommand',command)
ready = core.get_property('TigerCommHub','SerialResponse')
time.sleep(.500)
# turn off 'transmit repeated commands' for Tiger
core.set_property('TigerCommHub','OnlySendSerialCommandOnChange','Yes')
'''
# if first tile, make parent directory on NAS and start reconstruction script on the server
if setup_processing:
# make home directory on NAS
save_directory_path = Path(save_directory)
remote_directory = Path('y:/') / Path(save_directory_path.parts[1])
cmd='mkdir ' + str(remote_directory)
status_mkdir = subprocess.run(cmd, shell=True)
# copy full experiment metadata to NAS
src= Path(save_directory) / Path('scan_metadata.csv')
dst= Path(remote_directory) / Path('scan_metadata.csv')
Thread(target=shutil.copy, args=[str(src), str(dst)]).start()
setup_processing=False
# copy current tyzc metadata to NAS
save_directory_path = Path(save_directory)
remote_directory = Path('y:/') / Path(save_directory_path.parts[1])
src= Path(save_directory) / Path(save_name_stage_positions.parts[2])
dst= Path(remote_directory) / Path(save_name_stage_positions.parts[2])
Thread(target=shutil.copy, args=[str(src), str(dst)]).start()
# copy current tyzc data to NAS
save_directory_path = Path(save_directory)
remote_directory = Path('y:/') / Path(save_directory_path.parts[1])
src= Path(save_directory) / Path(save_name_tyzc+ '_1')
dst= Path(remote_directory) / Path(save_name_tyzc+ '_1')
Thread(target=shutil.copytree, args=[str(src), str(dst)]).start()
'''
# set lasers to zero power
channel_powers = [0.,0.,0.,0.,0.]
core.set_property('Coherent-Scientific Remote','Laser 405-100C - PowerSetpoint (%)',channel_powers[0])
core.set_property('Coherent-Scientific Remote','Laser 488-150C - PowerSetpoint (%)',channel_powers[1])
core.set_property('Coherent-Scientific Remote','Laser OBIS LS 561-150 - PowerSetpoint (%)',channel_powers[2])
core.set_property('Coherent-Scientific Remote','Laser 637-140C - PowerSetpoint (%)',channel_powers[3])
core.set_property('Coherent-Scientific Remote','Laser 730-30C - PowerSetpoint (%)',channel_powers[4])
# turn all lasers off
core.set_config('Laser','Off')
core.wait_for_config('Laser','Off')
# set all lasers back to software control
core.set_config('Modulation-405','CW (constant power)')
core.wait_for_config('Modulation-405','CW (constant power)')
core.set_config('Modulation-488','CW (constant power)')
core.wait_for_config('Modulation-488','CW (constant power)')
core.set_config('Modulation-561','CW (constant power)')
core.wait_for_config('Modulation-561','CW (constant power)')
core.set_config('Modulation-637','CW (constant power)')
core.wait_for_config('Modulation-637','CW (constant power)')
core.set_config('Modulation-730','CW (constant power)')
core.wait_for_config('Modulation-730','CW (constant power)')
# set camera to internal control
core.set_config('Camera-TriggerSource','INTERNAL')
core.wait_for_config('Camera-TriggerSource','INTERNAL')
bridge.close()
#-----------------------------------------------------------------------------
if __name__ == "__main__":
main()
| 26,737 | 44.318644 | 177 | py |
| OPM | OPM-master/pycromanager-control/run_opm_galvoscan.py |
#!/usr/bin/env python
'''
OPM galvo scan using Pycromanager.
D. Shepherd 09/21 - bring metadata in line with new reconstruction code. Attempt timelapse with pause using event structure.
D. Shepherd 04/21 - streamline code for fast acquisition and immediate upload to server
P. Brown 03/21 - multiline digital and analog NI DAQ control using camera as master
D. Shepherd 01/21 - initial pycromanager work, ported from stage control code
'''
# imports
from pycromanager import Bridge, Acquisition
from pathlib import Path
import numpy as np
import PyDAQmx as daq
import ctypes as ct
import subprocess
import shutil
from threading import Thread
import data_io
def main():
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------Begin setup of scan parameters--------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
# set up lasers
channel_labels = ["405", "488", "561", "635", "730"]
channel_states = [False, False, True, False, False] # true -> active, false -> inactive
channel_powers = [30, 30, 100, 100, 0] # (0 -> 100%)
do_ind = [0, 1, 2, 3, 4] # digital output line corresponding to each channel
# parse which channels are active
active_channel_indices = [ind for ind, st in zip(do_ind, channel_states) if st]
n_active_channels = len(active_channel_indices)
print("%d active channels: " % n_active_channels, end="")
for ind in active_channel_indices:
print("%s " % channel_labels[ind], end="")
print("")
# exposure time
exposure_ms = 2.0 #unit: ms
# scan axis range
scan_axis_range_um = 20.0 # unit: microns
# galvo voltage at neutral
#galvo_neutral_volt = 0 # unit: volts
galvo_neutral_volt = -.150
scan_axis_step_um = 0.4 # unit: um
# timepoints
timepoints = 400
# timepoint interval (s)
timing_interval = 0
# setup file name
save_directory=Path('D:/20211204')
save_name = 'fla-suc40'
# automatically transfer files to NAS at end of dataset
transfer_files = False
# display data
display_flag = False
#------------------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------End setup of scan parameters----------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------
with Bridge() as bridge:
core = bridge.get_core()
# give camera time to change modes if necessary
core.set_config('Camera-Setup','ScanMode3')
core.wait_for_config('Camera-Setup','ScanMode3')
# set camera to internal trigger
core.set_config('Camera-TriggerSource','INTERNAL')
core.wait_for_config('Camera-TriggerSource','INTERNAL')
# set camera to output positive triggers on all lines for exposure
core.set_property('OrcaFusionBT','OUTPUT TRIGGER KIND[0]','EXPOSURE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER KIND[1]','EXPOSURE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER KIND[2]','EXPOSURE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER POLARITY[0]','POSITIVE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER POLARITY[1]','POSITIVE')
core.set_property('OrcaFusionBT','OUTPUT TRIGGER POLARITY[2]','POSITIVE')
# set exposure time
core.set_exposure(exposure_ms)
# determine image size
core.snap_image()
y_pixels = core.get_image_height()
x_pixels = core.get_image_width()
# turn all lasers off
core.set_config('Laser','Off')
core.wait_for_config('Laser','Off')
# set all lasers to external triggering
core.set_config('Modulation-405','External-Digital')
core.wait_for_config('Modulation-405','External-Digital')
core.set_config('Modulation-488','External-Digital')
core.wait_for_config('Modulation-488','External-Digital')
core.set_config('Modulation-561','External-Digital')
core.wait_for_config('Modulation-561','External-Digital')
core.set_config('Modulation-637','External-Digital')
core.wait_for_config('Modulation-637','External-Digital')
core.set_config('Modulation-730','External-Digital')
core.wait_for_config('Modulation-730','External-Digital')
# turn all lasers on
core.set_config('Laser','AllOn')
core.wait_for_config('Laser','AllOn')
core.set_property('Coherent-Scientific Remote','Laser 405-100C - PowerSetpoint (%)',channel_powers[0])
core.set_property('Coherent-Scientific Remote','Laser 488-150C - PowerSetpoint (%)',channel_powers[1])
core.set_property('Coherent-Scientific Remote','Laser OBIS LS 561-150 - PowerSetpoint (%)',channel_powers[2])
core.set_property('Coherent-Scientific Remote','Laser 637-140C - PowerSetpoint (%)',channel_powers[3])
core.set_property('Coherent-Scientific Remote','Laser 730-30C - PowerSetpoint (%)',channel_powers[4])
# camera pixel size
pixel_size_um = .115 # unit: um
# galvo scan setup
scan_axis_calibration = 0.043 # unit: V / um
min_volt = -(scan_axis_range_um * scan_axis_calibration / 2.) + galvo_neutral_volt # unit: volts
scan_axis_step_volts = scan_axis_step_um * scan_axis_calibration # unit: V
scan_axis_range_volts = scan_axis_range_um * scan_axis_calibration # unit: V
scan_steps = np.rint(scan_axis_range_volts / scan_axis_step_volts).astype(np.int16) # galvo steps
# handle case where no scan steps
if scan_steps == 0:
scan_steps = 1
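# worked example with the settings above: 20 um range * 0.043 V/um = 0.86 V total sweep,
# 0.4 um * 0.043 V/um = 0.0172 V per step -> 50 galvo positions,
# and min_volt = -0.86/2 + (-0.15) = -0.58 V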
# output experiment info
print("Scan axis range: %.1f um = %0.3fV, Scan axis step: %.1f nm = %0.3fV , Number of galvo positions: %d" %
(scan_axis_range_um, scan_axis_range_volts, scan_axis_step_um * 1000, scan_axis_step_volts, scan_steps))
print('Galvo neutral (Volt): ' + str(galvo_neutral_volt)+', Min voltage (volt): '+str(min_volt))
print('Time points: ' + str(timepoints))
# create events to execute scan
events = []
# Changes to event structure motivated by Henry's notes that pycromanager struggles to read "non-standard" axes.
# https://github.com/micro-manager/pycro-manager/issues/220
for t in range(timepoints):
for x in range(scan_steps):
ch_idx = 0
for c in range(len(do_ind)):
if channel_states[c]:
if timing_interval == 0:
evt = { 'axes': {'t': t, 'z': x, 'c': ch_idx }}
else:
evt = { 'axes': {'t': t, 'z': x, 'c': ch_idx },
'min_start_time': t*timing_interval}
ch_idx = ch_idx+1
events.append(evt)
print("Generated %d events" % len(events))
# setup DAQ
nvoltage_steps = scan_steps
# 2 time steps per frame, except for first frame plus one final frame to reset voltage
#samples_per_ch = (nvoltage_steps * 2 - 1) + 1
samples_per_ch = (nvoltage_steps * 2 * n_active_channels - 1) + 1
DAQ_sample_rate_Hz = 10000
#retriggerable = True
num_DI_channels = 8
# Generate values for DO
dataDO = np.zeros((samples_per_ch, num_DI_channels), dtype=np.uint8)
for ii, ind in enumerate(active_channel_indices):
dataDO[2*ii::2*n_active_channels, ind] = 1
dataDO[-1, :] = 0
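# e.g. with two active channels A and B this produces the per-sample pattern
# [A on], [all off], [B on], [all off], ... so each camera frame strobes exactly one laser,
# every other sample returns all lines low, and the final sample forces everything off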
# generate voltage steps
max_volt = min_volt + scan_axis_range_volts # 2
voltage_values = np.linspace(min_volt, max_volt, nvoltage_steps)
# Generate values for AO
waveform = np.zeros(samples_per_ch)
# one less voltage value for first frame
waveform[0:2*n_active_channels - 1] = voltage_values[0]
if len(voltage_values) > 1:
# (2 * # active channels) voltage values for all other frames
waveform[2*n_active_channels - 1:-1] = np.kron(voltage_values[1:], np.ones(2 * n_active_channels))
# set back to initial value at end
waveform[-1] = voltage_values[0]
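# resulting AO layout (one sample per DO sample): the first voltage is held for
# 2*n_active_channels - 1 samples (the camera is already exposing the first frame),
# each later voltage is repeated 2*n_active_channels times via np.kron, and the last
# sample snaps the galvo back to the starting voltage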
#def read_di_hook(event):
try:
# ----- DIGITAL input -------
taskDI = daq.Task()
taskDI.CreateDIChan("/Dev1/PFI0", "", daq.DAQmx_Val_ChanForAllLines)
## Configure change detection timing (from wave generator)
taskDI.CfgInputBuffer(0) # must be enforced for change-detection timing, i.e no buffer
taskDI.CfgChangeDetectionTiming("/Dev1/PFI0", "/Dev1/PFI0", daq.DAQmx_Val_ContSamps, 0)
## Set where the starting trigger
taskDI.CfgDigEdgeStartTrig("/Dev1/PFI0", daq.DAQmx_Val_Rising)
## Export DI signal to unused PFI pins, for clock and start
taskDI.ExportSignal(daq.DAQmx_Val_ChangeDetectionEvent, "/Dev1/PFI2")
taskDI.ExportSignal(daq.DAQmx_Val_StartTrigger, "/Dev1/PFI1")
# ----- DIGITAL output ------
taskDO = daq.Task()
# TO DO: Write each laser line separately!
taskDO.CreateDOChan("/Dev1/port0/line0:7", "", daq.DAQmx_Val_ChanForAllLines)
## Configure timing (from DI task)
taskDO.CfgSampClkTiming("/Dev1/PFI2", DAQ_sample_rate_Hz, daq.DAQmx_Val_Rising, daq.DAQmx_Val_ContSamps, samples_per_ch)
## Write the output waveform
samples_per_ch_ct_digital = ct.c_int32()
taskDO.WriteDigitalLines(samples_per_ch, False, 10.0, daq.DAQmx_Val_GroupByChannel, dataDO, ct.byref(samples_per_ch_ct_digital), None)
# ------- ANALOG output -----------
# first, set the galvo to the initial point if it is not already
taskAO_first = daq.Task()
taskAO_first.CreateAOVoltageChan("/Dev1/ao0", "", -6.0, 6.0, daq.DAQmx_Val_Volts, None)
taskAO_first.WriteAnalogScalarF64(True, -1, waveform[0], None)
taskAO_first.StopTask()
taskAO_first.ClearTask()
# now set up the task to ramp the galvo
taskAO = daq.Task()
taskAO.CreateAOVoltageChan("/Dev1/ao0", "", -6.0, 6.0, daq.DAQmx_Val_Volts, None)
## Configure timing (from DI task)
taskAO.CfgSampClkTiming("/Dev1/PFI2", DAQ_sample_rate_Hz, daq.DAQmx_Val_Rising, daq.DAQmx_Val_ContSamps, samples_per_ch)
## Write the output waveform
samples_per_ch_ct = ct.c_int32()
taskAO.WriteAnalogF64(samples_per_ch, False, 10.0, daq.DAQmx_Val_GroupByScanNumber, waveform, ct.byref(samples_per_ch_ct), None)
## ------ Start both tasks ----------
taskAO.StartTask()
taskDO.StartTask()
taskDI.StartTask()
except daq.DAQError as err:
print("DAQmx Error %s"%err)
# run acquisition
with Acquisition(directory=save_directory, name=save_name, show_display=display_flag, max_multi_res_index=0, saving_queue_size=5000) as acq:
acq.acquire(events)
acq = None
# stop DAQ
try:
## Stop and clear both tasks
taskDI.StopTask()
taskDO.StopTask()
taskAO.StopTask()
taskDI.ClearTask()
taskAO.ClearTask()
taskDO.ClearTask()
except daq.DAQError as err:
print("DAQmx Error %s"%err)
# save galvo scan parameters
scan_param_data = [{'root_name': str(save_name),
'scan_type': 'galvo',
'theta': 30.0,
'exposure_ms': exposure_ms,
'scan_step': scan_axis_step_um*1000.,
'pixel_size': pixel_size_um*1000.,
'galvo_scan_range_um': scan_axis_range_um,
'galvo_volts_per_um': scan_axis_calibration,
'num_t': int(timepoints),
'num_y': 1, # might need to change this eventually
'num_z': 1, # might need to change this eventually
'num_ch': int(n_active_channels),
'scan_axis_positions': int(scan_steps),
'y_pixels': y_pixels,
'x_pixels': x_pixels,
'405_active': channel_states[0],
'488_active': channel_states[1],
'561_active': channel_states[2],
'635_active': channel_states[3],
'730_active': channel_states[4]}]
data_io.write_metadata(scan_param_data[0], save_directory / 'scan_metadata.csv')
with Bridge() as bridge:
core = bridge.get_core()
# turn all lasers off
core.set_config('Laser','Off')
core.wait_for_config('Laser','Off')
# set all lasers back to software control
core.set_config('Modulation-405','CW (constant power)')
core.wait_for_config('Modulation-405','CW (constant power)')
core.set_config('Modulation-488','CW (constant power)')
core.wait_for_config('Modulation-488','CW (constant power)')
core.set_config('Modulation-561','CW (constant power)')
core.wait_for_config('Modulation-561','CW (constant power)')
core.set_config('Modulation-637','CW (constant power)')
core.wait_for_config('Modulation-637','CW (constant power)')
core.set_config('Modulation-730','CW (constant power)')
core.wait_for_config('Modulation-730','CW (constant power)')
# set all lasers to zero power
channel_powers=[0,0,0,0,0]
core.set_property('Coherent-Scientific Remote','Laser 405-100C - PowerSetpoint (%)',channel_powers[0])
core.set_property('Coherent-Scientific Remote','Laser 488-150C - PowerSetpoint (%)',channel_powers[1])
core.set_property('Coherent-Scientific Remote','Laser OBIS LS 561-150 - PowerSetpoint (%)',channel_powers[2])
core.set_property('Coherent-Scientific Remote','Laser 637-140C - PowerSetpoint (%)',channel_powers[3])
core.set_property('Coherent-Scientific Remote','Laser 730-30C - PowerSetpoint (%)',channel_powers[4])
# put the galvo back to neutral
# first, set the galvo to the initial point if it is not already
taskAO_last = daq.Task()
taskAO_last.CreateAOVoltageChan("/Dev1/ao0","",-6.0,6.0,daq.DAQmx_Val_Volts,None)
taskAO_last.WriteAnalogScalarF64(True, -1, galvo_neutral_volt, None)
taskAO_last.StopTask()
taskAO_last.ClearTask()
if transfer_files:
# make parent directory on NAS and start reconstruction script on the server
# make home directory on NAS
save_directory_path = Path(save_directory)
remote_directory = Path('y:/') / Path(save_directory_path.parts[1])
cmd='mkdir ' + str(remote_directory)
status_mkdir = subprocess.run(cmd, shell=True)
# copy full experiment metadata to NAS
src= Path(save_directory) / Path('scan_metadata.csv')
dst= Path(remote_directory) / Path('scan_metadata.csv')
Thread(target=shutil.copy, args=[str(src), str(dst)]).start()
# copy data to NAS
save_directory_path = Path(save_directory)
remote_directory = Path('y:/') / Path(save_directory_path.parts[1])
src= Path(save_directory) / Path(save_name+ '_1')
dst= Path(remote_directory) / Path(save_name+ '_1')
Thread(target=shutil.copytree, args=[str(src), str(dst)]).start()
# run
if __name__ == "__main__":
main()
| 15,693 | 42.715877 | 144 | py |
| OPM | OPM-master/pycromanager-control/data_io.py |
#!/usr/bin/env python
import re
from npy2bdv import BdvEditor
import pandas as pd
import numpy as np
def read_metadata(fname):
"""
Read data from a csv file consisting of one line giving titles and another giving values. Return as a dictionary
:param fname:
:return metadata:
"""
scan_data_raw_lines = []
with open(fname, "r") as f:
for line in f:
scan_data_raw_lines.append(line.replace("\n", ""))
titles = scan_data_raw_lines[0].split(",")
# convert values to appropriate datatypes
vals = scan_data_raw_lines[1].split(",")
for ii in range(len(vals)):
if re.fullmatch(r"\d+", vals[ii]):
vals[ii] = int(vals[ii])
elif re.fullmatch(r"\d*\.\d+", vals[ii]):
vals[ii] = float(vals[ii])
elif vals[ii].lower() == "False".lower():
vals[ii] = False
elif vals[ii].lower() == "True".lower():
vals[ii] = True
else:
# otherwise, leave as string
pass
# convert to dictionary
metadata = {}
for t, v in zip(titles, vals):
metadata[t] = v
return metadata
def write_metadata(data_dict, save_path):
"""
:param data_dict: dictionary of metadata entries
:param save_path:
:return:
"""
pd.DataFrame([data_dict]).to_csv(save_path)
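# usage sketch (hypothetical values): write_metadata({'scan_type': 'galvo', 'theta': 30.0}, 'scan_metadata.csv')
# followed by read_metadata('scan_metadata.csv') restores ints, floats and booleans from their string form.
# note: pd.DataFrame.to_csv also writes an index column, which reappears as an extra unnamed entry on read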
def return_data_numpy(dataset, time_axis, channel_axis, num_images, excess_images, y_pixels,x_pixels):
"""
:param dataset: pycromanager dataset object
:param channel_axis: integer channel index
:param time_axis: integer time_axis
:param num_images: integer for number of images to return
:param y_pixels: integer for y pixel size
:param x_pixels: integer for x pixel size
:return data_numpy: 3D numpy array of requested data
"""
data_numpy = np.empty([(num_images-excess_images),y_pixels,x_pixels]).astype(np.uint16)
j = 0
for i in range(excess_images,num_images):
if (time_axis is None):
if (channel_axis is None):
data_numpy[j,:,:] = dataset.read_image(z=i)
else:
data_numpy[j,:,:] = dataset.read_image(z=i, c=channel_axis)
else:
if (channel_axis is None):
data_numpy[j,:,:] = dataset.read_image(z=i, t=time_axis)
else:
data_numpy[j,:,:] = dataset.read_image(z=i, t=time_axis, c=channel_axis)
j = j + 1
return data_numpy
def return_data_numpy_widefield(dataset, channel_axis, ch_BDV_idx, num_z, y_pixels,x_pixels):
"""
:param dataset: pycromanager dataset object
:param channel_axis: integer channel index
:param time_axis: integer time_axis
:param num_images: integer for number of images to return
:param y_pixels: integer for y pixel size
:param x_pixels: integer for x pixel size
:return data_numpy: 3D numpy array of requested data
"""
data_numpy = np.empty([num_z,y_pixels,x_pixels]).astype(np.uint16)
for i in range(num_z):
if (channel_axis is None):
data_numpy[i,:,:] = dataset.read_image(z=i)
else:
data_numpy[i,:,:] = dataset.read_image(z=i, c=channel_axis, channel=ch_BDV_idx)
return data_numpy
def stitch_data(path_to_xml,iterative_flag):
"""
:param path_to_xml: Path
path to BDV XML. BDV H5 must be present for loading
:param iterative_flag: Bool
flag if multiple rounds need to be aligned
"""
# TO DO: 1. write either pyimagej bridge + macro OR call FIJI/BigStitcher in headless mode.
# 2. fix flipped x-axis between Python and FIJI. Easier to flip data in Python than deal with
# annoying affine that flips data.
def return_affine_xform(path_to_xml,r_idx,y_idx,z_idx,total_z_pos):
"""
:param path_to_xml: Path
path to BDV XML. BDV H5 must be present for loading
:param r_idx: integer
round index
:param t_idx: integer
time index
:param y_idx: integer
y tile index
:param z_idx: integer
z tile index
:return data_numpy: NDarray
4D numpy array of all affine transforms
"""
bdv_editor = BdvEditor(str(path_to_xml))
tile_idx = (y_idx+z_idx)+(y_idx*(total_z_pos-1))
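# tile_idx flattens the (y, z) tile grid in y-major order: (y_idx + z_idx) + y_idx*(total_z_pos - 1)
# simplifies to y_idx*total_z_pos + z_idx, e.g. (y=1, z=2) with total_z_pos=3 -> tile 5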
affine_xforms = []
read_affine_success = True
affine_idx = 0
while read_affine_success:
try:
affine_xform = bdv_editor.read_affine(time=r_idx,illumination=0,channel=0,tile=tile_idx,angle=0,index=affine_idx)
except Exception:
read_affine_success = False
else:
affine_xforms.append(affine_xform)
affine_idx = affine_idx + 1
return affine_xforms
| 4,729 | 29.915033 | 125 | py |
| Max-value-Entropy-Search | Max-value-Entropy-Search-master/test_functions/python_related/generate_simudata3.py |
#!/usr/bin/env python
# Copyright (c) 2017 Zi Wang
from push_world import *
import sys
if __name__ == '__main__':
rx = float(sys.argv[1])
ry = float(sys.argv[2])
gx = float(sys.argv[4])
gy = float(sys.argv[5])
simu_steps = int(float(sys.argv[3]) * 10)
# pass True to b2WorldInterface if a gui is needed
world = b2WorldInterface(False)
oshape, osize, ofriction, odensity, bfriction, hand_shape, hand_size = 'circle', 1, 0.01, 0.05, 0.01, 'rectangle', (0.3,1)
thing,base = make_thing(500, 500, world, oshape, osize, ofriction, odensity, bfriction, (0,0))
init_angle = np.arctan(ry/rx)
robot = end_effector(world, (rx,ry), base, init_angle, hand_shape, hand_size)
ret = simu_push(world, thing, robot, base, simu_steps)
ret = np.linalg.norm(np.array([gx, gy]) - ret)
sys.stdout.write(str(ret))
| 839 | 35.521739 | 128 | py |
| Max-value-Entropy-Search | Max-value-Entropy-Search-master/test_functions/python_related/generate_simudata4.py |
#!/usr/bin/env python
# Copyright (c) 2017 Zi Wang
from push_world import *
import sys
# difference to generate_simudata is an input that control angle of push
if __name__ == '__main__':
rx = float(sys.argv[1])
ry = float(sys.argv[2])
gx = float(sys.argv[4])
gy = float(sys.argv[5])
init_angle = float(sys.argv[6])
simu_steps = int(float(sys.argv[3]) * 10)
# Set the parameter to True if need gui
world = b2WorldInterface(False)
oshape, osize, ofriction, odensity, bfriction, hand_shape, hand_size = 'circle', 1, 0.01, 0.05, 0.01, 'rectangle', (0.3,1)
thing,base = make_thing(500, 500, world, oshape, osize, ofriction, odensity, bfriction, (0,0))
xvel = -rx
yvel = -ry
regu = np.linalg.norm([xvel, yvel])
xvel = xvel / regu * 10
yvel = yvel / regu * 10
robot = end_effector(world, (rx,ry), base, init_angle, hand_shape, hand_size)
ret = simu_push2(world, thing, robot, base, xvel, yvel, simu_steps)
ret = np.linalg.norm(np.array([gx, gy]) - ret)
sys.stdout.write(str(ret))
| 1,054 | 39.576923 | 128 | py |
| Max-value-Entropy-Search | Max-value-Entropy-Search-master/test_functions/python_related/generate_simudata_2robot2thing.py |
#!/usr/bin/env python
# Copyright (c) 2017 Zi Wang
from push_world import *
import sys
# difference to generate_simudata is an input that control angle of push
if __name__ == '__main__':
rx = float(sys.argv[1])
ry = float(sys.argv[2])
xvel = float(sys.argv[3])
yvel = float(sys.argv[4])
simu_steps = int(float(sys.argv[5]) * 10)
init_angle = float(sys.argv[6])
rx2 = float(sys.argv[7])
ry2 = float(sys.argv[8])
xvel2 = float(sys.argv[9])
yvel2 = float(sys.argv[10])
simu_steps2 = int(float(sys.argv[11]) * 10)
init_angle2 = float(sys.argv[12])
rtor = float(sys.argv[13])
rtor2 = float(sys.argv[14])
gx = float(sys.argv[15])
gy = float(sys.argv[16])
gx2 = float(sys.argv[17])
gy2 = float(sys.argv[18])
world = b2WorldInterface(False)
oshape, osize, ofriction, odensity, bfriction, hand_shape, hand_size = 'circle', 1, 0.01, 0.05, 0.01, 'rectangle', (1,0.3) #'circle', 0.3#
#thing,base = make_thing(500, 500, world, oshape, osize, ofriction, odensity, bfriction, (0,0))
base = make_base(500, 500, world)
thing = make_1thing(base, world, 'rectangle', (0.5,0.5), ofriction, odensity, (0, 2))
thing2 = make_1thing(base, world, 'circle', 1, ofriction, odensity, (0,-2))
#xvel = np.cos(init_angle)*5;
#yvel = np.sin(init_angle)*5;
robot = end_effector(world, (rx,ry), base, init_angle, hand_shape, hand_size)
robot2 = end_effector(world, (rx2,ry2), base, init_angle2, hand_shape, hand_size)
(ret1, ret2) = simu_push_2robot2thing(world, thing, thing2, robot, robot2, base, xvel, yvel, xvel2, yvel2, rtor, rtor2, simu_steps, simu_steps2)
#print ret1, ret2
ret1 = np.linalg.norm(np.array([gx, gy]) - ret1)
ret2 = np.linalg.norm(np.array([gx2, gy2]) - ret2)
sys.stdout.write(str(ret1+ret2))
| 1,823 | 42.428571 | 148 | py |
| Max-value-Entropy-Search | Max-value-Entropy-Search-master/test_functions/python_related/push_world.py |
#!/usr/bin/env python
# Author: Ari Anders and Zi Wang
from Box2D import *
from Box2D.b2 import *
import numpy as np
import pygame
import scipy.io
from numpy import linalg as LA
# this just makes pygame show what's going on
class guiWorld:
def __init__(self, fps):
self.SCREEN_WIDTH, self.SCREEN_HEIGHT = 1000, 1000
self.TARGET_FPS = fps
self.PPM = 10.0 # pixels per meter
self.screen = pygame.display.set_mode((self.SCREEN_WIDTH, self.SCREEN_HEIGHT), 0, 32)
pygame.display.set_caption('push simulator')
self.clock = pygame.time.Clock()
self.screen_origin = b2Vec2(self.SCREEN_WIDTH/(2*self.PPM), self.SCREEN_HEIGHT/(self.PPM*2))
self.colors = {
b2_staticBody : (255,255,255,255),
b2_dynamicBody : (163,209,224,255)
}
def draw(self, bodies, bg_color=(64,64,64,0)):
#def draw(self, bodies, bg_color=(0,0,0,0)):
def my_draw_polygon(polygon, body, fixture):
vertices=[(self.screen_origin + body.transform*v)*self.PPM for v in polygon.vertices]
vertices=[(v[0], self.SCREEN_HEIGHT-v[1]) for v in vertices]
color = self.colors[body.type]
if body.userData == "obs":
color = (123,128,120,0)
if body.userData == "hand":
color = (174,136,218,0)
pygame.draw.polygon(self.screen, color, vertices)
def my_draw_circle(circle, body, fixture):
position=(self.screen_origin + body.transform*circle.pos)*self.PPM
position=(position[0], self.SCREEN_HEIGHT-position[1])
color = self.colors[body.type]
if body.userData == "hand":
color = (174,136,218,0)
pygame.draw.circle(self.screen, color, [int(x) for x in
position], int(circle.radius*self.PPM))
b2PolygonShape.draw=my_draw_polygon
b2CircleShape.draw=my_draw_circle
# draw the world
self.screen.fill(bg_color)
self.clock.tick(self.TARGET_FPS)
for body in bodies:
for fixture in body.fixtures:
fixture.shape.draw(body,fixture)
pygame.display.flip()
# this is the interface to pybox2d
class b2WorldInterface:
def __init__(self, do_gui=True):
self.world = b2World(gravity=(0.0,0.0), doSleep=True)
self.do_gui = do_gui
self.TARGET_FPS = 100
self.TIME_STEP = 1.0/self.TARGET_FPS
self.VEL_ITERS, self.POS_ITERS =10,10
self.bodies = []
if do_gui:
self.gui_world = guiWorld(self.TARGET_FPS)
#raw_input()
else:
self.gui_world = None
def initialize_gui(self):
        if self.gui_world is None:
self.gui_world = guiWorld(self.TARGET_FPS)
self.do_gui = True
def stop_gui(self):
self.do_gui = False
def add_bodies(self, new_bodies):
""" add a single b2Body or list of b2Bodies to the world"""
if type(new_bodies) == list:
self.bodies += new_bodies
else:
self.bodies.append(new_bodies)
def step(self, show_display=True, idx=0):
self.world.Step(self.TIME_STEP, self.VEL_ITERS, self.POS_ITERS)
if show_display and self.do_gui:
self.gui_world.draw(self.bodies)
#if idx % 10 == 0:
# pygame.image.save(self.gui_world.screen,'tmp_images/'+str(int(sm.ttt*100)+idx)+'.bmp')
class end_effector:
def __init__(self, b2world_interface, init_pos, base, init_angle, hand_shape='rectangle', hand_size=(0.3,1)):
world= b2world_interface.world
self.hand = world.CreateDynamicBody(position=init_pos,angle=init_angle)
self.hand_shape = hand_shape
self.hand_size = hand_size
# forceunit for circle and rect
if hand_shape == 'rectangle':
rshape = b2PolygonShape(box=hand_size)
self.forceunit = 30.0
elif hand_shape == 'circle':
rshape = b2CircleShape(radius=hand_size)
self.forceunit = 100.0
elif hand_shape == 'polygon':
rshape = b2PolygonShape(vertices=hand_size)
else:
raise Exception("%s is not a correct shape" % hand_shape)
self.hand.CreateFixture(
shape = rshape,
density = .1,
friction = .1
)
self.hand.userData = "hand"
friction_joint = world.CreateFrictionJoint(
bodyA = base,
bodyB = self.hand,
maxForce = 2,
maxTorque = 2,
)
b2world_interface.add_bodies(self.hand)
def set_pos(self, pos, angle):
self.hand.position = pos
self.hand.angle = angle
def apply_wrench(self, rlvel=(0,0), ravel=0):
#self.hand.ApplyForce(force, self.hand.position,wake=True)
#if avel != 0:
avel = self.hand.angularVelocity
delta_avel = ravel - avel
torque = self.hand.mass*delta_avel*30.0
self.hand.ApplyTorque(torque, wake=True)
#else:
lvel = self.hand.linearVelocity
delta_lvel = b2Vec2(rlvel) - b2Vec2(lvel)
force = self.hand.mass*delta_lvel*self.forceunit
self.hand.ApplyForce(force, self.hand.position,wake=True)
def get_state(self, verbose=False):
state = list(self.hand.position) + [ self.hand.angle] + \
list(self.hand.linearVelocity) + [self.hand.angularVelocity]
if verbose:
print_state = ["%.3f" % x for x in state]
print "position, velocity: (%s), (%s) " % \
((", ").join(print_state[:3]), (", ").join(print_state[3:]) )
return state
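# Usage sketch (hypothetical values): apply_wrench acts as a velocity servo --
# each call turns the requested linear/angular velocity into a force/torque
# proportional to the tracking error, so it must be re-applied every step:
#   robot = end_effector(world, (0, 0), base, 0.0, 'rectangle', (1, 0.3))
#   for _ in range(100):
#       robot.apply_wrench(rlvel=(5.0, 0.0), ravel=0.0)
#       world.step()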
def make_thing(table_width, table_length, b2world_interface, thing_shape, thing_size, thing_friction, thing_density, base_friction, obj_loc):
world = b2world_interface.world
base = world.CreateStaticBody(
position = (0,0),
#friction = base_friction,
shapes = b2PolygonShape(box=(table_length,table_width)),
)
link = world.CreateDynamicBody(position=obj_loc)
if thing_shape == 'rectangle':
linkshape = b2PolygonShape(box=thing_size)
elif thing_shape == 'circle':
linkshape = b2CircleShape(radius=thing_size)
elif thing_shape == 'polygon':
linkshape = b2PolygonShape(vertices=thing_size)
else:
raise Exception("%s is not a correct shape" % thing_shape)
link.CreateFixture(
shape = linkshape,
density = thing_density,
friction = thing_friction,
)
friction_joint = world.CreateFrictionJoint(
bodyA = base,
bodyB = link,
maxForce = 5,
maxTorque = 2,
)
b2world_interface.add_bodies([base,link])
return link,base
def simu_push(world, thing, robot, base, simulation_steps):
# simulating push with fixed direction pointing from robot location to thing location
desired_vel = thing.position - robot.hand.position
desired_vel = desired_vel / np.linalg.norm(desired_vel) * 5
rvel = b2Vec2(desired_vel[0]+np.random.normal(0,0.1),desired_vel[1]+np.random.normal(0,0.1))
rstop = False
for t in range(simulation_steps+100):
if not rstop:
robot.apply_wrench(rvel)
world.step()
ostate = list(thing.position) + [ thing.angle] + \
list(thing.linearVelocity) + [thing.angularVelocity]
if t == simulation_steps - 1:
rstop = True
return list(thing.position)
def simu_push2(world, thing, robot, base, xvel, yvel, simulation_steps):
desired_vel = np.array([xvel, yvel])
rvel = b2Vec2(desired_vel[0]+np.random.normal(0,0.1),desired_vel[1]+np.random.normal(0,0.1))
rstop = False
for t in range(simulation_steps+100):
if not rstop:
robot.apply_wrench(rvel)
world.step()
ostate = list(thing.position) + [ thing.angle] + \
list(thing.linearVelocity) + [thing.angularVelocity]
if t == simulation_steps - 1:
rstop = True
return list(thing.position)
def make_1thing(base, b2world_interface, thing_shape, thing_size, thing_friction, thing_density, obj_loc):
world = b2world_interface.world
link = world.CreateDynamicBody(position=obj_loc)
if thing_shape == 'rectangle':
linkshape = b2PolygonShape(box=thing_size)
elif thing_shape == 'circle':
linkshape = b2CircleShape(radius=thing_size)
elif thing_shape == 'polygon':
linkshape = b2PolygonShape(vertices=thing_size)
else:
raise Exception("%s is not a correct shape" % thing_shape)
link.CreateFixture(
shape = linkshape,
density = thing_density,
friction = thing_friction,
)
friction_joint = world.CreateFrictionJoint(
bodyA = base,
bodyB = link,
maxForce = 5,
maxTorque = 2,
)
b2world_interface.add_bodies([link])
return link
def simu_push_2robot2thing(world, thing, thing2, robot, robot2, base, xvel, yvel, xvel2, yvel2, rtor, rtor2, simulation_steps, simulation_steps2):
desired_vel = np.array([xvel, yvel])
rvel = b2Vec2(desired_vel[0]+np.random.normal(0,0.01),desired_vel[1]+np.random.normal(0,0.01))
desired_vel2 = np.array([xvel2, yvel2])
rvel2 = b2Vec2(desired_vel2[0]+np.random.normal(0,0.01),desired_vel2[1]+np.random.normal(0,0.01))
tmax = np.max([simulation_steps,simulation_steps2])
for t in range(tmax+100):
if t < simulation_steps:
robot.apply_wrench(rvel, rtor)
if t < simulation_steps2:
robot2.apply_wrench(rvel2, rtor2)
world.step()
return (list(thing.position), list(thing2.position))
def make_base(table_width, table_length, b2world_interface):
world = b2world_interface.world
base = world.CreateStaticBody(
position = (0,0),
#friction = base_friction,
shapes = b2PolygonShape(box=(table_length,table_width)),
)
b2world_interface.add_bodies([base])
return base
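# End-to-end sketch of this module (hypothetical values, GUI disabled),
# mirroring the generate_simudata scripts above:
#   world = b2WorldInterface(False)
#   base = make_base(500, 500, world)
#   thing = make_1thing(base, world, 'rectangle', (0.5, 0.5), 0.01, 0.05, (0, 2))
#   robot = end_effector(world, (-3, 0), base, 0.0, 'rectangle', (1, 0.3))
#   final_pos = simu_push2(world, thing, robot, base, 5.0, 0.0, 100)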
| 10,238
| 36.097826
| 146
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/setup.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import sys
from setuptools import Extension, find_packages, setup
if sys.version_info < (3, 6):
sys.exit("Sorry, Python >= 3.6 is required for fairseq.")
def write_version_py():
with open(os.path.join("fairseq", "version.txt")) as f:
version = f.read().strip()
# append latest commit hash to version string
try:
sha = (
subprocess.check_output(["git", "rev-parse", "HEAD"])
.decode("ascii")
.strip()
)
version += "+" + sha[:7]
except Exception:
pass
# write version info to fairseq/version.py
with open(os.path.join("fairseq", "version.py"), "w") as f:
f.write('__version__ = "{}"\n'.format(version))
return version
version = write_version_py()
with open("README.md") as f:
readme = f.read()
if sys.platform == "darwin":
extra_compile_args = ["-stdlib=libc++", "-O3"]
else:
extra_compile_args = ["-std=c++11", "-O3"]
class NumpyExtension(Extension):
"""Source: https://stackoverflow.com/a/54128391"""
def __init__(self, *args, **kwargs):
self.__include_dirs = []
super().__init__(*args, **kwargs)
@property
def include_dirs(self):
import numpy
return self.__include_dirs + [numpy.get_include()]
@include_dirs.setter
def include_dirs(self, dirs):
self.__include_dirs = dirs
extensions = [
Extension(
"fairseq.libbleu",
sources=[
"fairseq/clib/libbleu/libbleu.cpp",
"fairseq/clib/libbleu/module.cpp",
],
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.data_utils_fast",
sources=["fairseq/data/data_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.token_block_utils_fast",
sources=["fairseq/data/token_block_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
]
cmdclass = {}
try:
# torch is not available when generating docs
from torch.utils import cpp_extension
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libbase",
sources=[
"fairseq/clib/libbase/balanced_assignment.cpp",
],
)
]
)
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libnat",
sources=[
"fairseq/clib/libnat/edit_dist.cpp",
],
),
cpp_extension.CppExtension(
"alignment_train_cpu_binding",
sources=[
"examples/operators/alignment_train_cpu.cpp",
],
),
]
)
if "CUDA_HOME" in os.environ:
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libnat_cuda",
sources=[
"fairseq/clib/libnat_cuda/edit_dist.cu",
"fairseq/clib/libnat_cuda/binding.cpp",
],
),
cpp_extension.CppExtension(
"fairseq.ngram_repeat_block_cuda",
sources=[
"fairseq/clib/cuda/ngram_repeat_block_cuda.cpp",
"fairseq/clib/cuda/ngram_repeat_block_cuda_kernel.cu",
],
),
cpp_extension.CppExtension(
"alignment_train_cuda_binding",
sources=[
"examples/operators/alignment_train_kernel.cu",
"examples/operators/alignment_train_cuda.cpp",
],
),
]
)
cmdclass["build_ext"] = cpp_extension.BuildExtension
except ImportError:
pass
if "READTHEDOCS" in os.environ:
# don't build extensions when generating docs
extensions = []
if "build_ext" in cmdclass:
del cmdclass["build_ext"]
# use CPU build of PyTorch
dependency_links = [
"https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
]
else:
dependency_links = []
if "clean" in sys.argv[1:]:
# Source: https://bit.ly/2NLVsgE
print("deleting Cython files...")
subprocess.run(
["rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd"],
shell=True,
)
extra_packages = []
if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")):
extra_packages.append("fairseq.model_parallel.megatron.mpu")
def do_setup(package_data):
setup(
name="fairseq",
version=version,
description="Facebook AI Research Sequence-to-Sequence Toolkit",
url="https://github.com/pytorch/fairseq",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
long_description=readme,
long_description_content_type="text/markdown",
setup_requires=[
"cython",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"setuptools>=18.0",
],
install_requires=[
"cffi",
"cython",
'dataclasses; python_version<"3.7"',
"hydra-core>=1.0.7,<1.1",
"omegaconf<2.1",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"regex",
"sacrebleu>=1.4.12",
"torch",
"tqdm",
"bitarray",
# "torchaudio>=0.8.0",
],
dependency_links=dependency_links,
packages=find_packages(
exclude=[
"examples",
"examples.*",
"scripts",
"scripts.*",
"tests",
"tests.*",
]
)
+ extra_packages,
package_data=package_data,
ext_modules=extensions,
test_suite="tests",
entry_points={
"console_scripts": [
"fairseq-eval-lm = fairseq_cli.eval_lm:cli_main",
"fairseq-generate = fairseq_cli.generate:cli_main",
"fairseq-hydra-train = fairseq_cli.hydra_train:cli_main",
"fairseq-interactive = fairseq_cli.interactive:cli_main",
"fairseq-preprocess = fairseq_cli.preprocess:cli_main",
"fairseq-score = fairseq_cli.score:cli_main",
"fairseq-train = fairseq_cli.train:cli_main",
"fairseq-validate = fairseq_cli.validate:cli_main",
],
},
cmdclass=cmdclass,
zip_safe=False,
)
def get_files(path, relative_to="fairseq"):
all_files = []
for root, _dirs, files in os.walk(path, followlinks=True):
root = os.path.relpath(root, relative_to)
for file in files:
if file.endswith(".pyc"):
continue
all_files.append(os.path.join(root, file))
return all_files
if __name__ == "__main__":
try:
# symlink examples into fairseq package so package_data accepts them
fairseq_examples = os.path.join("fairseq", "examples")
if "build_ext" not in sys.argv[1:] and not os.path.exists(fairseq_examples):
os.symlink(os.path.join("..", "examples"), fairseq_examples)
package_data = {
"fairseq": (
get_files(fairseq_examples)
+ get_files(os.path.join("fairseq", "config"))
)
}
do_setup(package_data)
finally:
if "build_ext" not in sys.argv[1:] and os.path.islink(fairseq_examples):
os.unlink(fairseq_examples)
| 8,435
| 28.291667
| 92
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/hubconf.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import functools
import importlib
dependencies = [
"dataclasses",
"hydra",
"numpy",
"omegaconf",
"regex",
"requests",
"torch",
]
# Check for required dependencies and raise a RuntimeError if any are missing.
missing_deps = []
for dep in dependencies:
try:
importlib.import_module(dep)
except ImportError:
# Hack: the hydra package is provided under the "hydra-core" name in
# pypi. We don't want the user mistakenly calling `pip install hydra`
# since that will install an unrelated package.
if dep == "hydra":
dep = "hydra-core"
missing_deps.append(dep)
if len(missing_deps) > 0:
raise RuntimeError("Missing dependencies: {}".format(", ".join(missing_deps)))
# only do fairseq imports after checking for dependencies
from fairseq.hub_utils import (  # noqa
BPEHubInterface as bpe,
TokenizerHubInterface as tokenizer,
)
from fairseq.models import MODEL_REGISTRY # noqa
# torch.hub doesn't build Cython components, so if they are not found then try
# to build them here
try:
import fairseq.data.token_block_utils_fast # noqa
except ImportError:
try:
import cython # noqa
import os
from setuptools import sandbox
sandbox.run_setup(
os.path.join(os.path.dirname(__file__), "setup.py"),
["build_ext", "--inplace"],
)
except ImportError:
print(
"Unable to build Cython components. Please make sure Cython is "
"installed if the torch.hub model you are loading depends on it."
)
# automatically expose models defined in FairseqModel::hub_models
for _model_type, _cls in MODEL_REGISTRY.items():
for model_name in _cls.hub_models().keys():
globals()[model_name] = functools.partial(
_cls.from_pretrained,
model_name,
)
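# Usage sketch: with the models exposed above, a pretrained checkpoint can be
# pulled through torch.hub (the model name is illustrative and must appear in
# some FairseqModel.hub_models()):
#   import torch
#   model = torch.hub.load("pytorch/fairseq", "transformer.wmt19.en-de")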
| 2,099
| 27.378378
| 82
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/train.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Legacy entry point. Use fairseq_cli/train.py or fairseq-train instead.
"""
from fairseq_cli.train import cli_main
if __name__ == "__main__":
cli_main()
| 366
| 23.466667
| 70
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.version import __version__ # noqa
| 226
| 31.428571
| 65
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_to_text/prep_covost_data.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
from typing import Optional, Tuple
import pandas as pd
import torchaudio
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
)
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import download_url, extract_archive
from tqdm import tqdm
log = logging.getLogger(__name__)
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
class CoVoST(Dataset):
"""Create a Dataset for CoVoST (https://github.com/facebookresearch/covost).
Args:
root (str): root path to the dataset and generated manifests/features
source_language (str): source (audio) language
target_language (str, optional): target (text) language,
None for no translation (default: None)
version (int, optional): CoVoST version. (default: 2)
download (bool, optional): Whether to download the dataset if it is not
found at root path. (default: ``False``).
"""
COVOST_URL_TEMPLATE = (
"https://dl.fbaipublicfiles.com/covost/"
"covost_v2.{src_lang}_{tgt_lang}.tsv.tar.gz"
)
VERSIONS = {2}
SPLITS = ["train", "dev", "test"]
XX_EN_LANGUAGES = {
1: ["fr", "de", "nl", "ru", "es", "it", "tr", "fa", "sv-SE", "mn", "zh-CN"],
2: [
"fr",
"de",
"es",
"ca",
"it",
"ru",
"zh-CN",
"pt",
"fa",
"et",
"mn",
"nl",
"tr",
"ar",
"sv-SE",
"lv",
"sl",
"ta",
"ja",
"id",
"cy",
],
}
EN_XX_LANGUAGES = {
1: [],
2: [
"de",
"tr",
"fa",
"sv-SE",
"mn",
"zh-CN",
"cy",
"ca",
"sl",
"et",
"id",
"ar",
"ta",
"lv",
"ja",
],
}
def __init__(
self,
root: str,
split: str,
source_language: str,
target_language: Optional[str] = None,
version: int = 2,
) -> None:
assert version in self.VERSIONS and split in self.SPLITS
assert source_language is not None
self.no_translation = target_language is None
if not self.no_translation:
assert "en" in {source_language, target_language}
if source_language == "en":
assert target_language in self.EN_XX_LANGUAGES[version]
else:
assert source_language in self.XX_EN_LANGUAGES[version]
else:
# Hack here so that we can get "split" column from CoVoST TSV.
# Note that we use CoVoST train split for ASR which is an extension
# to Common Voice train split.
target_language = "de" if source_language == "en" else "en"
self.root: Path = Path(root)
cv_tsv_path = self.root / "validated.tsv"
assert cv_tsv_path.is_file()
covost_url = self.COVOST_URL_TEMPLATE.format(
src_lang=source_language, tgt_lang=target_language
)
covost_archive = self.root / Path(covost_url).name
if not covost_archive.is_file():
download_url(covost_url, self.root.as_posix(), hash_value=None)
extract_archive(covost_archive.as_posix())
cv_tsv = load_df_from_tsv(cv_tsv_path)
covost_tsv = load_df_from_tsv(
self.root / Path(covost_url).name.replace(".tar.gz", "")
)
df = pd.merge(
left=cv_tsv[["path", "sentence", "client_id"]],
right=covost_tsv[["path", "translation", "split"]],
how="inner",
on="path",
)
if split == "train":
df = df[(df["split"] == split) | (df["split"] == f"{split}_covost")]
else:
df = df[df["split"] == split]
data = df.to_dict(orient="index").items()
data = [v for k, v in sorted(data, key=lambda x: x[0])]
self.data = []
for e in data:
try:
path = self.root / "clips" / e["path"]
_ = torchaudio.info(path.as_posix())
self.data.append(e)
except RuntimeError:
pass
def __getitem__(
self, n: int
    ) -> Tuple[Tensor, int, str, Optional[str], str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
tuple: ``(waveform, sample_rate, sentence, translation, speaker_id,
sample_id)``
"""
data = self.data[n]
path = self.root / "clips" / data["path"]
waveform, sample_rate = torchaudio.load(path)
sentence = data["sentence"]
translation = None if self.no_translation else data["translation"]
speaker_id = data["client_id"]
_id = data["path"].replace(".mp3", "")
return waveform, sample_rate, sentence, translation, speaker_id, _id
def __len__(self) -> int:
return len(self.data)
def process(args):
root = Path(args.data_root).absolute() / args.src_lang
if not root.is_dir():
raise NotADirectoryError(f"{root} does not exist")
# Extract features
feature_root = root / "fbank80"
feature_root.mkdir(exist_ok=True)
for split in CoVoST.SPLITS:
print(f"Fetching split {split}...")
dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)
print("Extracting log mel filter bank features...")
for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset):
extract_fbank_features(
waveform, sample_rate, feature_root / f"{utt_id}.npy"
)
# Pack features into ZIP
zip_path = root / "fbank80.zip"
print("ZIPing features...")
create_zip(feature_root, zip_path)
print("Fetching ZIP manifest...")
audio_paths, audio_lengths = get_zip_manifest(zip_path)
# Generate TSV manifest
print("Generating manifest...")
train_text = []
task = f"asr_{args.src_lang}"
if args.tgt_lang is not None:
task = f"st_{args.src_lang}_{args.tgt_lang}"
for split in CoVoST.SPLITS:
manifest = {c: [] for c in MANIFEST_COLUMNS}
dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)
for _, _, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset):
manifest["id"].append(utt_id)
manifest["audio"].append(audio_paths[utt_id])
manifest["n_frames"].append(audio_lengths[utt_id])
manifest["tgt_text"].append(src_utt if args.tgt_lang is None else tgt_utt)
manifest["speaker"].append(speaker_id)
is_train_split = split.startswith("train")
if is_train_split:
train_text.extend(manifest["tgt_text"])
df = pd.DataFrame.from_dict(manifest)
df = filter_manifest_df(df, is_train_split=is_train_split)
save_df_to_tsv(df, root / f"{split}_{task}.tsv")
# Generate vocab
vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{task}"
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name), root / spm_filename_prefix, args.vocab_type, args.vocab_size
)
# Generate config YAML
gen_config_yaml(
root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{task}.yaml",
specaugment_policy="lb",
)
# Clean up
shutil.rmtree(feature_root)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data-root",
"-d",
required=True,
type=str,
help="data root with sub-folders for each language <root>/<src_lang>",
)
parser.add_argument(
"--vocab-type",
default="unigram",
required=True,
type=str,
choices=["bpe", "unigram", "char"],
    )
parser.add_argument("--vocab-size", default=1000, type=int)
parser.add_argument("--src-lang", "-s", required=True, type=str)
parser.add_argument("--tgt-lang", "-t", type=str)
args = parser.parse_args()
process(args)
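# Example invocation (hypothetical paths; omit --tgt-lang to build ASR
# manifests instead of ST ones):
#   python prep_covost_data.py -d /path/to/covost -s fr -t en --vocab-type char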
if __name__ == "__main__":
main()
| 8,898
| 30.782143
| 86
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_to_text/prep_mtedx_data.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
from pathlib import Path
import shutil
from itertools import groupby
from tempfile import NamedTemporaryFile
from typing import Tuple
import pandas as pd
import soundfile as sf
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
)
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from fairseq.data.audio.audio_utils import get_waveform, convert_waveform
log = logging.getLogger(__name__)
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker", "tgt_lang"]
class mTEDx(Dataset):
"""
Create a Dataset for Multilingual TEDx.
Each item is a tuple of the form: waveform, sample_rate, source utterance,
target utterance, speaker_id, utterance_id
"""
SPLITS = ["train", "valid", "test"]
LANGPAIRS = [
"es-es",
"fr-fr",
"pt-pt",
"it-it",
"ru-ru",
"el-el",
"ar-ar",
"de-de",
"es-en",
"es-fr",
"es-pt",
"es-it",
"fr-en",
"fr-es",
"fr-pt",
"pt-en",
"pt-es",
"it-en",
"it-es",
"ru-en",
"el-en",
]
def __init__(self, root: str, lang: str, split: str) -> None:
assert split in self.SPLITS and lang in self.LANGPAIRS
_root = Path(root) / f"{lang}" / "data" / split
wav_root, txt_root = _root / "wav", _root / "txt"
assert _root.is_dir() and wav_root.is_dir() and txt_root.is_dir()
# Load audio segments
try:
import yaml
except ImportError:
print("Please install PyYAML to load the Multilingual TEDx YAML files")
with open(txt_root / f"{split}.yaml") as f:
segments = yaml.load(f, Loader=yaml.BaseLoader)
# Load source and target utterances
src, tgt = lang.split("-")
for _lang in [src, tgt]:
with open(txt_root / f"{split}.{_lang}") as f:
utterances = [r.strip() for r in f]
assert len(segments) == len(utterances)
for i, u in enumerate(utterances):
segments[i][_lang] = u
# Gather info
self.data = []
for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]):
wav_filename = wav_filename.replace(".wav", ".flac")
wav_path = wav_root / wav_filename
sample_rate = sf.info(wav_path.as_posix()).samplerate
seg_group = sorted(_seg_group, key=lambda x: float(x["offset"]))
for i, segment in enumerate(seg_group):
offset = int(float(segment["offset"]) * sample_rate)
n_frames = int(float(segment["duration"]) * sample_rate)
_id = f"{wav_path.stem}_{i}"
self.data.append(
(
wav_path.as_posix(),
offset,
n_frames,
sample_rate,
segment[src],
segment[tgt],
segment["speaker_id"],
tgt,
_id,
)
)
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str, str, str, str, str]:
(
wav_path,
offset,
n_frames,
sr,
src_utt,
tgt_utt,
spk_id,
tgt_lang,
utt_id,
) = self.data[n]
waveform, _ = get_waveform(wav_path, frames=n_frames, start=offset)
waveform = torch.from_numpy(waveform)
return waveform, sr, src_utt, tgt_utt, spk_id, tgt_lang, utt_id
def __len__(self) -> int:
return len(self.data)
def process(args):
root = Path(args.data_root).absolute()
for lang in mTEDx.LANGPAIRS:
cur_root = root / f"{lang}"
if not cur_root.is_dir():
print(f"{cur_root.as_posix()} does not exist. Skipped.")
continue
# Extract features
audio_root = cur_root / ("flac" if args.use_audio_input else "fbank80")
audio_root.mkdir(exist_ok=True)
for split in mTEDx.SPLITS:
print(f"Fetching split {split}...")
dataset = mTEDx(root.as_posix(), lang, split)
if args.use_audio_input:
print("Converting audios...")
                for waveform, sample_rate, _, _, _, _, utt_id in tqdm(dataset):
tgt_sample_rate = 16_000
_wavform, _ = convert_waveform(
waveform,
sample_rate,
to_mono=True,
to_sample_rate=tgt_sample_rate,
)
sf.write(
(audio_root / f"{utt_id}.flac").as_posix(),
_wavform.numpy(),
tgt_sample_rate,
)
else:
print("Extracting log mel filter bank features...")
for waveform, sample_rate, _, _, _, _, utt_id in tqdm(dataset):
extract_fbank_features(
waveform, sample_rate, audio_root / f"{utt_id}.npy"
)
# Pack features into ZIP
zip_path = cur_root / f"{audio_root.name}.zip"
print("ZIPing audios/features...")
create_zip(audio_root, zip_path)
print("Fetching ZIP manifest...")
audio_paths, audio_lengths = get_zip_manifest(zip_path)
# Generate TSV manifest
print("Generating manifest...")
train_text = []
for split in mTEDx.SPLITS:
is_train_split = split.startswith("train")
manifest = {c: [] for c in MANIFEST_COLUMNS}
ds = mTEDx(args.data_root, lang, split)
for _, _, src_utt, tgt_utt, spk_id, tgt_lang, utt_id in tqdm(ds):
manifest["id"].append(utt_id)
manifest["audio"].append(audio_paths[utt_id])
manifest["n_frames"].append(audio_lengths[utt_id])
manifest["tgt_text"].append(src_utt if args.task == "asr" else tgt_utt)
manifest["speaker"].append(spk_id)
manifest["tgt_lang"].append(tgt_lang)
if is_train_split:
train_text.extend(manifest["tgt_text"])
df = pd.DataFrame.from_dict(manifest)
df = filter_manifest_df(df, is_train_split=is_train_split)
save_df_to_tsv(df, cur_root / f"{split}_{args.task}.tsv")
# Generate vocab
v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}"
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
cur_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
)
# Generate config YAML
if args.use_audio_input:
gen_config_yaml(
cur_root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy=None,
extra={"use_audio_input": True},
)
else:
gen_config_yaml(
cur_root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy="lb",
)
# Clean up
shutil.rmtree(audio_root)
def process_joint(args):
cur_root = Path(args.data_root)
assert all(
(cur_root / f"{lang}").is_dir() for lang in mTEDx.LANGPAIRS
), "do not have downloaded data available for all languages"
# Generate vocab
vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{args.task}"
with NamedTemporaryFile(mode="w") as f:
for lang in mTEDx.LANGPAIRS:
tsv_path = cur_root / f"{lang}" / f"train_{args.task}.tsv"
df = load_df_from_tsv(tsv_path)
for t in df["tgt_text"]:
f.write(t + "\n")
special_symbols = None
if args.joint:
# Add tgt_lang tags to dict
special_symbols = list(
{f'<lang:{lang.split("-")[1]}>' for lang in mTEDx.LANGPAIRS}
)
gen_vocab(
Path(f.name),
cur_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
special_symbols=special_symbols,
)
# Generate config YAML
gen_config_yaml(
cur_root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy="ld",
prepend_tgt_lang_tag=(args.joint),
)
# Make symbolic links to manifests
for lang in mTEDx.LANGPAIRS:
for split in mTEDx.SPLITS:
src_path = cur_root / f"{lang}" / f"{split}_{args.task}.tsv"
desc_path = cur_root / f"{split}_{lang}_{args.task}.tsv"
if not desc_path.is_symlink():
os.symlink(src_path, desc_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data-root", "-d", required=True, type=str)
parser.add_argument(
"--vocab-type",
default="unigram",
required=True,
type=str,
choices=["bpe", "unigram", "char"],
    )
parser.add_argument("--vocab-size", default=8000, type=int)
parser.add_argument("--task", type=str, choices=["asr", "st"])
parser.add_argument("--joint", action="store_true", help="")
parser.add_argument("--use-audio-input", action="store_true")
args = parser.parse_args()
if args.joint:
process_joint(args)
else:
process(args)
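# Example invocations (hypothetical path; --joint builds one vocabulary/config
# over all language pairs):
#   python prep_mtedx_data.py -d /path/to/mtedx --task asr --vocab-type unigram
#   python prep_mtedx_data.py -d /path/to/mtedx --task st --vocab-type unigram --joint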
if __name__ == "__main__":
main()
| 10,404
| 34.03367
| 87
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_to_text/prep_aishell2_data.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
from torch.utils.data import Dataset
from typing import Tuple, Union
import torchaudio
import pandas as pd
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
save_df_to_tsv,
)
from tqdm import tqdm
log = logging.getLogger(__name__)
# Define data splits
SPLITS = [
"train-error"
# "train"
# "test-ios", "test-android",
# "test-mic", "dev-ios",
# "dev-android", "dev-mic"
]
# Define the headers of columns
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
# Define special tokens
BOS_TOKEN, BOS_TOKEN_ID = "<s>", 0
PAD_TOKEN, PAD_TOKEN_ID = "<pad>", 1
EOS_TOKEN, EOS_TOKEN_ID = "</s>", 2
UNK_TOKEN, UNK_TOKEN_ID = "<unk>", 3
def load_aishell2_item(file_id, id2audios, id2trans, id2spk):
speaker_id = id2spk[file_id]
file_audio = id2audios[file_id]
waveform, sample_rate = torchaudio.load(file_audio)
assert sample_rate == 16000, "sample rate is not correct."
if file_id in id2trans.keys():
transcript = id2trans[file_id]
transcript = transcript.strip().replace(" ", "")
else:
# Translation not found
print("Translation not found for " + fileid)
transcript = None
return (
waveform,
sample_rate,
transcript,
str(speaker_id),
str(file_id),
)
class AISHELL2(Dataset):
"""Create a Dataset for AISHELL2."""
txt_filename = "trans.txt"
audio_scp_filename = "wav.scp"
speaker_filename = "spk_info.txt"
def __init__(self, root, split):
assert split in [
"train",
"test-ios",
"test-android",
"test-mic",
"dev-ios",
"dev-android",
"dev-mic",
"train-error",
], "data split is invalid."
root = os.fspath(root)
print(root)
if split == "train":
data_root_dir = os.path.join(root, "iOS", "data")
elif split == "train-error":
data_root_dir = os.path.join(root, "iOS", "data_error")
elif "dev" in split or "test" in split:
if "dev" in split:
if "ios" in split:
data_root_dir = os.path.join(
root, "AISHELL-DEV-TEST-SET", "iOS", "dev"
)
elif "android" in split:
data_root_dir = os.path.join(
root, "AISHELL-DEV-TEST-SET", "Android", "dev"
)
elif "mic" in split:
data_root_dir = os.path.join(
root, "AISHELL-DEV-TEST-SET", "Mic", "dev"
)
else:
raise ValueError("Invalid options %s" % split)
else:
if "ios" in split:
data_root_dir = os.path.join(
root, "AISHELL-DEV-TEST-SET", "iOS", "test"
)
elif "android" in split:
data_root_dir = os.path.join(
root, "AISHELL-DEV-TEST-SET", "Android", "test"
)
elif "mic" in split:
data_root_dir = os.path.join(
root, "AISHELL-DEV-TEST-SET", "Mic", "test"
)
else:
raise ValueError("Invalid options %s" % split)
else:
raise ValueError("Invalid options %s" % split)
self.trans_filename = os.path.join(data_root_dir, self.txt_filename)
self.wav_scp_filename = os.path.join(data_root_dir, self.audio_scp_filename)
self.id2txt_dict = dict()
with open(self.trans_filename, "r") as f_trans:
for line in f_trans:
uttid, text = line.strip().split("\t", 1)
self.id2txt_dict[uttid] = text
self.id2audios_dict = dict()
self.id2spk_dict = dict()
with open(self.wav_scp_filename, "r") as f_audios:
for line in f_audios:
uttid, audio_path = line.strip().split("\t", 1)
spk_id = audio_path.split("/")[1]
abs_audio_path = os.path.join(data_root_dir, audio_path)
self.id2audios_dict[uttid] = abs_audio_path
self.id2spk_dict[uttid] = spk_id
self._walker = list(self.id2txt_dict.keys())
self._walker.sort()
def __getitem__(self, n: int):
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            (Tensor, int, str, str, str):
            ``(waveform, sample_rate, transcript, speaker_id, utterance_id)``
"""
fileid = self._walker[n]
return load_aishell2_item(
fileid, self.id2audios_dict, self.id2txt_dict, self.id2spk_dict
)
def __len__(self) -> int:
return len(self._walker)
def process(args):
print("Begin process...")
input_root = Path(args.input_root).absolute()
out_root = Path(args.output_root).absolute()
out_root.mkdir(exist_ok=True)
# Extract features
feature_root = out_root / "fbank80"
feature_root.mkdir(exist_ok=True)
for split in SPLITS:
print(f"Fetching split {split}...")
dataset = AISHELL2(input_root.as_posix(), split=split)
print("Extracting log mel filter bank features...")
for wav, sample_rate, _, spk_id, utt_id in tqdm(dataset):
sample_id = utt_id
try:
extract_fbank_features(
wav, sample_rate, feature_root / f"{sample_id}.npy"
)
except Exception as e:
print(e)
print("Encounter error for %s" % utt_id)
else:
continue
# Pack features into ZIP
zip_path = out_root / "fbank80.zip"
print("ZIPing features...")
create_zip(feature_root, zip_path)
print("Fetching ZIP manifest...")
audio_paths, audio_lengths = get_zip_manifest(zip_path)
# Generate TSV manifest
print("Generating manifest...")
train_text = []
for split in SPLITS: # conduct for each data split
manifest = {c: [] for c in MANIFEST_COLUMNS}
dataset = AISHELL2(input_root.as_posix(), split=split)
for _, _, trans, spk_id, utt_id in tqdm(dataset):
if trans is not None and utt_id.strip() in audio_paths.keys():
# Add items one-by-one
sample_id = utt_id
manifest["id"].append(sample_id)
manifest["audio"].append(audio_paths[sample_id])
manifest["n_frames"].append(audio_lengths[sample_id])
manifest["tgt_text"].append(" ".join(list(trans.lower())))
manifest["speaker"].append(spk_id)
save_df_to_tsv(pd.DataFrame.from_dict(manifest), out_root / f"{split}.tsv")
if split.startswith("train"):
train_text.extend(manifest["tgt_text"])
# Generate vocab
vocab_file_path = os.path.join(str(out_root), "vocab.txt")
if len(train_text) != 0:
vocab_dict = dict()
for line in train_text:
tokens_list = line.strip().split(" ")
for tok in tokens_list:
if tok not in vocab_dict:
vocab_dict[tok] = 1
else:
vocab_dict[tok] += 1
sorted_vocab_dict = {
sort_k: sort_v
for sort_k, sort_v in sorted(
vocab_dict.items(), key=lambda kv: (kv[1], kv[0]), reverse=True
)
}
        with open(vocab_file_path, "w") as f_vocab:
            f_vocab.write("\t".join([BOS_TOKEN, str(0)]) + "\n")
            f_vocab.write("\t".join([PAD_TOKEN, str(0)]) + "\n")
            f_vocab.write("\t".join([EOS_TOKEN, str(0)]) + "\n")
            f_vocab.write("\t".join([UNK_TOKEN, str(0)]) + "\n")
            for tok, freq in sorted_vocab_dict.items():
                f_vocab.write("\t".join([tok, str(freq)]) + "\n")
# Generate config YAML
gen_config_yaml(out_root, vocab_name=vocab_file_path, specaugment_policy="ld")
# Clean up
shutil.rmtree(feature_root)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input-root",
"-i",
default="/data/LibriSpeech/mlhan_extra_files/AISHELL2",
required=False,
type=str,
) # assign the data output root directory
parser.add_argument(
"--output-root",
"-o",
default="/workspace/fairseq-uni/examples/speech_to_text/egs/aishell2/data/train_error",
required=False,
type=str,
) # assign the data output root directory
parser.add_argument(
"--vocab-type",
default="char",
required=False,
type=str,
choices=["bpe", "unigram", "char"],
) # assign the vocabulary type
args = parser.parse_args()
process(args)
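# Example invocation (hypothetical paths; the vocabulary is built character by
# character from the training transcripts):
#   python prep_aishell2_data.py -i /path/to/AISHELL2 -o /path/to/output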
if __name__ == "__main__":
main()
| 9,428
| 31.513793
| 95
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_to_text/prep_librispeech_data.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
import pandas as pd
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
save_df_to_tsv,
)
from torchaudio.datasets import LIBRISPEECH
from tqdm import tqdm
log = logging.getLogger(__name__)
# Define data splits
SPLITS = [
"train-clean-100",
"train-clean-360",
"train-other-500",
"dev-clean",
"dev-other",
"test-clean",
"test-other",
]
# Define the headers of columns
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
def process(args):
out_root = Path(args.output_root).absolute()
out_root.mkdir(exist_ok=True)
# Extract features
feature_root = out_root / "fbank80"
feature_root.mkdir(exist_ok=True)
for split in SPLITS:
print(f"Fetching split {split}...")
dataset = LIBRISPEECH(out_root.as_posix(), url=split, download=False)
print("Extracting log mel filter bank features...")
for wav, sample_rate, _, spk_id, chapter_no, utt_no in tqdm(dataset):
sample_id = f"{spk_id}-{chapter_no}-{utt_no}"
extract_fbank_features(wav, sample_rate, feature_root / f"{sample_id}.npy")
# Pack features into ZIP
zip_path = out_root / "fbank80.zip"
print("ZIPing features...")
create_zip(feature_root, zip_path)
print("Fetching ZIP manifest...")
audio_paths, audio_lengths = get_zip_manifest(zip_path)
# Generate TSV manifest
print("Generating manifest...")
train_text = []
for split in SPLITS: # conduct for each data split
manifest = {c: [] for c in MANIFEST_COLUMNS}
dataset = LIBRISPEECH(out_root.as_posix(), url=split)
for _, _, utt, spk_id, chapter_no, utt_no in tqdm(dataset):
# Add items one-by-one
sample_id = f"{spk_id}-{chapter_no}-{utt_no}"
manifest["id"].append(sample_id)
manifest["audio"].append(audio_paths[sample_id])
manifest["n_frames"].append(audio_lengths[sample_id])
manifest["tgt_text"].append(utt.lower())
manifest["speaker"].append(spk_id)
save_df_to_tsv(pd.DataFrame.from_dict(manifest), out_root / f"{split}.tsv")
if split.startswith("train"):
train_text.extend(manifest["tgt_text"])
# Generate vocab
vocab_size = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size}"
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
out_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
)
# Generate config YAML
gen_config_yaml(
out_root, spm_filename=spm_filename_prefix + ".model", specaugment_policy="ld"
)
# Clean up
shutil.rmtree(feature_root)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--output-root", "-o", required=True, type=str
) # assign the data output root directory
parser.add_argument(
"--vocab-type",
default="unigram",
required=True,
type=str,
choices=["bpe", "unigram", "char"],
) # assign the vocabulary type
parser.add_argument(
"--vocab-size", default=10000, type=int
) # assign the size of vocabulary
args = parser.parse_args()
process(args)
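# Example invocation (hypothetical path; the LibriSpeech splits are expected
# under the output root):
#   python prep_librispeech_data.py -o /path/to/librispeech --vocab-type unigram --vocab-size 10000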
if __name__ == "__main__":
main()
| 3,811
| 29.253968
| 87
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_to_text/data_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
from pathlib import Path
import zipfile
from functools import reduce
from multiprocessing import cpu_count
from typing import Any, Dict, List, Optional, Union
import io
import numpy as np
import pandas as pd
import sentencepiece as sp
from fairseq.data.audio.audio_utils import (
convert_waveform,
_get_kaldi_fbank,
_get_torchaudio_fbank,
is_npy_data,
is_sf_audio_data,
)
import torch
import soundfile as sf
from tqdm import tqdm
UNK_TOKEN, UNK_TOKEN_ID = "<unk>", 3
BOS_TOKEN, BOS_TOKEN_ID = "<s>", 0
EOS_TOKEN, EOS_TOKEN_ID = "</s>", 2
PAD_TOKEN, PAD_TOKEN_ID = "<pad>", 1
def gen_vocab(
input_path: Path,
output_path_prefix: Path,
model_type="bpe",
vocab_size=1000,
special_symbols: Optional[List[str]] = None,
):
# Train SentencePiece Model
arguments = [
f"--input={input_path.as_posix()}",
f"--model_prefix={output_path_prefix.as_posix()}",
f"--model_type={model_type}",
f"--vocab_size={vocab_size}",
"--character_coverage=1.0",
f"--num_threads={cpu_count()}",
f"--unk_id={UNK_TOKEN_ID}",
f"--bos_id={BOS_TOKEN_ID}",
f"--eos_id={EOS_TOKEN_ID}",
f"--pad_id={PAD_TOKEN_ID}",
]
if special_symbols is not None:
_special_symbols = ",".join(special_symbols)
arguments.append(f"--user_defined_symbols={_special_symbols}")
sp.SentencePieceTrainer.Train(" ".join(arguments))
# Export fairseq dictionary
spm = sp.SentencePieceProcessor()
spm.Load(output_path_prefix.as_posix() + ".model")
vocab = {i: spm.IdToPiece(i) for i in range(spm.GetPieceSize())}
assert (
vocab.get(UNK_TOKEN_ID) == UNK_TOKEN
and vocab.get(PAD_TOKEN_ID) == PAD_TOKEN
and vocab.get(BOS_TOKEN_ID) == BOS_TOKEN
and vocab.get(EOS_TOKEN_ID) == EOS_TOKEN
)
vocab = {
i: s
for i, s in vocab.items()
if s not in {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN}
}
with open(output_path_prefix.as_posix() + ".txt", "w") as f_out:
for _, s in sorted(vocab.items(), key=lambda x: x[0]):
f_out.write(f"{s} 1\n")
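# Usage sketch (hypothetical paths): trains a SentencePiece model on one
# sentence per line and writes the matching fairseq dictionary next to it:
#   gen_vocab(Path("train_text.txt"), Path("spm_unigram10000"), "unigram", 10000)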
def extract_fbank_features(
waveform: torch.FloatTensor,
sample_rate: int,
output_path: Optional[Path] = None,
n_mel_bins: int = 80,
overwrite: bool = False,
):
if output_path is not None and output_path.is_file() and not overwrite:
return
    _waveform, _ = convert_waveform(waveform, sample_rate, to_mono=True)
# Kaldi compliance: 16-bit signed integers
_waveform = _waveform * (2**15)
_waveform = _waveform[0].numpy()
features = _get_kaldi_fbank(_waveform, sample_rate, n_mel_bins)
if features is None:
features = _get_torchaudio_fbank(_waveform, sample_rate, n_mel_bins)
if features is None:
raise ImportError(
"Please install pyKaldi or torchaudio to enable fbank feature extraction"
)
if output_path is not None:
np.save(output_path.as_posix(), features)
return features
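# Usage sketch (assuming a mono float waveform tensor of shape (1, num_samples)):
#   feats = extract_fbank_features(waveform, 16000, Path("utt1.npy"))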
def create_zip(data_root: Path, zip_path: Path):
paths = list(data_root.glob("*.npy"))
with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_STORED) as f:
for path in tqdm(paths):
f.write(path, arcname=path.name)
def get_zip_manifest(zip_path: Path, zip_root: Optional[Path] = None, is_audio=False):
_zip_path = Path.joinpath(zip_root or Path(""), zip_path)
with zipfile.ZipFile(_zip_path, mode="r") as f:
info = f.infolist()
paths, lengths = {}, {}
for i in tqdm(info):
utt_id = Path(i.filename).stem
        # A ZIP local file header is 30 fixed bytes plus the filename (no extra
        # field is written by create_zip above), so the stored payload begins
        # at header_offset + 30 + len(filename).
        offset, file_size = i.header_offset + 30 + len(i.filename), i.file_size
paths[utt_id] = f"{zip_path.as_posix()}:{offset}:{file_size}"
with open(_zip_path, "rb") as f:
f.seek(offset)
byte_data = f.read(file_size)
assert len(byte_data) > 1
if is_audio:
assert is_sf_audio_data(byte_data), i
else:
assert is_npy_data(byte_data), i
byte_data_fp = io.BytesIO(byte_data)
if is_audio:
lengths[utt_id] = sf.info(byte_data_fp).frames
else:
lengths[utt_id] = np.load(byte_data_fp).shape[0]
return paths, lengths
def gen_config_yaml(
manifest_root: Path,
spm_filename: Optional[str] = None,
vocab_name: Optional[str] = None,
yaml_filename: str = "config.yaml",
specaugment_policy: Optional[str] = "lb",
prepend_tgt_lang_tag: bool = False,
sampling_alpha: Optional[float] = None,
input_channels: Optional[int] = 1,
input_feat_per_channel: Optional[int] = 80,
audio_root: str = "",
cmvn_type: str = "utterance",
gcmvn_path: Optional[Path] = None,
extra=None,
):
manifest_root = manifest_root.absolute()
writer = S2TDataConfigWriter(manifest_root / yaml_filename)
assert spm_filename is not None or vocab_name is not None
vocab_name = (
spm_filename.replace(".model", ".txt") if vocab_name is None else vocab_name
)
writer.set_vocab_filename(vocab_name)
if input_channels is not None:
writer.set_input_channels(input_channels)
if input_feat_per_channel is not None:
writer.set_input_feat_per_channel(input_feat_per_channel)
specaugment_setters = {
"lb": writer.set_specaugment_lb_policy,
"ld": writer.set_specaugment_ld_policy,
"sm": writer.set_specaugment_sm_policy,
"ss": writer.set_specaugment_ss_policy,
}
specaugment_setter = specaugment_setters.get(specaugment_policy, None)
if specaugment_setter is not None:
specaugment_setter()
if spm_filename is not None:
writer.set_bpe_tokenizer(
{
"bpe": "sentencepiece",
"sentencepiece_model": (manifest_root / spm_filename).as_posix(),
}
)
if prepend_tgt_lang_tag:
writer.set_prepend_tgt_lang_tag(True)
if sampling_alpha is not None:
writer.set_sampling_alpha(sampling_alpha)
if cmvn_type not in ["global", "utterance"]:
raise NotImplementedError
if specaugment_policy is not None:
writer.set_feature_transforms("_train", [f"{cmvn_type}_cmvn", "specaugment"])
writer.set_feature_transforms("*", [f"{cmvn_type}_cmvn"])
if cmvn_type == "global":
if gcmvn_path is None:
raise ValueError("Please provide path of global cmvn file.")
else:
writer.set_global_cmvn(gcmvn_path.as_posix())
if len(audio_root) > 0:
writer.set_audio_root(audio_root)
if extra is not None:
writer.set_extra(extra)
writer.flush()
def load_df_from_tsv(path: Union[str, Path]) -> pd.DataFrame:
_path = path if isinstance(path, str) else path.as_posix()
return pd.read_csv(
_path,
sep="\t",
header=0,
encoding="utf-8",
escapechar="\\",
quoting=csv.QUOTE_NONE,
na_filter=False,
)
def save_df_to_tsv(dataframe, path: Union[str, Path]):
_path = path if isinstance(path, str) else path.as_posix()
dataframe.to_csv(
_path,
sep="\t",
header=True,
index=False,
encoding="utf-8",
escapechar="\\",
quoting=csv.QUOTE_NONE,
)
def load_tsv_to_dicts(path: Union[str, Path]) -> List[dict]:
with open(path, "r") as f:
reader = csv.DictReader(
f,
delimiter="\t",
quotechar=None,
doublequote=False,
lineterminator="\n",
quoting=csv.QUOTE_NONE,
)
rows = [dict(e) for e in reader]
return rows
def filter_manifest_df(
df, is_train_split=False, extra_filters=None, min_n_frames=5, max_n_frames=3000
):
filters = {
"no speech": df["audio"] == "",
f"short speech (<{min_n_frames} frames)": df["n_frames"] < min_n_frames,
"empty sentence": df["tgt_text"] == "",
}
if is_train_split:
filters[f"long speech (>{max_n_frames} frames)"] = df["n_frames"] > max_n_frames
if extra_filters is not None:
filters.update(extra_filters)
invalid = reduce(lambda x, y: x | y, filters.values())
valid = ~invalid
print(
"| "
+ ", ".join(f"{n}: {f.sum()}" for n, f in filters.items())
+ f", total {invalid.sum()} filtered, {valid.sum()} remained."
)
return df[valid]
def cal_gcmvn_stats(features_list):
features = np.concatenate(features_list)
square_sums = (features**2).sum(axis=0)
mean = features.mean(axis=0)
    # var = E[x^2] - mean^2
    var = square_sums / features.shape[0] - mean**2
std = np.sqrt(np.maximum(var, 1e-8))
return {"mean": mean.astype("float32"), "std": std.astype("float32")}
class S2TDataConfigWriter(object):
DEFAULT_VOCAB_FILENAME = "dict.txt"
DEFAULT_INPUT_FEAT_PER_CHANNEL = 80
DEFAULT_INPUT_CHANNELS = 1
def __init__(self, yaml_path: Path):
try:
import yaml
except ImportError:
print("Please install PyYAML for S2T data config YAML files")
self.yaml = yaml
self.yaml_path = yaml_path
self.config = {}
def flush(self):
with open(self.yaml_path, "w") as f:
self.yaml.dump(self.config, f)
def set_audio_root(self, audio_root=""):
self.config["audio_root"] = audio_root
def set_vocab_filename(self, vocab_filename: str = "dict.txt"):
self.config["vocab_filename"] = vocab_filename
def set_specaugment(
self,
time_wrap_w: int,
freq_mask_n: int,
freq_mask_f: int,
time_mask_n: int,
time_mask_t: int,
time_mask_p: float,
):
self.config["specaugment"] = {
"time_wrap_W": time_wrap_w,
"freq_mask_N": freq_mask_n,
"freq_mask_F": freq_mask_f,
"time_mask_N": time_mask_n,
"time_mask_T": time_mask_t,
"time_mask_p": time_mask_p,
}
def set_specaugment_lb_policy(self):
self.set_specaugment(
time_wrap_w=0,
freq_mask_n=1,
freq_mask_f=27,
time_mask_n=1,
time_mask_t=100,
time_mask_p=1.0,
)
def set_specaugment_ld_policy(self):
self.set_specaugment(
time_wrap_w=0,
freq_mask_n=2,
freq_mask_f=27,
time_mask_n=2,
time_mask_t=100,
time_mask_p=1.0,
)
def set_specaugment_sm_policy(self):
self.set_specaugment(
time_wrap_w=0,
freq_mask_n=2,
freq_mask_f=15,
time_mask_n=2,
time_mask_t=70,
time_mask_p=0.2,
)
def set_specaugment_ss_policy(self):
self.set_specaugment(
time_wrap_w=0,
freq_mask_n=2,
freq_mask_f=27,
time_mask_n=2,
time_mask_t=70,
time_mask_p=0.2,
)
def set_input_channels(self, input_channels: int = 1):
self.config["input_channels"] = input_channels
def set_input_feat_per_channel(self, input_feat_per_channel: int = 80):
self.config["input_feat_per_channel"] = input_feat_per_channel
def set_bpe_tokenizer(self, bpe_tokenizer: Dict[str, Any]):
self.config["bpe_tokenizer"] = bpe_tokenizer
def set_global_cmvn(self, stats_npz_path: str):
self.config["global_cmvn"] = {"stats_npz_path": stats_npz_path}
def set_feature_transforms(self, split: str, transforms: List[str]):
if "transforms" not in self.config:
self.config["transforms"] = {}
self.config["transforms"][split] = transforms
def set_prepend_tgt_lang_tag(self, flag: bool = True):
self.config["prepend_tgt_lang_tag"] = flag
def set_sampling_alpha(self, sampling_alpha: float = 1.0):
self.config["sampling_alpha"] = sampling_alpha
def set_extra(self, data):
self.config.update(data)
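# Usage sketch: gen_config_yaml above drives this writer; a direct use would be
#   writer = S2TDataConfigWriter(Path("config.yaml"))
#   writer.set_vocab_filename("spm_unigram8000.txt")
#   writer.set_specaugment_lb_policy()
#   writer.flush()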
| 12,224
| 30.670984
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_to_text/prep_mustc_data.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
from pathlib import Path
import shutil
from itertools import groupby
from tempfile import NamedTemporaryFile
from typing import Tuple
import numpy as np
import pandas as pd
import soundfile as sf
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
cal_gcmvn_stats,
)
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from fairseq.data.audio.audio_utils import get_waveform, convert_waveform
log = logging.getLogger(__name__)
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
class MUSTC(Dataset):
"""
Create a Dataset for MuST-C. Each item is a tuple of the form:
waveform, sample_rate, source utterance, target utterance, speaker_id,
utterance_id
"""
SPLITS = ["train", "dev", "tst-COMMON", "tst-HE"]
LANGUAGES = ["de", "es", "fr", "it", "nl", "pt", "ro", "ru"]
def __init__(self, root: str, lang: str, split: str) -> None:
assert split in self.SPLITS and lang in self.LANGUAGES
_root = Path(root) / f"en-{lang}" / "data" / split
wav_root, txt_root = _root / "wav", _root / "txt"
assert _root.is_dir() and wav_root.is_dir() and txt_root.is_dir()
# Load audio segments
try:
import yaml
except ImportError:
print("Please install PyYAML to load the MuST-C YAML files")
with open(txt_root / f"{split}.yaml") as f:
segments = yaml.load(f, Loader=yaml.BaseLoader)
# Load source and target utterances
for _lang in ["en", lang]:
with open(txt_root / f"{split}.{_lang}") as f:
utterances = [r.strip() for r in f]
assert len(segments) == len(utterances)
for i, u in enumerate(utterances):
segments[i][_lang] = u
# Gather info
self.data = []
for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]):
wav_path = wav_root / wav_filename
sample_rate = sf.info(wav_path.as_posix()).samplerate
            seg_group = sorted(_seg_group, key=lambda x: float(x["offset"]))
for i, segment in enumerate(seg_group):
offset = int(float(segment["offset"]) * sample_rate)
n_frames = int(float(segment["duration"]) * sample_rate)
_id = f"{wav_path.stem}_{i}"
self.data.append(
(
wav_path.as_posix(),
offset,
n_frames,
sample_rate,
segment["en"],
segment[lang],
segment["speaker_id"],
_id,
)
)
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str, str, str, str]:
wav_path, offset, n_frames, sr, src_utt, tgt_utt, spk_id, utt_id = self.data[n]
waveform, _ = get_waveform(wav_path, frames=n_frames, start=offset)
waveform = torch.from_numpy(waveform)
return waveform, sr, src_utt, tgt_utt, spk_id, utt_id
def __len__(self) -> int:
return len(self.data)
def process(args):
root = Path(args.data_root).absolute()
for lang in MUSTC.LANGUAGES:
cur_root = root / f"en-{lang}"
if not cur_root.is_dir():
print(f"{cur_root.as_posix()} does not exist. Skipped.")
continue
# Extract features
audio_root = cur_root / ("flac" if args.use_audio_input else "fbank80")
audio_root.mkdir(exist_ok=True)
for split in MUSTC.SPLITS:
print(f"Fetching split {split}...")
dataset = MUSTC(root.as_posix(), lang, split)
if args.use_audio_input:
print("Converting audios...")
for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset):
tgt_sample_rate = 16_000
_wavform, _ = convert_waveform(
waveform,
sample_rate,
to_mono=True,
to_sample_rate=tgt_sample_rate,
)
sf.write(
(audio_root / f"{utt_id}.flac").as_posix(),
_wavform.numpy(),
tgt_sample_rate,
)
else:
print("Extracting log mel filter bank features...")
gcmvn_feature_list = []
if split == "train" and args.cmvn_type == "global":
print("And estimating cepstral mean and variance stats...")
for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset):
features = extract_fbank_features(
waveform, sample_rate, audio_root / f"{utt_id}.npy"
)
if split == "train" and args.cmvn_type == "global":
if len(gcmvn_feature_list) < args.gcmvn_max_num:
gcmvn_feature_list.append(features)
if split == "train" and args.cmvn_type == "global":
                    # Estimate and save global CMVN stats
stats = cal_gcmvn_stats(gcmvn_feature_list)
with open(cur_root / "gcmvn.npz", "wb") as f:
np.savez(f, mean=stats["mean"], std=stats["std"])
# Pack features into ZIP
zip_path = cur_root / f"{audio_root.name}.zip"
print("ZIPing audios/features...")
create_zip(audio_root, zip_path)
print("Fetching ZIP manifest...")
audio_paths, audio_lengths = get_zip_manifest(zip_path)
# Generate TSV manifest
print("Generating manifest...")
train_text = []
for split in MUSTC.SPLITS:
is_train_split = split.startswith("train")
manifest = {c: [] for c in MANIFEST_COLUMNS}
dataset = MUSTC(args.data_root, lang, split)
for _, _, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset):
manifest["id"].append(utt_id)
manifest["audio"].append(audio_paths[utt_id])
manifest["n_frames"].append(audio_lengths[utt_id])
manifest["tgt_text"].append(src_utt if args.task == "asr" else tgt_utt)
manifest["speaker"].append(speaker_id)
if is_train_split:
train_text.extend(manifest["tgt_text"])
df = pd.DataFrame.from_dict(manifest)
df = filter_manifest_df(df, is_train_split=is_train_split)
save_df_to_tsv(df, cur_root / f"{split}_{args.task}.tsv")
# Generate vocab
v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}"
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
cur_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
)
# Generate config YAML
if args.use_audio_input:
gen_config_yaml(
cur_root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy=None,
extra={"use_audio_input": True},
)
else:
gen_config_yaml(
cur_root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy="lb",
cmvn_type=args.cmvn_type,
gcmvn_path=(
cur_root / "gcmvn.npz" if args.cmvn_type == "global" else None
),
)
# Clean up
shutil.rmtree(audio_root)
def process_joint(args):
cur_root = Path(args.data_root)
    assert all(
        (cur_root / f"en-{lang}").is_dir() for lang in MUSTC.LANGUAGES
    ), "downloaded data is not available for all 8 languages"
# Generate vocab
vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{args.task}"
with NamedTemporaryFile(mode="w") as f:
for lang in MUSTC.LANGUAGES:
tsv_path = cur_root / f"en-{lang}" / f"train_{args.task}.tsv"
df = load_df_from_tsv(tsv_path)
for t in df["tgt_text"]:
f.write(t + "\n")
special_symbols = None
if args.task == "st":
special_symbols = [f"<lang:{lang}>" for lang in MUSTC.LANGUAGES]
gen_vocab(
Path(f.name),
cur_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
special_symbols=special_symbols,
)
# Generate config YAML
gen_config_yaml(
cur_root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy="ld",
prepend_tgt_lang_tag=(args.task == "st"),
)
# Make symbolic links to manifests
for lang in MUSTC.LANGUAGES:
for split in MUSTC.SPLITS:
src_path = cur_root / f"en-{lang}" / f"{split}_{args.task}.tsv"
desc_path = cur_root / f"{split}_{lang}_{args.task}.tsv"
if not desc_path.is_symlink():
os.symlink(src_path, desc_path)
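# After this loop the data root holds per-language symlinks such as
# "train_de_st.tsv" -> "en-de/train_st.tsv" (names depend on --task).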
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data-root", "-d", required=True, type=str)
parser.add_argument(
"--vocab-type",
default="unigram",
required=True,
type=str,
choices=["bpe", "unigram", "char"],
    )
parser.add_argument("--vocab-size", default=8000, type=int)
parser.add_argument("--task", type=str, choices=["asr", "st"])
parser.add_argument("--joint", action="store_true", help="")
parser.add_argument(
"--cmvn-type",
default="utterance",
choices=["global", "utterance"],
help="The type of cepstral mean and variance normalization",
)
parser.add_argument(
"--gcmvn-max-num",
default=150000,
type=int,
help="Maximum number of sentences to use to estimate global mean and "
"variance",
)
parser.add_argument("--use-audio-input", action="store_true")
args = parser.parse_args()
if args.joint:
process_joint(args)
else:
process(args)
if __name__ == "__main__":
main()
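# Example invocations (paths are placeholders):
#   python prep_mustc_data.py -d ${MUSTC_ROOT} --task asr --vocab-type unigram
#   python prep_mustc_data.py -d ${MUSTC_ROOT} --task st --joint --vocab-type unigram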
| 11,015
| 36.726027
| 87
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_to_text/prep_aishell1_data.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
from torch.utils.data import Dataset
from typing import Tuple, Union
import torchaudio
import pandas as pd
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
save_df_to_tsv,
)
from tqdm import tqdm
log = logging.getLogger(__name__)
# Define data splits
SPLITS = ["dev", "test", "train_sp"]
# SPLITS = ["dev"]
# Define the headers of columns
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
# Define special tokens
BOS_TOKEN, BOS_TOKEN_ID = "<s>", 0
PAD_TOKEN, PAD_TOKEN_ID = "<pad>", 1
EOS_TOKEN, EOS_TOKEN_ID = "</s>", 2
UNK_TOKEN, UNK_TOKEN_ID = "<unk>", 3
def load_aishell1_item(fileid: str, path: str, ext_audio: str, id2txt_dict):
# get speaker id
speaker_id = "".join(list(fileid)[6:11])
# Specify the path to audio
file_audio = fileid + ext_audio
file_audio = os.path.join(path, speaker_id, file_audio)
# Load audio
waveform, sample_rate = torchaudio.load(file_audio)
# Load text
if fileid in id2txt_dict.keys():
transcript = id2txt_dict[fileid]
transcript = transcript.strip().replace(" ", "")
else:
        # Transcript not found
        print("Transcript not found for " + fileid)
transcript = None
return (
waveform,
sample_rate,
transcript,
str(speaker_id),
str(fileid),
)
def load_speed_perturbated_aishell1_item(fileid: str, id2txt_dict, id2filedir_dict):
if fileid.startswith("sp"):
# For speed perturbated audio
temp_fileid = fileid.split("-", 1)[-1]
speaker_id = "".join(list(temp_fileid)[6:11])
else:
# For original audio
speaker_id = "".join(list(fileid)[6:11])
# Load audio
file_path = id2filedir_dict[fileid]
waveform, sample_rate = torchaudio.load(file_path)
# Load text
if fileid in id2txt_dict.keys():
transcript = id2txt_dict[fileid]
transcript = transcript.strip().replace(" ", "")
else:
        # Transcript not found
        print("Transcript not found for " + fileid)
transcript = None
return (
waveform,
sample_rate,
transcript,
str(speaker_id),
str(fileid),
)
class AISHELL1(Dataset):
"""Create a Dataset for AISHELL1.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from,
or the type of the dataset to dowload.
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"LibriSpeech"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_txt_file_name = "aishell_transcript_v0.8.txt"
_ext_audio = ".wav"
FOLDER_IN_ARCHIVE = "AISHELL1/data_aishell/wav"
TRANSCROPT_IN_ARCHIVE = "AISHELL1/data_aishell/transcript"
def __init__(self, root, split):
        if split in ["train", "dev", "test"]:
            print("Valid data split detected.")
        else:
            raise ValueError(f"Invalid data split: {split}")
        basename = split
root = os.fspath(root)
folder_in_archive = os.path.join(self.FOLDER_IN_ARCHIVE, basename)
self._path = os.path.join(root, folder_in_archive) # Obtain target wav path
self._walker = sorted(
str(p.stem) for p in Path(self._path).glob("*/*" + self._ext_audio)
) # Traverse all samples
self._txt_file_path = os.path.join(
root, self.TRANSCROPT_IN_ARCHIVE, self._txt_file_name
)
self._id2txt_dict = dict()
with open(self._txt_file_path, "r") as ft:
for line in ft:
uttid, text = line.strip().split(" ", 1)
self._id2txt_dict[uttid] = text
def __getitem__(self, n: int):
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            (Tensor, int, str, str, str):
            ``(waveform, sample_rate, transcript, speaker_id, utterance_id)``
"""
fileid = self._walker[n]
return load_aishell1_item(
fileid, self._path, self._ext_audio, self._id2txt_dict
)
def __len__(self) -> int:
return len(self._walker)
class SpeedPerturbatedAISHELL1(Dataset):
"""Create a Dataset for AISHELL1.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from,
or the type of the dataset to dowload.
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"LibriSpeech"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_txt_file_name = "text"
_scp_file_name = "wav.scp"
_ext_audio = ".wav"
FOLDER_IN_ARCHIVE = "dump/raw"
def __init__(self, root, split):
if split in ["dev", "test", "train_sp"]:
print("Valid data split detected.")
basename = split
root = os.fspath(root)
folder_in_archive = os.path.join(self.FOLDER_IN_ARCHIVE, basename)
self._path = os.path.join(root, folder_in_archive)
# Register path
self._walker = []
self.id2filedir_dict = dict()
self._scp_file_path = os.path.join(root, folder_in_archive, self._scp_file_name)
with open(self._scp_file_path) as fp:
for line in fp:
uttid, utt_dir = line.strip().split(" ", 1)
if uttid.startswith("sp"):
self.id2filedir_dict[uttid] = os.path.join(root, utt_dir.strip())
else:
self.id2filedir_dict[uttid] = utt_dir.strip()
self._walker.append(uttid.strip())
self._walker = sorted(self._walker)
logging.info("Utterance path registration done")
# Register text
self._id2txt_dict = dict()
self._txt_file_path = os.path.join(root, folder_in_archive, self._txt_file_name)
with open(self._txt_file_path, "r") as ft:
line_cnt = 0
for line in ft:
if uttid in self._walker:
uttid, text = line.strip().split(" ", 1)
self._id2txt_dict[uttid] = text.strip().replace(" ", "")
line_cnt += 1
if line_cnt % 10000 == 0:
logging.info("have processed %d lines" % line_cnt)
logging.info("Vocabulary collection done")
logging.info("Dataset initialization done")
def __getitem__(self, n: int):
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            ``(waveform, sample_rate, transcript, speaker_id, utterance_id)``
"""
fileid = self._walker[n]
return load_speed_perturbated_aishell1_item(
fileid, self._id2txt_dict, self.id2filedir_dict
)
def __len__(self) -> int:
return len(self._walker)
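# Minimal usage sketch (root hypothetical): ids and wav paths come from
# "dump/raw/<split>/wav.scp", transcripts from "dump/raw/<split>/text".
#
#   dataset = SpeedPerturbatedAISHELL1("/path/to/egs/aishell1", split="train_sp")
#   waveform, sr, transcript, spk_id, utt_id = dataset[0]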
def process(args):
input_root = Path(args.input_root).absolute()
out_root = Path(args.output_root).absolute()
out_root.mkdir(exist_ok=True)
# Extract features
feature_root = out_root / "fbank80"
feature_root.mkdir(exist_ok=True)
for split in SPLITS:
print(f"Fetching split {split}...")
        # Use the speed-perturbed dataset: SPLITS includes "train_sp", and the
        # manifest loop below uses the same class.
        # dataset = AISHELL1(input_root.as_posix(), split=split)
        dataset = SpeedPerturbatedAISHELL1(input_root.as_posix(), split=split)
print("Extracting log mel filter bank features...")
for wav, sample_rate, _, spk_id, utt_id in tqdm(dataset):
sample_id = utt_id
try:
extract_fbank_features(
wav, sample_rate, feature_root / f"{sample_id}.npy"
)
            except Exception:
                print("Encountered error for %s" % utt_id)
# Pack features into ZIP
zip_path = out_root / "fbank80.zip"
print("ZIPing features...")
create_zip(feature_root, zip_path)
print("Fetching ZIP manifest...")
audio_paths, audio_lengths = get_zip_manifest(zip_path)
# Generate TSV manifest
print("Generating manifest...")
train_text = []
for split in SPLITS: # conduct for each data split
manifest = {c: [] for c in MANIFEST_COLUMNS}
# dataset = AISHELL1(input_root.as_posix(), split=split)
dataset = SpeedPerturbatedAISHELL1(input_root.as_posix(), split=split)
for _, _, trans, spk_id, utt_id in tqdm(dataset):
if trans is not None and utt_id.strip() in audio_paths.keys():
# Add items one-by-one
sample_id = utt_id
manifest["id"].append(sample_id)
manifest["audio"].append(audio_paths[sample_id])
manifest["n_frames"].append(audio_lengths[sample_id])
manifest["tgt_text"].append(" ".join(list(trans.lower())))
manifest["speaker"].append(spk_id)
save_df_to_tsv(pd.DataFrame.from_dict(manifest), out_root / f"{split}.tsv")
if split.startswith("train"):
train_text.extend(manifest["tgt_text"])
# Generate vocab
vocab_dict = dict()
for line in train_text:
tokens_list = line.strip().split(" ")
for tok in tokens_list:
if tok not in vocab_dict:
vocab_dict[tok] = 1
else:
vocab_dict[tok] += 1
sorted_vocab_dict = {
sort_k: sort_v
for sort_k, sort_v in sorted(
vocab_dict.items(), key=lambda kv: (kv[1], kv[0]), reverse=True
)
}
vocab_file_path = os.path.join(str(out_root), "vocab.txt")
f_vocab = open(vocab_file_path, "w")
f_vocab.write("\t".join([BOS_TOKEN, str(0)]) + "\n")
f_vocab.write("\t".join([PAD_TOKEN, str(0)]) + "\n")
f_vocab.write("\t".join([EOS_TOKEN, str(0)]) + "\n")
f_vocab.write("\t".join([UNK_TOKEN, str(0)]) + "\n")
for idx, (tok, freq) in enumerate(sorted_vocab_dict.items()):
f_vocab.write("\t".join([tok, str(freq)]) + "\n")
f_vocab.close()
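    # The resulting vocab.txt is tab-separated "<token>\t<count>" lines with
    # the four special tokens first; their line order is assumed to match the
    # ids defined above (<s>=0, <pad>=1, </s>=2, <unk>=3).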
# Generate config YAML
gen_config_yaml(out_root, vocab_name=vocab_file_path, specaugment_policy="ld")
# Clean up
shutil.rmtree(feature_root)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input-root",
"-i",
default="/data/LibriSpeech/mlhan_extra_files/",
required=False,
type=str,
) # assign the data output root directory
parser.add_argument(
"--output-root",
"-o",
default="/workspace/fairseq-uni/examples/speech_to_text/egs/aishell1/data/",
required=False,
type=str,
) # assign the data output root directory
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| 11,501
| 31.038997
| 98
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_to_text/prep_ljspeech_data.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
import pandas as pd
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
save_df_to_tsv,
)
from tqdm import tqdm
log = logging.getLogger(__name__)
SPLITS = ["train", "dev", "test"]
MANIFEST_COLUMNS = [
"id",
"audio",
"n_frames",
"tgt_text",
"speaker",
"src_text",
"duration",
"pitch",
"energy",
]
def process(args):
out_root = Path(args.output_root).absolute()
out_root.mkdir(exist_ok=True)
# Read features from zipped files
feats_zip_path = out_root / "logmelspec80.zip"
print("Fetching ZIP manifest...")
audio_paths, audio_lengths = get_zip_manifest(feats_zip_path)
# Generate TSV manifest
print("Generating manifest...")
train_text = []
for split in SPLITS:
# Prepare dataset
print(f"Fetching split {split}...")
dataset = [] # dataset is list of lists
org_tsv_path = str(out_root / split) + ".tsv"
print(org_tsv_path)
f_org_tsv = open(org_tsv_path, "r")
org_headers_in_org_tsv = f_org_tsv.readline().strip().split("\t")
for line in f_org_tsv:
items = line.strip().split("\t")
cur_sample = []
for item in items:
cur_sample.append(item)
dataset.append(cur_sample)
# Check consistency
assert (
org_headers_in_org_tsv == MANIFEST_COLUMNS
), "Please ensure that manifest column headers are consistent."
# Generate manifest files
manifest = {c: [] for c in MANIFEST_COLUMNS}
for cur_sample in dataset:
(
sample_id,
audio,
n_frames,
tgt_text,
spk_id,
src_text,
duration,
pitch,
energy,
) = cur_sample
manifest["id"].append(sample_id)
manifest["audio"].append(audio_paths[sample_id])
manifest["n_frames"].append(audio_lengths[sample_id])
manifest["tgt_text"].append(tgt_text)
manifest["speaker"].append(spk_id)
manifest["src_text"].append(src_text)
manifest["duration"].append(duration)
manifest["pitch"].append(pitch)
manifest["energy"].append(energy)
save_df_to_tsv(
pd.DataFrame.from_dict(manifest), out_root / f"{split}_final.tsv"
)
if split.startswith("train"):
train_text.extend(manifest["tgt_text"])
# Generate config YAML
gen_config_yaml(
out_root,
vocab_name="phoneme_vocab.txt",
specaugment_policy=None,
cmvn_type="global",
gcmvn_path=out_root / "gcmvn_stats.npz",
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--output-root", "-o", required=True, type=str)
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| 3,352
| 26.483607
| 77
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_to_text/seg_mustc_data.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
import soundfile as sf
from examples.speech_to_text.prep_mustc_data import MUSTC
from tqdm import tqdm
log = logging.getLogger(__name__)
def main(args):
root = Path(args.data_root).absolute()
lang = args.lang
split = args.split
cur_root = root / f"en-{lang}"
assert cur_root.is_dir(), f"{cur_root.as_posix()} does not exist. Skipped."
dataset = MUSTC(root.as_posix(), lang, split)
output = Path(args.output).absolute()
output.mkdir(exist_ok=True)
f_text = open(output / f"{split}.{lang}", "w")
f_wav_list = open(output / f"{split}.wav_list", "w")
for waveform, sample_rate, _, text, _, utt_id in tqdm(dataset):
sf.write(
output / f"{utt_id}.wav",
waveform.squeeze(0).numpy(),
samplerate=int(sample_rate),
)
f_text.write(text + "\n")
f_wav_list.write(str(output / f"{utt_id}.wav") + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--data-root", "-d", required=True, type=str)
parser.add_argument("--task", required=True, type=str, choices=["asr", "st"])
parser.add_argument("--lang", required=True, type=str)
parser.add_argument("--output", required=True, type=str)
parser.add_argument("--split", required=True, choices=MUSTC.SPLITS)
args = parser.parse_args()
main(args)
| 1,622
| 30.823529
| 81
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_to_text/gen_librispeech_vocab.py
|
# @Time : 2022/3/2
# @Author : Minglun Han
# @File : gen_librispeech_vocab.py
import sys
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
import pandas as pd
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
save_df_to_tsv,
)
from torchaudio.datasets import LIBRISPEECH
from tqdm import tqdm
# Define data splits
SPLITS = [
"train-clean-100",
"train-clean-360",
"train-other-500",
]
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
# Optional positional overrides; the hard-coded defaults apply otherwise.
output_root = (
    sys.argv[1]
    if len(sys.argv) > 1
    else "/workspace/fairseq-uni/examples/speech_to_text/egs/librispeech/data"
)
vocab_size = int(sys.argv[2]) if len(sys.argv) > 2 else 1000
vocab_type = sys.argv[3] if len(sys.argv) > 3 else "unigram"
out_root = Path(output_root).absolute()
out_root.mkdir(exist_ok=True)
# Load text
train_text = []
for split in SPLITS:
manifest = {c: [] for c in MANIFEST_COLUMNS}
dataset = LIBRISPEECH(out_root.as_posix(), url=split)
for _, _, utt, _, _, _ in tqdm(dataset):
manifest["tgt_text"].append(utt.lower())
train_text.extend(manifest["tgt_text"])
# Generate vocabulary
vocab_size = "" if vocab_type == "char" else str(vocab_size)
spm_filename_prefix = f"spm_{vocab_type}{vocab_size}"
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
out_root / spm_filename_prefix,
vocab_type,
vocab_size,
)
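# Usage sketch (arguments optional; the defaults above apply otherwise):
#   python gen_librispeech_vocab.py [output_root] [vocab_size] [vocab_type]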
| 1,514
| 24.25
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_to_text/gen_vocab.py
|
# @Time : 2022/3/2
# @Author : Minglun Han
# @File : gen_vocab.py
import sys
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
import pandas as pd
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
save_df_to_tsv,
)
from torchaudio.datasets import LIBRISPEECH
from tqdm import tqdm
# Define data splits
SPLITS = [
"train-clean-100",
"train-clean-360",
"train-other-500",
]
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
# sys.argv[0] is the script name itself; the real arguments start at [1].
output_root = sys.argv[1]
vocab_size = sys.argv[2]
vocab_type = sys.argv[3]
out_root = Path(output_root).absolute()
out_root.mkdir(exist_ok=True)
# Load text
train_text = []
for split in SPLITS: # conduct for each data split
manifest = {c: [] for c in MANIFEST_COLUMNS}
dataset = LIBRISPEECH(out_root.as_posix(), url=split)
for _, _, utt, spk_id, chapter_no, utt_no in tqdm(dataset):
# Add items one-by-one
sample_id = f"{spk_id}-{chapter_no}-{utt_no}"
manifest["tgt_text"].append(utt.lower())
if split.startswith("train"):
train_text.extend(manifest["tgt_text"])
# Generate vocabulary
vocab_size = "" if vocab_type == "char" else str(vocab_size)
spm_filename_prefix = f"spm_{vocab_type}{vocab_size}"
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
out_root / spm_filename_prefix,
vocab_type,
vocab_size,
)
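# Usage sketch (all three positional arguments are required):
#   python gen_vocab.py <output_root> <vocab_size> <vocab_type>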
| 1,515
| 24.266667
| 63
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_to_text/bert_feat_extract/extract_bert_feats.py
|
# @Time : 2022/8/15
# @Author : Minglun Han
# @File : extract_bert_feats.py
import os
import sys
import argparse
import random
import string
import json
import numpy as np
import torch
from transformers import BertTokenizer, BertModel
"""
Description:
    This program extracts features from a pretrained model.
    You can specify the pretrained model and its vocabulary. The input is a file of text_id and text pairs.
    The outputs are npy files named by text_id, plus a hash table mapping each text_id to its feature npy file.
Outputs:
1. ${utterance_id}.npy files;
2. features hash json;
Chinese Pretraining models:
MODEL_NAME MODEL_KEY
Bert-base-chinese bert-base-chinese
"""
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def parse_args():
parse = argparse.ArgumentParser(description="generate data tables")
parse.add_argument(
"--input_text_file_dir",
type=str,
default="/data1/student/mlhan/myprojects/fairseq-uni/examples/speech_to_text/egs/aishell2/data/aishell2.map",
help="directory to texts, format '${utterance_id}\t${text}'",
)
parse.add_argument(
"--split_name", type=str, default="aishell2", help="the split name"
)
parse.add_argument(
"--output_dir",
type=str,
default="/data1/student/mlhan/myprojects/fairseq-uni/examples/speech_to_text/egs/aishell2/bert_feats/bert-base-chinese/",
help="directory used to save outputs",
)
parse.add_argument(
"--pretrained_model",
type=str,
default="bert-base-chinese",
help="determine which pretrained model to be used to extract features",
)
parse.add_argument(
"--pretrained_model_vocab",
type=str,
default="bert-base-chinese",
help="the vocabulary of the pretrained model",
)
parse.add_argument(
"--batch_size",
type=int,
default=256,
help="the batch size for feature extraction",
)
parse.add_argument(
"--lang", "-l", type=str, default="cn", help="the language of text"
)
parse.add_argument("--gpu", action="store_true")
args = parse.parse_args()
return args
def split_and_save(
final_output_dir, utt_id_list, input_ids, attention_mask, last_hidden_states
):
output_list = []
for utt_id, ids, padding_mask, feat in zip(
utt_id_list, input_ids, attention_mask, last_hidden_states
):
cur_dict = dict()
cur_dict["utt_id"] = utt_id
cur_dict["input_ids"] = ids.cpu().detach().numpy().tolist()
cur_dict["padding_mask"] = padding_mask.cpu().detach().numpy().tolist()
cur_dict["length"] = int(padding_mask.sum().cpu().detach().numpy())
cur_output_filename = os.path.join(final_output_dir, utt_id + ".npy")
if not os.path.exists(cur_output_filename):
np.save(cur_output_filename, feat.cpu().detach().numpy())
cur_dict["feat_path"] = cur_output_filename
output_list.append(cur_dict)
return output_list
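# Each record appended to the hash table looks roughly like this
# (illustrative values):
#   {"utt_id": "BAC009S0002W0122", "input_ids": [...], "padding_mask": [...],
#    "length": 12, "feat_path": "<output_dir>/<utt_id>.npy"}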
def main(args):
input_text_file_dir = args.input_text_file_dir
split_name = args.split_name
output_dir = args.output_dir
pretrained_model = args.pretrained_model
pretrained_model_vocab = args.pretrained_model_vocab
lang = args.lang
# Load tokenizer and model
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# bert = BertModel.from_pretrained('bert-base-uncased')
print("1. Load pretrained models and vocabulary")
    tokenizer = BertTokenizer.from_pretrained(pretrained_model_vocab)
    bert = BertModel.from_pretrained(pretrained_model)
    bert.eval()
    # Inference only: disable autograd so activations are not kept for backward
    torch.set_grad_enabled(False)
if args.gpu:
bert = bert.cuda()
# Prepare output directory
print("2. Create working directory")
final_output_dir = os.path.join(output_dir, split_name)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
if os.path.exists(final_output_dir):
print("features are existing in %s" % final_output_dir)
else:
os.mkdir(final_output_dir)
hash_table_dir = os.path.join(final_output_dir, split_name + "_text_feat" + ".json")
f_hash = open(hash_table_dir, "w")
# Extract features from pretrained models
print("3. Extracting features")
utt_id_list = []
batch_inputs = []
data_list = []
batch_counter = 0
with open(input_text_file_dir, "r") as f:
batch_size_counter = 0
for line in f:
utt_id, text = line.strip().split("\t", 1)
if lang == "cn":
text = text.strip().replace(" ", "") # For Chinese temporarily
else:
text = text.strip()
batch_inputs.append(text)
utt_id_list.append(utt_id)
batch_size_counter += 1
if batch_size_counter % args.batch_size == 0:
# Forward pretrained models
inputs = tokenizer(batch_inputs, return_tensors="pt", padding=True)
if args.gpu:
for k in inputs.keys():
inputs[k] = inputs[k].cuda()
outputs = bert(**inputs)
# Split and save samples
input_ids = inputs["input_ids"] # B x T
attention_mask = inputs["attention_mask"] # B x T
last_hidden_states = outputs.last_hidden_state # B x T x C
output_list = split_and_save(
final_output_dir,
utt_id_list,
input_ids,
attention_mask,
last_hidden_states,
)
data_list.extend(output_list)
# Empty buffers
batch_counter += 1
batch_size_counter = 0
batch_inputs = []
utt_id_list = []
print("have processed %d batches. " % batch_counter)
    # Process samples in the last (possibly partial) batch, if any remain
    print("4. Process residual batch")
    if len(batch_inputs) > 0:
        inputs = tokenizer(batch_inputs, return_tensors="pt", padding=True)
        if args.gpu:
            for k in inputs.keys():
                inputs[k] = inputs[k].cuda()
        outputs = bert(**inputs)
        input_ids = inputs["input_ids"]  # B x T
        attention_mask = inputs["attention_mask"]  # B x T
        last_hidden_states = outputs.last_hidden_state  # B x T x C
        output_list = split_and_save(
            final_output_dir, utt_id_list, input_ids, attention_mask, last_hidden_states
        )
        data_list.extend(output_list)
data_dict = {"data": data_list}
json_data_dict = json.dumps(data_dict, indent=4)
f_hash.write(json_data_dict)
f_hash.close()
print("Feature extraction from pretrained language model is Done.")
if __name__ == "__main__":
args = parse_args()
main(args)
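# Example invocation (paths are placeholders):
#   python extract_bert_feats.py \
#       --input_text_file_dir data/train.map --split_name train \
#       --output_dir bert_feats/bert-base-chinese/ --batch_size 256 --gpu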
| 6,843
| 32.54902
| 129
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_to_text/simultaneous_translation/agents/fairseq_simul_st_agent.py
|
import math
import os
import json
import numpy as np
import torch
import torchaudio.compliance.kaldi as kaldi
import yaml
from fairseq import checkpoint_utils, tasks
from fairseq.file_io import PathManager
try:
from simuleval import READ_ACTION, WRITE_ACTION, DEFAULT_EOS
from simuleval.agents import SpeechAgent
from simuleval.states import ListEntry, SpeechStates
except ImportError:
print("Please install simuleval 'pip install simuleval'")
SHIFT_SIZE = 10
WINDOW_SIZE = 25
SAMPLE_RATE = 16000
FEATURE_DIM = 80
BOW_PREFIX = "\u2581"
class OnlineFeatureExtractor:
"""
    Extract speech features on the fly.
"""
def __init__(self, args):
self.shift_size = args.shift_size
self.window_size = args.window_size
assert self.window_size >= self.shift_size
self.sample_rate = args.sample_rate
self.feature_dim = args.feature_dim
self.num_samples_per_shift = int(self.shift_size * self.sample_rate / 1000)
self.num_samples_per_window = int(self.window_size * self.sample_rate / 1000)
self.len_ms_to_samples = lambda x: x * self.sample_rate / 1000
self.previous_residual_samples = []
self.global_cmvn = args.global_cmvn
def clear_cache(self):
self.previous_residual_samples = []
def __call__(self, new_samples):
samples = self.previous_residual_samples + new_samples
if len(samples) < self.num_samples_per_window:
self.previous_residual_samples = samples
return
# num_frames is the number of frames from the new segment
num_frames = math.floor(
(len(samples) - self.len_ms_to_samples(self.window_size - self.shift_size))
/ self.num_samples_per_shift
)
# the number of frames used for feature extraction
        # including some part of the previous segment
effective_num_samples = int(
num_frames * self.len_ms_to_samples(self.shift_size)
+ self.len_ms_to_samples(self.window_size - self.shift_size)
)
input_samples = samples[:effective_num_samples]
self.previous_residual_samples = samples[
num_frames * self.num_samples_per_shift :
]
torch.manual_seed(1)
output = kaldi.fbank(
torch.FloatTensor(input_samples).unsqueeze(0),
num_mel_bins=self.feature_dim,
frame_length=self.window_size,
frame_shift=self.shift_size,
).numpy()
output = self.transform(output)
return torch.from_numpy(output)
def transform(self, input):
if self.global_cmvn is None:
return input
mean = self.global_cmvn["mean"]
std = self.global_cmvn["std"]
x = np.subtract(input, mean)
x = np.divide(x, std)
return x
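# Streaming bookkeeping sketch (default settings): with a 25 ms window and a
# 10 ms shift at 16 kHz, each frame covers 400 samples and advances by 160;
# the trailing window - shift = 240 samples stay in previous_residual_samples
# for the next call.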
class TensorListEntry(ListEntry):
"""
    Data structure to store a list of tensors.
"""
def append(self, value):
if len(self.value) == 0:
self.value = value
return
self.value = torch.cat([self.value] + [value], dim=0)
def info(self):
return {
"type": str(self.new_value_type),
"length": self.__len__(),
"value": "" if type(self.value) is list else self.value.size(),
}
class FairseqSimulSTAgent(SpeechAgent):
speech_segment_size = 40 # in ms, 4 pooling ratio * 10 ms step size
def __init__(self, args):
super().__init__(args)
self.eos = DEFAULT_EOS
self.gpu = getattr(args, "gpu", False)
self.args = args
self.load_model_vocab(args)
if (
getattr(
self.model.decoder.layers[0].encoder_attn, "pre_decision_ratio", None
)
is not None
):
self.speech_segment_size *= self.model.decoder.layers[
0
].encoder_attn.pre_decision_ratio
args.global_cmvn = None
if args.config:
with open(os.path.join(args.data_bin, args.config), "r") as f:
config = yaml.load(f, Loader=yaml.BaseLoader)
if "global_cmvn" in config:
args.global_cmvn = np.load(config["global_cmvn"]["stats_npz_path"])
if args.global_stats:
with PathManager.open(args.global_stats, "r") as f:
global_cmvn = json.loads(f.read())
self.global_cmvn = {
"mean": global_cmvn["mean"],
"std": global_cmvn["stddev"],
}
self.feature_extractor = OnlineFeatureExtractor(args)
self.max_len = args.max_len
self.force_finish = args.force_finish
torch.set_grad_enabled(False)
def build_states(self, args, client, sentence_id):
# Initialize states here, for example add customized entry to states
# This function will be called at beginning of every new sentence
states = SpeechStates(args, client, sentence_id, self)
self.initialize_states(states)
return states
def to_device(self, tensor):
if self.gpu:
return tensor.cuda()
else:
return tensor.cpu()
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--model-path', type=str, required=True,
help='path to your pretrained model.')
parser.add_argument("--data-bin", type=str, required=True,
help="Path of data binary")
parser.add_argument("--config", type=str, default=None,
help="Path to config yaml file")
parser.add_argument("--global-stats", type=str, default=None,
help="Path to json file containing cmvn stats")
parser.add_argument("--tgt-splitter-type", type=str, default="SentencePiece",
help="Subword splitter type for target text")
parser.add_argument("--tgt-splitter-path", type=str, default=None,
help="Subword splitter model path for target text")
parser.add_argument("--user-dir", type=str, default="examples/simultaneous_translation",
help="User directory for simultaneous translation")
parser.add_argument("--max-len", type=int, default=200,
help="Max length of translation")
parser.add_argument("--force-finish", default=False, action="store_true",
help="Force the model to finish the hypothsis if the source is not finished")
parser.add_argument("--shift-size", type=int, default=SHIFT_SIZE,
help="Shift size of feature extraction window.")
parser.add_argument("--window-size", type=int, default=WINDOW_SIZE,
help="Window size of feature extraction window.")
parser.add_argument("--sample-rate", type=int, default=SAMPLE_RATE,
help="Sample rate")
parser.add_argument("--feature-dim", type=int, default=FEATURE_DIM,
help="Acoustic feature dimension.")
# fmt: on
return parser
def load_model_vocab(self, args):
filename = args.model_path
if not os.path.exists(filename):
raise IOError("Model file not found: {}".format(filename))
state = checkpoint_utils.load_checkpoint_to_cpu(filename)
task_args = state["cfg"]["task"]
task_args.data = args.data_bin
if args.config is not None:
task_args.config_yaml = args.config
task = tasks.setup_task(task_args)
# build model for ensemble
state["cfg"]["model"].load_pretrained_encoder_from = None
state["cfg"]["model"].load_pretrained_decoder_from = None
self.model = task.build_model(state["cfg"]["model"])
self.model.load_state_dict(state["model"], strict=True)
self.model.eval()
self.model.share_memory()
if self.gpu:
self.model.cuda()
# Set dictionary
self.dict = {}
self.dict["tgt"] = task.target_dictionary
def initialize_states(self, states):
self.feature_extractor.clear_cache()
states.units.source = TensorListEntry()
states.units.target = ListEntry()
states.incremental_states = dict()
def segment_to_units(self, segment, states):
# Convert speech samples to features
features = self.feature_extractor(segment)
if features is not None:
return [features]
else:
return []
def units_to_segment(self, units, states):
# Merge sub word to full word.
if self.model.decoder.dictionary.eos() == units[0]:
return DEFAULT_EOS
segment = []
if None in units.value:
units.value.remove(None)
for index in units:
if index is None:
units.pop()
token = self.model.decoder.dictionary.string([index])
if token.startswith(BOW_PREFIX):
if len(segment) == 0:
segment += [token.replace(BOW_PREFIX, "")]
else:
for j in range(len(segment)):
units.pop()
string_to_return = ["".join(segment)]
if self.model.decoder.dictionary.eos() == units[0]:
string_to_return += [DEFAULT_EOS]
return string_to_return
else:
segment += [token.replace(BOW_PREFIX, "")]
if (
len(units) > 0
and self.model.decoder.dictionary.eos() == units[-1]
or len(states.units.target) > self.max_len
):
tokens = [self.model.decoder.dictionary.string([unit]) for unit in units]
return ["".join(tokens).replace(BOW_PREFIX, "")] + [DEFAULT_EOS]
return None
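    # Sketch of the merge rule (SentencePiece-style pieces): if the pending
    # units decode to ["▁he", "llo", "▁world"], pieces are merged up to the
    # next "▁"-prefixed piece, so "hello" is emitted and "▁world" stays queued.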
def update_model_encoder(self, states):
if len(states.units.source) == 0:
return
src_indices = self.to_device(states.units.source.value.unsqueeze(0))
src_lengths = self.to_device(
torch.LongTensor([states.units.source.value.size(0)])
)
states.encoder_states = self.model.encoder(src_indices, src_lengths)
torch.cuda.empty_cache()
def update_states_read(self, states):
# Happens after a read action.
self.update_model_encoder(states)
def policy(self, states):
if not getattr(states, "encoder_states", None):
return READ_ACTION
tgt_indices = self.to_device(
torch.LongTensor(
[self.model.decoder.dictionary.eos()]
+ [x for x in states.units.target.value if x is not None]
).unsqueeze(0)
)
states.incremental_states["steps"] = {
"src": states.encoder_states["encoder_out"][0].size(0),
"tgt": 1 + len(states.units.target),
}
states.incremental_states["online"] = {
"only": torch.tensor(not states.finish_read())
}
x, outputs = self.model.decoder.forward(
prev_output_tokens=tgt_indices,
encoder_out=states.encoder_states,
incremental_state=states.incremental_states,
)
states.decoder_out = x
states.decoder_out_extra = outputs
torch.cuda.empty_cache()
if outputs.action == 0:
return READ_ACTION
else:
return WRITE_ACTION
def predict(self, states):
decoder_states = states.decoder_out
lprobs = self.model.get_normalized_probs(
[decoder_states[:, -1:]], log_probs=True
)
index = lprobs.argmax(dim=-1)
index = index[0, 0].item()
if (
self.force_finish
and index == self.model.decoder.dictionary.eos()
and not states.finish_read()
):
# If we want to force finish the translation
# (don't stop before finish reading), return a None
# self.model.decoder.clear_cache(states.incremental_states)
index = None
return index
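# This agent is intended to be driven by SimulEval, roughly (flags abridged,
# paths hypothetical):
#   simuleval --agent fairseq_simul_st_agent.py --source wav_list.txt \
#       --target ref.txt --model-path checkpoint.pt --data-bin data_bin/ \
#       --config config.yaml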
| 12,271
| 32.347826
| 105
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_recognition/ctc_decoder.py
|
# @Time : 2021/7/26
# @Author : Minglun Han
# @File : ctc_decoder.py
import os
import sys
import torch
import random
import logging
import torch.nn.functional as F
import numpy as np
import itertools as it
# Control print options
torch.set_printoptions(profile="full")
torch.set_printoptions(profile="default")
np.set_printoptions(threshold=sys.maxsize)
class CtcDecoder(object):
def __init__(self, args, tgt_dict):
self.tgt_dict = tgt_dict
self.vocab_size = len(tgt_dict)
self.beam = args.beam
# Get the index of special tokens
self.blank = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
) # if <ctc_blank> in dictionary, use its index else use bos token's index
self.bos = tgt_dict.bos()
self.eos = tgt_dict.eos()
self.pad = tgt_dict.pad()
if self.beam == 1:
logging.info("employ ctc greedy decoder")
self.decode = self.batch_greedy_decode
else:
raise NotImplementedError("Not supported options!")
def generate(self, models, sample, **unused):
"""Generate a batch of inferences."""
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
model_inputs = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
} # remove prev_output_tokens
# Forward encoder
ctc_logits, encoder_outputs_padding_mask = models[0].get_ctc_output(
src_tokens=model_inputs["src_tokens"],
src_lengths=model_inputs["src_lengths"],
)
# Obtain log-probabilities and conduct decoding
ctc_log_probs = models[0].get_probs_from_logits(ctc_logits, log_probs=True)
beam_results, beam_scores, out_seqlens = self.decode(
ctc_log_probs, encoder_outputs_padding_mask
)
return self.generate_hypos(
beam_results=beam_results,
beam_scores=beam_scores,
out_seqlens=out_seqlens,
)
def generate_hypos(self, beam_results, beam_scores, out_seqlens):
hypos = []
for beam_result, scores, lengths in zip(beam_results, beam_scores, out_seqlens):
# beam_ids: beam x id; score: beam; length: beam
top = []
for result, score, length in zip(beam_result, scores, lengths):
top.append({"tokens": self.get_tokens(result[:length]), "score": score})
hypos.append(top)
return hypos
def get_tokens(self, idxs):
"""
Normalize tokens by handling CTC blank, ASG replabels, etc.
"""
# Remove blank id and eos id
idxs = (g[0] for g in it.groupby(idxs))
idxs = filter(lambda x: x != self.blank, idxs)
idxs = filter(lambda x: x != self.eos, idxs)
return torch.LongTensor(list(idxs))
def batch_greedy_decode(self, ctc_log_probs, encoder_outputs_padding_mask):
"""
:param model: the model in usage
:param ctc_log_probs: the log probabilities of ctc outputs
:return: prev_tokens, out_seqlens, scores
"""
# Get the maximum length of decoding steps
batch_size, max_ctc_outputs_len, _ = ctc_log_probs.size()
input_lengths = (~encoder_outputs_padding_mask).int().sum(-1)
# Acquire output seqlens and scores
out_seqlens = []
scores = []
for sample_id in range(batch_size):
# Acquire current sample's ctc log probabilities
cur_sample_encoder_out_len = input_lengths[sample_id]
# print(cur_sample_encoder_out_len)
cur_ctc_log_probs = ctc_log_probs[sample_id, :cur_sample_encoder_out_len, :]
# cur_sample_encoder_out_len x V
# print(cur_ctc_log_probs.size())
cur_score = cur_ctc_log_probs.max(dim=-1)[0].sum().item() # 1
cur_toks = cur_ctc_log_probs.argmax(
dim=-1
).unique_consecutive() # cur_sample_encoder_out_len
cur_toks = cur_toks[cur_toks != self.blank]
cur_out_seqlen = cur_toks.size(0)
scores.append(cur_score)
out_seqlens.append(cur_out_seqlen)
# Acquire output hypotheses
scores = torch.tensor(scores)
out_seqlens = torch.tensor(out_seqlens)
prev_tokens = []
max_output_seqlen = out_seqlens.max().item()
for sample_id in range(batch_size):
cur_sample_encoder_out_len = input_lengths[sample_id]
cur_ctc_log_probs = ctc_log_probs[sample_id, :cur_sample_encoder_out_len, :]
cur_toks = cur_ctc_log_probs.argmax(dim=-1)
# print(cur_toks)
cur_toks = cur_toks.unique_consecutive()
# print(cur_toks)
cur_toks = cur_toks[cur_toks != self.blank]
# print(cur_toks)
cur_out_seqlen = cur_toks.size(0)
padding_tensor = (
(torch.ones([max_output_seqlen - cur_out_seqlen]) * self.tgt_dict.pad())
.long()
.cuda()
)
sample_pred = torch.unsqueeze(
torch.cat([cur_toks, padding_tensor], dim=0), dim=0
)
prev_tokens.append(sample_pred)
prev_tokens = torch.cat(prev_tokens, dim=0)
# Reform outputs
prev_tokens = torch.unsqueeze(prev_tokens, dim=1) # B x 1 x T
out_seqlens = torch.unsqueeze(out_seqlens, dim=-1) # B x 1
scores = torch.unsqueeze(scores, dim=-1) # B x 1
return prev_tokens, scores, out_seqlens
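# Greedy CTC collapse example (b = blank id): a frame-wise argmax sequence
# [a, a, b, a, b, c, c] merges repeats to [a, b, a, b, c], then drops blanks
# to give [a, a, c].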
| 5,750
| 35.169811
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_recognition/__init__.py
|
from . import criterions, models, tasks # noqa
| 48
| 23.5
| 47
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_recognition/infer.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run inference for pre-processed data with a trained model.
"""
import ast
import logging
import math
import os
import sys
import edlib
import editdistance
import numpy as np
import torch
from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from transformers import BertTokenizer, BertModel, BertLayer
logging.basicConfig(level=logging.INFO)
logging.root.setLevel(logging.INFO)
logger = logging.getLogger(__name__)
np.set_printoptions(threshold=10000000)
torch.set_printoptions(profile="full")
def add_asr_eval_argument(parser):
parser.add_argument("--kspmodel", default=None, help="sentence piece model")
parser.add_argument(
"--wfstlm", default=None, help="wfstlm on dictonary output units"
)
parser.add_argument(
"--rnnt_decoding_type",
default="greedy",
help="wfstlm on dictonary output units",
)
try:
parser.add_argument(
"--lm-weight",
"--lm_weight",
type=float,
default=0.2,
help="weight for lm while interpolating with neural score",
)
except:
pass
parser.add_argument(
"--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level"
)
parser.add_argument(
"--w2l-decoder",
choices=["viterbi", "kenlm", "fairseqlm"],
help="use a w2l decoder",
)
parser.add_argument("--lexicon", help="lexicon for w2l decoder")
parser.add_argument("--unit-lm", action="store_true", help="if using a unit lm")
parser.add_argument("--kenlm-model", "--lm-model", help="lm model for w2l decoder")
parser.add_argument("--beam-threshold", type=float, default=25.0)
parser.add_argument("--beam-size-token", type=float, default=100)
parser.add_argument("--word-score", type=float, default=1.0)
parser.add_argument("--unk-weight", type=float, default=-math.inf)
parser.add_argument("--sil-weight", type=float, default=0.0)
parser.add_argument(
"--dump-emissions",
type=str,
default=None,
help="if present, dumps emissions into this file and exits",
)
parser.add_argument(
"--dump-features",
type=str,
default=None,
help="if present, dumps features into this file and exits",
)
parser.add_argument(
"--load-emissions",
type=str,
default=None,
help="if present, loads emissions from this file",
)
# Other decoder settings
parser.add_argument(
"--cif-decoder",
choices=["cif"],
help="use a cif decoder",
)
parser.add_argument(
"--cif-decoder-mode",
choices=["ar", "nar", "fast_ar"],
help="the mode of cif decoder",
)
parser.add_argument(
"--tail-handling-firing-threshold",
type=float,
default=0.5,
help="tail handling firing threshold",
)
parser.add_argument("--ctc-decoder", choices=["ctc"], help="use a ctc decoder")
# Shallow fusion settings
parser.add_argument(
"--use-nnlm", action="store_true", help="use neural language model"
)
parser.add_argument(
"--fetch-nnlm-from",
default="",
)
parser.add_argument("--specified-dict-path", default="")
# Multi-modal decoder settings
parser.add_argument("--use-multimodal-info", action="store_true")
parser.add_argument("--mask-multimodal-feats", action="store_true")
# Transformers tokenizer settings
parser.add_argument("--use-transformers-tokenizer", action="store_true")
parser.add_argument(
"--pretrained-model-vocab", type=str, default="bert-base-uncased"
)
return parser
def check_args(args):
# assert args.path is not None, "--path required for generation!"
# assert args.results_path is not None, "--results_path required for generation!"
assert (
not args.sampling or args.nbest == args.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
args.replace_unk is None or args.raw_text
), "--replace-unk requires a raw text dataset (--raw-text)"
def get_dataset_itr(args, task, models):
return task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
data_buffer_size=args.data_buffer_size,
).next_epoch_itr(shuffle=False)
def process_predictions(
args,
hypos,
sp,
tgt_dict,
target_tokens,
res_files,
speaker,
id,
tokenizer=None,
):
for hypo in hypos[: min(len(hypos), args.nbest)]:
hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
if "words" in hypo:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(
hyp_pieces, args.post_process, args, huggingface_tokenizer=tokenizer
)
if res_files is not None:
print(
"{} ({}-{})".format(hyp_pieces, speaker, id),
file=res_files["hypo.units"],
)
print(
"{} ({}-{})".format(hyp_words, speaker, id),
file=res_files["hypo.words"],
)
tgt_pieces = tgt_dict.string(target_tokens)
tgt_words = post_process(
tgt_pieces, args.post_process, args, huggingface_tokenizer=tokenizer
)
if res_files is not None:
print(
"{} ({}-{})".format(tgt_pieces, speaker, id),
file=res_files["ref.units"],
)
print(
"{} ({}-{})".format(tgt_words, speaker, id),
file=res_files["ref.words"],
)
if not args.quiet:
logger.info("HYPO: " + hyp_words)
logger.info("TARGET: " + tgt_words)
logger.info("HYPO PIECES: " + hyp_pieces)
logger.info("TARGET PIECES: " + tgt_pieces)
logger.info("___________________")
hyp_words = hyp_words.split()
tgt_words = tgt_words.split()
# Get aligned errors
align_stats = edlib.align(hyp_words, tgt_words, task="path")
align_info = align_stats["cigar"]
op_list, len_list = [], []
tmp_len_str = ""
align_special_toks = ["=", "D", "I", "X"]
for char in list(align_info):
if char in align_special_toks:
op_list.append(char)
len_list.append(int(tmp_len_str))
tmp_len_str = ""
else:
tmp_len_str += char
op_dict = tuple(zip(op_list, len_list))
sub_errs, ins_errs, del_errs = 0, 0, 0
for err_type, num in op_dict:
if err_type == "X":
sub_errs += num
elif err_type == "I":
ins_errs += num
elif err_type == "D":
del_errs += num
else:
continue
return (
editdistance.eval(hyp_words, tgt_words),
len(tgt_words),
sub_errs,
ins_errs,
del_errs,
)
def prepare_result_files(args):
def get_res_file(file_prefix):
if args.num_shards > 1:
file_prefix = f"{args.shard_id}_{file_prefix}"
path = os.path.join(
args.results_path,
"{}-{}-{}.txt".format(
file_prefix, os.path.basename(args.path), args.gen_subset
),
)
return open(path, "w", buffering=1)
if not args.results_path:
return None
return {
"hypo.words": get_res_file("hypo.word"),
"hypo.units": get_res_file("hypo.units"),
"ref.words": get_res_file("ref.word"),
"ref.units": get_res_file("ref.units"),
}
def optimize_models(args, use_cuda, models):
"""Optimize ensemble for generation"""
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
def apply_half(t):
if t.dtype is torch.float32:
return t.to(dtype=torch.half)
return t
class ExistingEmissionsDecoder(object):
def __init__(self, decoder, emissions):
self.decoder = decoder
self.emissions = emissions
def generate(self, models, sample, **unused):
ids = sample["id"].cpu().numpy()
try:
emissions = np.stack(self.emissions[ids])
except:
print([x.shape for x in self.emissions[ids]])
raise Exception("invalid sizes")
emissions = torch.from_numpy(emissions)
return self.decoder.decode(emissions)
def main(args, task=None, model_state=None):
check_args(args)
use_fp16 = args.fp16
if args.max_tokens is None and args.batch_size is None:
args.max_tokens = 4000000
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
logger.info("| decoding with criterion {}".format(args.criterion))
task = tasks.setup_task(args)
# Load ensemble
if args.load_emissions:
models, criterions = [], []
task.load_dataset(args.gen_subset)
else:
logger.info("| loading model(s) from {}".format(args.path))
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
utils.split_paths(args.path, separator="\\"),
arg_overrides=ast.literal_eval(args.model_overrides),
task=task,
suffix=args.checkpoint_suffix,
strict=(args.checkpoint_shard_count == 1),
num_shards=args.checkpoint_shard_count,
state=model_state,
)
optimize_models(args, use_cuda, models)
task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)
# Set dictionary
tgt_dict = task.target_dictionary
logger.info(
"| {} {} {} examples".format(
args.data, args.gen_subset, len(task.dataset(args.gen_subset))
)
)
# hack to pass transitions to W2lDecoder
if args.criterion == "asg_loss":
raise NotImplementedError("asg_loss is currently not supported")
# trans = criterions[0].asg.trans.data
# args.asg_transitions = torch.flatten(trans).tolist()
# Load dataset (possibly sharded)
itr = get_dataset_itr(args, task, models)
# Initialize generator
gen_timer = StopwatchMeter()
def build_generator(args):
w2l_decoder = getattr(args, "w2l_decoder", None)
cif_decoder = getattr(args, "cif_decoder", None)
ctc_decoder = getattr(args, "ctc_decoder", None)
if w2l_decoder is not None:
if w2l_decoder == "viterbi":
from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
return W2lViterbiDecoder(args, task.target_dictionary)
elif w2l_decoder == "kenlm":
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
return W2lKenLMDecoder(args, task.target_dictionary)
elif w2l_decoder == "fairseqlm":
from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
return W2lFairseqLMDecoder(args, task.target_dictionary)
else:
print(
"only flashlight decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment"
)
elif cif_decoder:
if cif_decoder == "cif":
from examples.speech_recognition.cif_decoder import CifDecoder
return CifDecoder(args, task.target_dictionary)
elif ctc_decoder:
if ctc_decoder == "ctc":
from examples.speech_recognition.ctc_decoder import CtcDecoder
return CtcDecoder(args, task.target_dictionary)
else:
raise NotImplementedError("unsupported options.")
# please do not touch this unless you test both generate.py and infer.py with audio_pretraining task
generator = build_generator(args)
if args.load_emissions:
generator = ExistingEmissionsDecoder(
generator, np.load(args.load_emissions, allow_pickle=True)
)
logger.info("loaded emissions from " + args.load_emissions)
num_sentences = 0
if args.results_path is not None and not os.path.exists(args.results_path):
os.makedirs(args.results_path)
max_source_pos = (
utils.resolve_max_positions(
task.max_positions(), *[model.max_positions() for model in models]
),
)
if max_source_pos is not None:
max_source_pos = max_source_pos[0]
if max_source_pos is not None:
max_source_pos = max_source_pos[0] - 1
if args.dump_emissions:
emissions = {}
if args.dump_features:
features = {}
models[0].bert.proj = None
else:
res_files = prepare_result_files(args)
# logger.info("Model Structure: ")
# logger.info(f"{models[0]}")
    # load huggingface tokenizer
tokenizer = None
if args.use_transformers_tokenizer:
tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_vocab)
errs_t, lengths_t = 0, 0
sub_errs_t, del_errs_t, ins_errs_t = 0, 0, 0
with progress_bar.build_progress_bar(args, itr) as t:
wps_meter = TimeMeter()
for sample in t:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if use_fp16:
sample = utils.apply_to_sample(apply_half, sample)
if "net_input" not in sample:
continue
prefix_tokens = None
if args.prefix_size > 0:
prefix_tokens = sample["target"][:, : args.prefix_size]
gen_timer.start()
if args.dump_emissions:
with torch.no_grad():
encoder_out = models[0](**sample["net_input"])
emm = models[0].get_normalized_probs(encoder_out, log_probs=True)
emm = emm.transpose(0, 1).cpu().numpy()
for i, id in enumerate(sample["id"]):
emissions[id.item()] = emm[i]
continue
elif args.dump_features:
with torch.no_grad():
encoder_out = models[0](**sample["net_input"])
feat = encoder_out["encoder_out"].transpose(0, 1).cpu().numpy()
for i, id in enumerate(sample["id"]):
padding = (
encoder_out["encoder_padding_mask"][i].cpu().numpy()
if encoder_out["encoder_padding_mask"] is not None
else None
)
features[id.item()] = (feat[i], padding)
continue
hypos = task.inference_step(generator, models, sample, prefix_tokens)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample["id"].tolist()):
speaker = None
# id = task.dataset(args.gen_subset).ids[int(sample_id)]
id = sample_id
toks = (
sample["target"][i, :]
if "target_label" not in sample
else sample["target_label"][i, :]
)
target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
# Process top predictions
errs, length, sub_errs, del_errs, ins_errs = process_predictions(
args,
hypos[i],
None,
tgt_dict,
target_tokens,
res_files,
speaker,
id,
tokenizer=tokenizer,
)
errs_t += errs
sub_errs_t += sub_errs
del_errs_t += del_errs
ins_errs_t += ins_errs
lengths_t += length
wps_meter.update(num_generated_tokens)
t.log({"wps": round(wps_meter.avg)})
num_sentences += (
sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
)
wer = None
if args.dump_emissions:
emm_arr = []
for i in range(len(emissions)):
emm_arr.append(emissions[i])
np.save(args.dump_emissions, emm_arr)
logger.info(f"saved {len(emissions)} emissions to {args.dump_emissions}")
elif args.dump_features:
feat_arr = []
for i in range(len(features)):
feat_arr.append(features[i])
np.save(args.dump_features, feat_arr)
logger.info(f"saved {len(features)} emissions to {args.dump_features}")
else:
if lengths_t > 0:
wer = errs_t * 100.0 / lengths_t
logger.info(f"WER: {wer}, ERRORS: {errs_t}, TOTAL_REF_LEN: {lengths_t}")
logger.info(
"| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
"sentences/s, {:.2f} tokens/s)".format(
num_sentences,
gen_timer.n,
gen_timer.sum,
num_sentences / gen_timer.sum,
1.0 / gen_timer.avg,
)
)
logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam))
return task, wer
def make_parser():
parser = options.get_generation_parser()
parser = add_asr_eval_argument(parser)
return parser
def cli_main():
parser = make_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| 18,560
| 32.203936
| 115
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_recognition/w2l_decoder.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Flashlight decoders.
"""
import gc
import itertools as it
import os.path as osp
from typing import List
import warnings
from collections import deque, namedtuple
import numpy as np
import torch
from examples.speech_recognition.data.replabels import unpack_replabels
from fairseq import tasks
from fairseq.utils import apply_to_sample
from omegaconf import open_dict
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
try:
from flashlight.lib.text.dictionary import create_word_dict, load_words
from flashlight.lib.sequence.criterion import CpuViterbiPath, get_data_ptr_as_bytes
from flashlight.lib.text.decoder import (
CriterionType,
LexiconDecoderOptions,
KenLM,
LM,
LMState,
SmearingMode,
Trie,
LexiconDecoder,
)
except:
warnings.warn(
"flashlight python bindings are required to use this functionality. Please install from https://github.com/facebookresearch/flashlight/tree/master/bindings/python"
)
LM = object
LMState = object
class W2lDecoder(object):
def __init__(self, args, tgt_dict):
self.tgt_dict = tgt_dict
self.vocab_size = len(tgt_dict)
self.nbest = args.nbest
# criterion-specific init
self.criterion_type = CriterionType.CTC
self.blank = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
if "<sep>" in tgt_dict.indices:
self.silence = tgt_dict.index("<sep>")
elif "|" in tgt_dict.indices:
self.silence = tgt_dict.index("|")
else:
self.silence = tgt_dict.eos()
self.asg_transitions = None
def generate(self, models, sample, **unused):
"""Generate a batch of inferences."""
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
emissions = self.get_emissions(models, encoder_input)
return self.decode(emissions)
def get_emissions(self, models, encoder_input):
"""Run encoder and normalize emissions"""
model = models[0]
encoder_out = model(**encoder_input)
if hasattr(model, "get_logits"):
emissions = model.get_logits(encoder_out) # no need to normalize emissions
else:
emissions = model.get_normalized_probs(encoder_out, log_probs=True)
return emissions.transpose(0, 1).float().cpu().contiguous()
def get_tokens(self, idxs):
"""Normalize tokens by handling CTC blank, ASG replabels, etc."""
idxs = (g[0] for g in it.groupby(idxs))
idxs = filter(lambda x: x != self.blank, idxs)
return torch.LongTensor(list(idxs))
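# A minimal sketch of the CTC collapse that get_tokens performs: merge runs of
# repeated indices, then drop blanks. Assuming blank id 0:
#   >>> import itertools as it
#   >>> idxs = [0, 5, 5, 0, 7, 7, 0]
#   >>> deduped = [g[0] for g in it.groupby(idxs)]   # [0, 5, 0, 7, 0]
#   >>> [x for x in deduped if x != 0]               # [5, 7]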
class W2lViterbiDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
if self.asg_transitions is None:
transitions = torch.FloatTensor(N, N).zero_()
else:
transitions = torch.FloatTensor(self.asg_transitions).view(N, N)
viterbi_path = torch.IntTensor(B, T)
workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N))
CpuViterbiPath.compute(
B,
T,
N,
get_data_ptr_as_bytes(emissions),
get_data_ptr_as_bytes(transitions),
get_data_ptr_as_bytes(viterbi_path),
get_data_ptr_as_bytes(workspace),
)
return [
[{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}]
for b in range(B)
]
class W2lKenLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.unit_lm = getattr(args, "unit_lm", False)
if args.lexicon:
self.lexicon = load_words(args.lexicon)
self.word_dict = create_word_dict(self.lexicon)
self.unk_word = self.word_dict.get_index("<unk>")
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.trie = Trie(self.vocab_size, self.silence)
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
word_idx = self.word_dict.get_index(word)
_, score = self.lm.score(start_state, word_idx)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
word_score=args.word_score,
unk_score=args.unk_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
if self.asg_transitions is None:
N = 768
# self.asg_transitions = torch.FloatTensor(N, N).zero_()
self.asg_transitions = []
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
self.asg_transitions,
self.unit_lm,
)
else:
assert (
args.unit_lm
), "lexicon free decoding can only be done with a unit language model"
from flashlight.lib.text.decoder import (
LexiconFreeDecoder,
LexiconFreeDecoderOptions,
)
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def get_timesteps(self, token_idxs: List[int]) -> List[int]:
"""Returns frame numbers corresponding to every non-blank token.
Parameters
----------
token_idxs : List[int]
IDs of decoded tokens.
Returns
-------
List[int]
Frame numbers corresponding to every non-blank token.
"""
timesteps = []
for i, token_idx in enumerate(token_idxs):
if token_idx == self.blank:
continue
if i == 0 or token_idx != token_idxs[i - 1]:
timesteps.append(i)
return timesteps
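# Worked example for get_timesteps (assuming blank id 0): for
# token_idxs = [0, 5, 5, 0, 7], frame 0 is blank, frame 1 starts token 5,
# frame 2 repeats it, frame 3 is blank, and frame 4 starts token 7, so the
# result is [1, 4].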
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
for b in range(B):
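# The raw pointer offset below assumes float32 emissions (4 bytes/element)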
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append(
[
{
"tokens": self.get_tokens(result.tokens),
"score": result.score,
"timesteps": self.get_timesteps(result.tokens),
"words": [
self.word_dict.get_entry(x) for x in result.words if x >= 0
],
}
for result in nbest_results
]
)
return hypos
FairseqLMState = namedtuple("FairseqLMState", ["prefix", "incremental_state", "probs"])
class FairseqLM(LM):
def __init__(self, dictionary, model):
LM.__init__(self)
self.dictionary = dictionary
self.model = model
self.unk = self.dictionary.unk()
self.save_incremental = False # this currently does not work properly
self.max_cache = 20_000
model.cuda()
model.eval()
model.make_generation_fast_()
self.states = {}
self.stateq = deque()
def start(self, start_with_nothing):
state = LMState()
prefix = torch.LongTensor([[self.dictionary.eos()]])
incremental_state = {} if self.save_incremental else None
with torch.no_grad():
res = self.model(prefix.cuda(), incremental_state=incremental_state)
probs = self.model.get_normalized_probs(res, log_probs=True, sample=None)
if incremental_state is not None:
incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state)
self.states[state] = FairseqLMState(
prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy()
)
self.stateq.append(state)
return state
def score(self, state: LMState, token_index: int, no_cache: bool = False):
"""
Evaluate language model based on the current lm state and new word
Parameters:
-----------
state: current lm state
token_index: index of the word
(this can be a lexicon index, in which case the LM must store the
mapping between lexicon and LM indices, or directly the LM index of a word)
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
curr_state = self.states[state]
def trim_cache(targ_size):
while len(self.stateq) > targ_size:
rem_k = self.stateq.popleft()
rem_st = self.states[rem_k]
rem_st = FairseqLMState(rem_st.prefix, None, None)
self.states[rem_k] = rem_st
if curr_state.probs is None:
new_incremental_state = (
curr_state.incremental_state.copy()
if curr_state.incremental_state is not None
else None
)
with torch.no_grad():
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cuda(), new_incremental_state
)
elif self.save_incremental:
new_incremental_state = {}
res = self.model(
torch.from_numpy(curr_state.prefix).cuda(),
incremental_state=new_incremental_state,
)
probs = self.model.get_normalized_probs(
res, log_probs=True, sample=None
)
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cpu(), new_incremental_state
)
curr_state = FairseqLMState(
curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy()
)
if not no_cache:
self.states[state] = curr_state
self.stateq.append(state)
score = curr_state.probs[token_index].item()
trim_cache(self.max_cache)
outstate = state.child(token_index)
if outstate not in self.states and not no_cache:
prefix = np.concatenate(
[curr_state.prefix, torch.LongTensor([[token_index]])], -1
)
incr_state = curr_state.incremental_state
self.states[outstate] = FairseqLMState(prefix, incr_state, None)
if token_index == self.unk:
score = float("-inf")
return outstate, score
def finish(self, state: LMState):
"""
Evaluate eos for language model based on the current lm state
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
return self.score(state, self.dictionary.eos())
def empty_cache(self):
self.states = {}
self.stateq = deque()
gc.collect()
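# Cache scheme sketch: each LMState maps to a FairseqLMState holding the token
# prefix, the optional incremental decoder state, and the next-token log-probs.
# States are enqueued on self.stateq; once the queue exceeds max_cache,
# trim_cache drops the probs/incremental state of the oldest entries (keeping
# only the prefix), so they are recomputed lazily on the next score() call.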
class W2lFairseqLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.unit_lm = getattr(args, "unit_lm", False)
self.lexicon = load_words(args.lexicon) if args.lexicon else None
self.idx_to_wrd = {}
checkpoint = torch.load(args.kenlm_model, map_location="cpu")
if "cfg" in checkpoint and checkpoint["cfg"] is not None:
lm_args = checkpoint["cfg"]
else:
lm_args = convert_namespace_to_omegaconf(checkpoint["args"])
with open_dict(lm_args.task):
lm_args.task.data = osp.dirname(args.kenlm_model)
task = tasks.setup_task(lm_args.task)
model = task.build_model(lm_args.model)
model.load_state_dict(checkpoint["model"], strict=False)
self.trie = Trie(self.vocab_size, self.silence)
self.word_dict = task.dictionary
self.unk_word = self.word_dict.unk()
self.lm = FairseqLM(self.word_dict, model)
if self.lexicon:
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
if self.unit_lm:
word_idx = i
self.idx_to_wrd[i] = word
score = 0
else:
word_idx = self.word_dict.index(word)
_, score = self.lm.score(start_state, word_idx, no_cache=True)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
word_score=args.word_score,
unk_score=args.unk_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
[],
self.unit_lm,
)
else:
assert (
args.unit_lm
), "lexicon free decoding can only be done with a unit language model"
from flashlight.lib.text.decoder import (
LexiconFreeDecoder,
LexiconFreeDecoderOptions,
)
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
def idx_to_word(idx):
if self.unit_lm:
return self.idx_to_wrd[idx]
else:
return self.word_dict[idx]
def make_hypo(result):
hypo = {"tokens": self.get_tokens(result.tokens), "score": result.score}
if self.lexicon:
hypo["words"] = [idx_to_word(x) for x in result.words if x >= 0]
return hypo
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append([make_hypo(result) for result in nbest_results])
self.lm.empty_cache()
return hypos
| 17,561
| 34.336016
| 171
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_recognition/cif_decoder.py
|
# @Time : 2021/7/14
# @Author : Minglun Han
# @File : cif_decoder.py
"""""
Update:
By 2022/06/19
1. support LM decoding with language model by shallow fusion;
""" ""
import os
import sys
import torch
import logging
import numpy as np
import itertools as it
from torch import Tensor
import torch.nn.functional as F
from typing import Dict, Tuple, List, Optional
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
np.set_printoptions(threshold=10000000)
torch.set_printoptions(profile="full")
class CifDecoder(object):
def __init__(self, args, tgt_dict):
self.tgt_dict = tgt_dict
self.vocab_size = len(tgt_dict)
self.nbest = args.nbest
self.beam = args.beam
self.tail_handling_firing_threshold = args.tail_handling_firing_threshold
# Obtain ids of special tokens
self.blank = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
self.bos = tgt_dict.bos()
self.eos = tgt_dict.eos()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.cif_decoder_mode = args.cif_decoder_mode
self.use_nnlm = args.use_nnlm
self.fetch_nnlm_from = args.fetch_nnlm_from
self.lm_weight = args.lm_weight
self.specified_dict_path = args.specified_dict_path
# Load language model
self.lm_decoder = None
if self.use_nnlm:
logging.info("load language model from %s" % self.fetch_nnlm_from)
state = checkpoint_utils.load_checkpoint_to_cpu(self.fetch_nnlm_from)
# build task
cfg = None
if "args" in state and state["args"] is not None:
cfg = convert_namespace_to_omegaconf(state["args"])
elif "cfg" in state and state["cfg"] is not None:
cfg = state["cfg"]
assert cfg is not None, "Configuration is None"
cfg.task.data = self.specified_dict_path
task = tasks.setup_task(cfg.task)
if "task_state" in state:
task.load_state_dict(state["task_state"])
# build model & load model parameters
model = task.build_model(cfg.model)
model.load_state_dict(
state["model"],
strict=True,
model_cfg=cfg.model,
)
if args.fp16:
model.half()
model.cuda()
model.eval()
# register language model
self.lm_decoder = model
# # Check: inspect LM loading process and LM model
# logging.info(" Checking language model ...... ")
# dummy_inputs = torch.tensor(
# [[2,38,817,72,220,80,594,168,
# 29,19,17,42,146,518,436]]
# ).cuda() # For validation
# # dummy_inputs = torch.tensor(
# # [[2, 320, 1018, 1090, 553]]
# # ).cuda() # For training
# dummy_lm_logits, _ = self.lm_decoder(src_tokens=dummy_inputs)
# dummy_preds = dummy_lm_logits.max(-1).indices
# dummy_logprobs = utils.log_softmax(
# dummy_lm_logits.float(), dim=-1)
# nonmean_dummy_nll_loss = F.nll_loss(
# dummy_logprobs[0], dummy_inputs[0],
# ignore_index=self.pad, reduction="none")
# dummy_nll_loss = F.nll_loss(
# dummy_logprobs[0], dummy_inputs[0],
# ignore_index=self.pad, reduction="mean")
# logging.info(f"dummy_inputs: {dummy_inputs[0, 1:]}")
# logging.info(f"dummy_preds: {dummy_preds[0]}")
# logging.info(f"dummy_nll_loss: {dummy_nll_loss}")
# logging.info(f"nonmean_dummy_nll_loss: {nonmean_dummy_nll_loss}")
# logging.info(f"Language model inspection is done.")
if self.beam == 1:
if self.cif_decoder_mode == "ar":
logging.info("employ ar greedy decoder")
self.decode = self.ar_batch_greedy_decode
elif self.cif_decoder_mode == "fast_ar":
logging.info("employ ar fast greedy decoder")
self.decode = self.ar_fast_batch_greedy_decode
else:
logging.info("employ nar greedy decoder")
# self.decode = self.nar_batch_greedy_decode
self.decode = self.nar_batch_parallel_greedy_decode
# parallel greedy decoding, which suits the NAR decoder better
else:
if self.cif_decoder_mode == "ar":
logging.info("employ ar beam decoder")
self.decode = self.ar_batch_beam_decode
elif self.cif_decoder_mode == "fast_ar":
logging.info("employ ar fast beam decoder")
self.decode = self.ar_fast_batch_beam_decode
else:
logging.info("employ nar beam decoder")
self.decode = self.nar_batch_beam_decode
def generate(self, models, sample, **kwargs):
"""Generate a batch of inferences."""
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
# Prepare model inputs
model_inputs = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
} # remove prev_output_tokens
# Forward encoder and cif
if self.tail_handling_firing_threshold:
models[
0
].encoder.cif.tail_handling_firing_threshold = (
self.tail_handling_firing_threshold
)
cif_outputs = models[0].get_cif_output(
src_tokens=model_inputs["src_tokens"],
src_lengths=model_inputs["src_lengths"],
target_lengths=sample["target_lengths"],
)
# Decode
beam_results, beam_scores, out_seqlens = self.decode(models[0], cif_outputs)
# Truncate at <eos>
tmp_beam_results = []
bsz, beam_size, max_len = beam_results.size()
beam_results = beam_results.view((bsz * beam_size), -1) # (B * beam_size) x T
for n in range(bsz):
cur_res = beam_results[n] # T
eos_inds = (cur_res == self.eos).nonzero()
if len(eos_inds) > 0:
cur_max_valid_len = eos_inds[0][0]
else:
cur_max_valid_len = max_len
cur_res = cur_res[:cur_max_valid_len]
pad_len = max_len - cur_max_valid_len
cur_res = torch.cat(
[cur_res, torch.tensor([self.pad for _ in range(pad_len)]).cuda()],
dim=0,
)
tmp_beam_results.append(cur_res.unsqueeze(0))
beam_results = torch.cat(tmp_beam_results, dim=0).view(bsz, beam_size, -1)
return self.generate_hypos(
beam_results=beam_results,
beam_scores=beam_scores,
out_seqlens=out_seqlens,
)
def generate_hypos(self, beam_results, beam_scores, out_seqlens):
hypos = []
for beam_result, scores, lengths in zip(beam_results, beam_scores, out_seqlens):
# beam_ids: beam x id; score: beam; length: beam
top = []
for result, score, length in zip(beam_result, scores, lengths):
top.append({"tokens": self.get_tokens(result[:length]), "score": score})
hypos.append(top)
return hypos
def get_tokens(self, idxs):
"""Normalize tokens by handling CTC blank, ASG replabels, etc."""
# Remove blank id and eos id
# idxs = (g[0] for g in it.groupby(idxs)) # remove repetition
idxs = filter(lambda x: x != self.blank, idxs)
idxs = filter(lambda x: x != self.eos, idxs)
idxs = filter(lambda x: x != self.pad, idxs)
return torch.LongTensor(list(idxs))
def ar_batch_greedy_decode(self, model, cif_outputs):
"""
:param model: the model in usage
:param cif_outputs: the outputs of cif module
:return: prev_tokens, out_seqlens, scores
"""
# Get Cif outputs
cif_out = cif_outputs["cif_out"]
cif_out_padding_mask = cif_outputs["cif_out_padding_mask"]
raw_encoder_out = cif_outputs["encoder_out"]
raw_encoder_padding_mask = cif_outputs["encoder_padding_mask"]
# Get the maximum length of decoding steps
batch_size, max_decode_length, _ = cif_out.size()
out_seqlens = cif_out_padding_mask.sum(-1) # B
# Initialize previous decoded tokens
prev_tokens = torch.ones([batch_size, 1]).long().cuda() * self.eos
# B x 1, use <eos> as the beginning of sentence (<bos>)
scores = torch.ones([batch_size]).cuda() # B
for step_i in range(max_decode_length):
# Conduct forward of current step t
cur_step_cif_outputs = cif_out[:, : (step_i + 1), :] # B x t x C
cur_step_cif_out_padding_mask = cif_out_padding_mask[
:, : (step_i + 1)
] # B x t
cur_step_cif_out = {
"cif_out": cur_step_cif_outputs,
"cif_out_padding_mask": cur_step_cif_out_padding_mask,
"ctxt_cif_out": None,
"raw_encoder_out": raw_encoder_out,
"raw_encoder_padding_mask": raw_encoder_padding_mask,
}
# Get decoder outputs of current step
decoder_output_i, extra_outputs, _ = model.step_forward_decoder(
prev_decoded_tokens=prev_tokens, cif_outputs=cur_step_cif_out
)
# Update previous decoded tokens & scores
decoder_output_i = model.get_probs_from_logits(
decoder_output_i[:, -1, :], log_probs=False
)
latest_token = torch.argmax(decoder_output_i, dim=-1).unsqueeze(
dim=-1
) # shape = B x 1
prev_tokens = torch.cat([prev_tokens, latest_token], dim=-1)
max_prob_of_last_step = decoder_output_i.max(-1)[0] # shape = B
scores = scores * max_prob_of_last_step
# Reform outputs
prev_tokens = torch.unsqueeze(prev_tokens, dim=1)[:, :, 1:] # B x 1 x T
out_seqlens = torch.unsqueeze(out_seqlens, dim=1) # B x 1
scores = torch.unsqueeze(scores, dim=-1) # B x 1
return prev_tokens, scores, out_seqlens
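# Note on the greedy score above: it is the product of per-step max
# probabilities, e.g. with step maxima 0.9, 0.8, 0.5 the hypothesis score is
# 0.9 * 0.8 * 0.5 = 0.36 (probabilities rather than log-probs, since
# get_probs_from_logits is called with log_probs=False here).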
def ar_fast_batch_greedy_decode(self, model, cif_outputs):
"""
:param model: the model in usage
:param cif_outputs: the outputs of cif module
:return: prev_tokens, out_seqlens, scores
"""
# Get Cif outputs
cif_out = cif_outputs["cif_out"]
cif_out_padding_mask = cif_outputs["cif_out_padding_mask"]
raw_encoder_out = cif_outputs["encoder_out"]
raw_encoder_padding_mask = cif_outputs["encoder_padding_mask"]
# Get the maximum length of decoding steps
batch_size, max_decode_length, _ = cif_out.size()
out_seqlens = cif_out_padding_mask.sum(-1) # B
# Initialize incremental states for fast decoding
incremental_state = torch.jit.annotate(
Dict[str, Dict[str, Optional[Tensor]]], {}
)
# incremental_states is a dictionary of dictionaries of tensors
# Initialize previous decoded tokens
prev_tokens = torch.ones([batch_size, 1]).long().cuda() * self.eos
# B x 1, use <eos> as the beginning of sentence (<bos>)
scores = torch.ones([batch_size]).cuda() # B
for step_i in range(max_decode_length):
# Forward decoder
cur_step_cif_outputs = cif_out[:, : (step_i + 1), :] # B x t x C
cur_step_cif_out_padding_mask = cif_out_padding_mask[
:, : (step_i + 1)
] # B x t
cur_step_cif_out = {
"cif_out": cur_step_cif_outputs,
"cif_out_padding_mask": cur_step_cif_out_padding_mask,
"ctxt_cif_out": None,
"raw_encoder_out": raw_encoder_out,
"raw_encoder_padding_mask": raw_encoder_padding_mask,
}
# Get decoder outputs of current step
decoder_output_i, _, _ = model.step_forward_decoder(
prev_decoded_tokens=prev_tokens,
cif_outputs=cur_step_cif_out,
incremental_state=incremental_state,
)
# This differs from the normal decoding process because the
# historical states are buffered in incremental_state
# Update previous decoded tokens
decoder_output_i = model.get_probs_from_logits(
decoder_output_i[:, -1, :], log_probs=False
)
latest_token = torch.argmax(decoder_output_i, dim=-1).unsqueeze(
dim=-1
) # B x 1
prev_tokens = torch.cat([prev_tokens, latest_token], dim=-1)
max_prob_of_last_step = decoder_output_i.max(-1)[0] # B
scores = scores * max_prob_of_last_step
# Reform outputs
prev_tokens = torch.unsqueeze(prev_tokens, dim=1)[:, :, 1:] # B x 1 x T
out_seqlens = torch.unsqueeze(out_seqlens, dim=1) # B x 1
scores = torch.unsqueeze(scores, dim=-1) # B x 1
return prev_tokens, scores, out_seqlens
def ar_batch_beam_decode(self, model, cif_outputs):
"""
:param model: the model in usage
:param cif_outputs: the outputs of cif module
:return: prev_tokens, out_seqlens, scores
"""
cif_out = cif_outputs["cif_out"] # B x T x C
cif_out_padding_mask = cif_outputs["cif_out_padding_mask"] # B x T
raw_encoder_out = None
raw_encoder_padding_mask = None
# Get the maximum length of decoding steps
batch_size, max_decode_length, cif_out_dim = cif_out.size() # B x T x C
out_seqlens = cif_out_padding_mask.sum(-1) # B
# Initialize all needed variables
cif_out = torch.unsqueeze(cif_out, dim=1).repeat(
1, self.beam, 1, 1
) # B x beam_size x T x C
prev_tokens = (
torch.ones([batch_size, self.beam, 1]).long().cuda() * self.eos
) # B x beam_size x 1
scores = torch.zeros([batch_size, self.beam]).float().cuda() # B x beam_size
cif_out_padding_mask = torch.unsqueeze(cif_out_padding_mask, dim=1).repeat(
[1, self.beam, 1]
)
# B x beam_size x T
cif_out = cif_out.view(
[batch_size * self.beam, max_decode_length, cif_out_dim]
) # (B * beam_size) x T x C
prev_tokens = prev_tokens.view(
[batch_size * self.beam, 1]
) # (B * beam_size) x 1
scores = scores.view([batch_size * self.beam]) # (B * beam_size)
cif_out_padding_mask = cif_out_padding_mask.view(
[batch_size * self.beam, max_decode_length]
) # (B * beam_size) x T
if not model.decoder.no_encoder_attn:
raw_encoder_out = cif_outputs["encoder_out"] # T x B x C
raw_encoder_padding_mask = cif_outputs["encoder_padding_mask"] # B x T
max_raw_out_length, _, raw_out_dim = raw_encoder_out.size()
raw_encoder_out = (
raw_encoder_out.transpose(0, 1)
.unsqueeze(dim=1)
.repeat(1, self.beam, 1, 1)
.view(batch_size * self.beam, max_raw_out_length, raw_out_dim)
.transpose(0, 1)
) # T x (B x beam_size) x C
raw_encoder_padding_mask = (
raw_encoder_padding_mask.unsqueeze(dim=1)
.repeat(1, self.beam, 1)
.view(batch_size * self.beam, max_raw_out_length)
) # (B * beam_size) x T
for step_i in range(1, max_decode_length + 1):
# Get cif outputs of current step
cur_step_cif_outputs = cif_out[:, :step_i, :] # (B * beam_size) x t x C
cur_step_cif_out_padding_mask = cif_out_padding_mask[
:, :step_i
] # (B * beam_size) x t
cur_step_cif_out = {
"cif_out": cur_step_cif_outputs,
"cif_out_padding_mask": cur_step_cif_out_padding_mask,
"ctxt_cif_out": None,
"raw_encoder_out": raw_encoder_out,
"raw_encoder_padding_mask": raw_encoder_padding_mask,
}
# Get decoder outputs at step_i
decoder_output_i, extra_outputs, _ = model.step_forward_decoder(
prev_decoded_tokens=prev_tokens, # (B x beam_size) x t
cif_outputs=cur_step_cif_out,
# cif_out: (B * beam_size) x t x C, cif_out_padding_mask: (B * beam_size) x t
) # decoder_output_i has shape [(B * beam_size), t, V]
cur_decoder_output = model.get_probs_from_logits(
decoder_output_i[:, -1, :], log_probs=True
) # [B * beam_size, V]
tmp_scores = scores # Backup scores, with shape [B * beam_size]
scores = scores.unsqueeze(dim=-1).repeat(
[1, self.vocab_size]
) # [B * beam_size, V]
cur_score = cur_decoder_output
# cur_score, with shape [(B x beam_size) x V]
updated_scores = (scores + cur_score).view(
[batch_size, self.beam * self.vocab_size]
) # converted from shape [B * beam_size, V] to [B, beam_size * V]
# Handle the first timestep with special operation
if step_i == 1:
# For the first step, all beams share the same input token, so only one beam is considered.
topk_scores, topk_indices = torch.topk(
updated_scores.view([batch_size, self.beam, self.vocab_size])[
:, 0, :
],
k=self.beam,
dim=-1,
)
beam_indices = (
torch.zeros(batch_size, self.beam).long().cuda()
) # [B, beam_size] with all zero elements
fixed_topk_indices = topk_indices # [B, beam_size]
else:
# For all later steps, the beam inputs differ, so all beams are considered.
topk_scores, topk_indices = torch.topk(
updated_scores, k=self.beam, dim=-1
) # topk_scores shape [B, beam_size], topk_indices shape [B, beam_size]
beam_indices = topk_indices // self.vocab_size # [B, beam_size]
fixed_topk_indices = topk_indices % self.vocab_size # [B, beam_size]
# Update previous decoded tokens and scores
prev_tokens = prev_tokens.view(
[batch_size, self.beam, -1]
) # [B, beam_size, t]
tmp_scores = tmp_scores.view(
[batch_size, self.beam]
) # previous scores, with shape [B, beam_size]
prev_token_tmp_list = []
scores_tmp_list = []
for n in range(batch_size): # n ranges from 0 to (batch_size - 1)
# Get the max length of current sample
cur_output_maxlen = out_seqlens[n]
# If a sample's decode length is shorter than the current step, keep its score and decoded results
if step_i > cur_output_maxlen:
cur_scores = tmp_scores[n, :] # beam_size
cur_prev_tokens = prev_tokens[n, :, :] # beam_size x t
else:
cur_scores = topk_scores[n, :] # beam_size
cur_prev_tokens = prev_tokens[n, :, :] # beam_size x t
cur_beam_indices = beam_indices[n, :] # beam_size
# Get reformed previous tokens
cur_prev_tokens = torch.index_select(
cur_prev_tokens, dim=0, index=cur_beam_indices
) # beam_size x t
scores_tmp_list.append(cur_scores.unsqueeze(dim=0))
prev_token_tmp_list.append(cur_prev_tokens.unsqueeze(dim=0))
fixed_prev_tokens = torch.cat(prev_token_tmp_list, dim=0)
fixed_topk_indices = torch.where(
step_i <= out_seqlens.unsqueeze(dim=-1).repeat([1, self.beam]),
fixed_topk_indices, # B x beam_size
torch.ones_like(fixed_topk_indices).cuda() * self.pad,
) # Mask positions beyond the cif output length with <pad>
fixed_topk_indices = fixed_topk_indices.unsqueeze(
dim=-1
) # [B, beam_size, 1]
prev_tokens = torch.cat(
[fixed_prev_tokens, fixed_topk_indices], dim=-1
).view(
[batch_size * self.beam, -1]
) # [B * beam_size, t + 1]
scores = torch.cat(scores_tmp_list, dim=0).view(
[batch_size * self.beam]
) # [B * beam_size]
scores = scores.view([batch_size, self.beam])[:, : self.nbest] # B x beam_size
prev_tokens = prev_tokens.view([batch_size, self.beam, -1])[
:, : self.nbest, 1:
] # B x beam_size x T
out_seqlens = torch.unsqueeze(out_seqlens, dim=-1).repeat(1, self.beam)[
:, : self.nbest
] # B x beam_size
return prev_tokens, scores, out_seqlens
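# Beam index arithmetic sketch: after flattening scores to [B, beam * V], a
# top-k flat index f decomposes as beam = f // V and token = f % V. E.g. with
# V = 10 and beam_size = 2, flat index 13 selects beam 1, token 3.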
def ar_fast_batch_beam_decode(self, model, cif_outputs):
"""
:param model: the model in usage
:param cif_outputs: the outputs of cif module
:return: prev_tokens, out_seqlens, scores
"""
cif_out = cif_outputs["cif_out"] # B x T x C
cif_out_padding_mask = cif_outputs["cif_out_padding_mask"] # B x T
raw_encoder_out = None
raw_encoder_padding_mask = None
# Get the maximum length of decoding steps
batch_size, max_decode_length, cif_out_dim = cif_out.size() # B x T x C
out_seqlens = cif_out_padding_mask.sum(-1) # B
# Initialize all needed variables
cif_out = torch.unsqueeze(cif_out, dim=1).repeat(
1, self.beam, 1, 1
) # B x beam_size x T x C
prev_tokens = (
torch.ones([batch_size, self.beam, 1]).long().cuda() * self.eos
) # B x beam_size x 1
scores = torch.zeros([batch_size, self.beam]).float().cuda() # B x beam_size
cif_out_padding_mask = torch.unsqueeze(cif_out_padding_mask, dim=1).repeat(
[1, self.beam, 1]
)
# B x beam_size x T
cif_out = cif_out.view(
[batch_size * self.beam, max_decode_length, cif_out_dim]
) # (B * beam_size) x T x C
prev_tokens = prev_tokens.view(
[batch_size * self.beam, 1]
) # (B * beam_size) x 1
scores = scores.view([batch_size * self.beam]) # (B * beam_size)
cif_out_padding_mask = cif_out_padding_mask.view(
[batch_size * self.beam, max_decode_length]
) # (B * beam_size) x T
if not model.decoder.no_encoder_attn:
raw_encoder_out = cif_outputs["encoder_out"] # T x B x C
raw_encoder_padding_mask = cif_outputs["encoder_padding_mask"] # B x T
max_raw_out_length, _, raw_out_dim = raw_encoder_out.size()
raw_encoder_out = (
raw_encoder_out.transpose(0, 1)
.unsqueeze(dim=1)
.repeat(1, self.beam, 1, 1)
.view(batch_size * self.beam, max_raw_out_length, raw_out_dim)
.transpose(0, 1)
) # T x (B x beam_size) x C
raw_encoder_padding_mask = (
raw_encoder_padding_mask.unsqueeze(dim=1)
.repeat(1, self.beam, 1)
.view(batch_size * self.beam, max_raw_out_length)
) # (B * beam_size) x T
# Initialize incremental states for fast decoding
reorder_state = None
lm_reorder_state = None
incremental_state = torch.jit.annotate(
Dict[str, Dict[str, Optional[Tensor]]], {}
)
lm_incremental_state = torch.jit.annotate(
Dict[str, Dict[str, Optional[Tensor]]], {}
)
# incremental_states is a dictionary of dictionaries of tensors
for step_i in range(1, max_decode_length + 1):
# Reorder decoder internal states
if reorder_state is not None:
model.decoder.reorder_incremental_state_scripting(
incremental_state, reorder_state
)
if self.use_nnlm and lm_reorder_state is not None:
self.lm_decoder.decoder.reorder_incremental_state_scripting(
lm_incremental_state, lm_reorder_state
)
# Get cif outputs of current step
cur_step_cif_outputs = cif_out[:, :step_i, :] # (B * beam_size) x t x C
cur_step_cif_out_padding_mask = cif_out_padding_mask[
:, :step_i
] # (B * beam_size) x t
cur_step_cif_out = {
"cif_out": cur_step_cif_outputs,
"cif_out_padding_mask": cur_step_cif_out_padding_mask,
"ctxt_cif_out": None,
"raw_encoder_out": raw_encoder_out,
"raw_encoder_padding_mask": raw_encoder_padding_mask,
}
# Get decoder outputs at step_i
decoder_output_i, extra_outputs, _ = model.step_forward_decoder(
prev_decoded_tokens=prev_tokens,
cif_outputs=cur_step_cif_out,
incremental_state=incremental_state,
)
cur_decoder_output = model.get_probs_from_logits(
decoder_output_i[:, -1, :], log_probs=True
) # [B * beam_size, V]
tmp_scores = scores # Backup scores, with shape [B * beam_size]
scores = scores.unsqueeze(dim=-1).repeat(
[1, self.vocab_size]
) # [B * beam_size, V]
# Forward language model
cur_lm_decoder_output = None
if self.use_nnlm and self.lm_decoder is not None:
lm_decoder_output_i, _ = self.lm_decoder(
src_tokens=prev_tokens,
incremental_state=lm_incremental_state,
)
cur_lm_decoder_output = model.get_probs_from_logits(
lm_decoder_output_i[:, -1, :],
log_probs=True,
) # [B * beam_size, V]
# Update scores
if self.use_nnlm:
cur_score = cur_decoder_output + self.lm_weight * cur_lm_decoder_output
else:
cur_score = cur_decoder_output
# cur_score, with shape [(B x beam_size) x V]
updated_scores = (scores + cur_score).view(
[batch_size, self.beam * self.vocab_size]
) # converted from shape [B * beam_size, V] to [B, beam_size * V]
# Handle the first timestep with special operation
if step_i == 1:
# For the first step, all beams share the same input token, so only one beam is considered.
topk_scores, topk_indices = torch.topk(
updated_scores.view([batch_size, self.beam, self.vocab_size])[
:, 0, :
],
k=self.beam,
dim=-1,
)
beam_indices = (
torch.zeros(batch_size, self.beam).long().cuda()
) # [B, beam_size] with all zero elements
fixed_topk_indices = topk_indices # [B, beam_size]
else:
# For all later steps, the beam inputs differ, so all beams are considered.
topk_scores, topk_indices = torch.topk(
updated_scores, k=self.beam, dim=-1
) # topk_scores shape [B, beam_size], topk_indices shape [B, beam_size]
beam_indices = topk_indices // self.vocab_size
fixed_topk_indices = topk_indices % self.vocab_size # [B, beam_size]
stage_index = torch.arange(batch_size) * self.beam
cand_indices = beam_indices + stage_index.unsqueeze(-1).cuda()
reorder_state = cand_indices.view(batch_size * self.beam)
lm_reorder_state = reorder_state
# Update previous decoded tokens and scores
prev_tokens = prev_tokens.view(
[batch_size, self.beam, -1]
) # [B, beam_size, t]
tmp_scores = tmp_scores.view(
[batch_size, self.beam]
) # previous scores, with shape [B, beam_size]
prev_token_tmp_list = []
scores_tmp_list = []
for n in range(batch_size): # n ranges from 0 to (batch_size - 1)
# Get the max length of current sample
cur_output_maxlen = out_seqlens[n]
# If a sample's decode length is shorter than the current step, keep its score and decoded results
if step_i > cur_output_maxlen:
cur_scores = tmp_scores[n, :] # beam_size
cur_prev_tokens = prev_tokens[n, :, :] # beam_size x t
else:
cur_scores = topk_scores[n, :] # beam_size
cur_prev_tokens = prev_tokens[n, :, :] # beam_size x t
cur_beam_indices = beam_indices[n, :] # beam_size
# Get reformed previous tokens
cur_prev_tokens = torch.index_select(
cur_prev_tokens, dim=0, index=cur_beam_indices
) # beam_size x t
scores_tmp_list.append(cur_scores.unsqueeze(dim=0))
prev_token_tmp_list.append(cur_prev_tokens.unsqueeze(dim=0))
fixed_prev_tokens = torch.cat(prev_token_tmp_list, dim=0)
fixed_topk_indices = torch.where(
step_i <= out_seqlens.unsqueeze(dim=-1).repeat([1, self.beam]),
fixed_topk_indices, # B x beam_size
torch.ones_like(fixed_topk_indices).cuda() * self.pad,
) # Mask positions beyond the cif output length with <pad>
fixed_topk_indices = fixed_topk_indices.unsqueeze(
dim=-1
) # [B, beam_size, 1]
prev_tokens = torch.cat(
[fixed_prev_tokens, fixed_topk_indices], dim=-1
).view(
[batch_size * self.beam, -1]
) # [B * beam_size, t + 1]
scores = torch.cat(scores_tmp_list, dim=0).view(
[batch_size * self.beam]
) # [B * beam_size]
scores = scores.view([batch_size, self.beam])[:, : self.nbest] # B x beam_size
prev_tokens = prev_tokens.view([batch_size, self.beam, -1])[
:, : self.nbest, 1:
] # B x beam_size x T
out_seqlens = torch.unsqueeze(out_seqlens, dim=-1).repeat(1, self.beam)[
:, : self.nbest
] # B x beam_size
return prev_tokens, scores, out_seqlens
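# Reorder sketch: reorder_state maps each surviving beam back to the flat row
# holding its incremental state. E.g. with batch_size = 2, beam = 2 and
# beam_indices = [[1, 0], [0, 1]], stage_index = [0, 2], so
# cand_indices = [[1, 0], [2, 3]] and reorder_state = [1, 0, 2, 3].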
def nar_batch_parallel_greedy_decode(self, model, cif_outputs):
"""
:param model: the model in usage
:param cif_outputs: the outputs of cif module
:return: prev_tokens, out_seqlens, scores
"""
# Get cif outputs
cif_out = cif_outputs["cif_out"]
cif_out_padding_mask = cif_outputs["cif_out_padding_mask"]
raw_encoder_out = cif_outputs["encoder_out"]
raw_encoder_padding_mask = cif_outputs["encoder_padding_mask"]
# Get the maximum length of decoding steps
batch_size, max_decode_length, _ = cif_out.size()
out_seqlens = cif_out_padding_mask.sum(-1) # B
# Initialize previous decoded tokens and cif outputs
prev_decoded_tokens = torch.zeros(
[batch_size, max_decode_length]
).long().cuda() # B x T, on GPU to match the device of cif_out
cif_outputs = {
"cif_out": cif_out,
"cif_out_padding_mask": cif_out_padding_mask,
"raw_encoder_out": raw_encoder_out,
"raw_encoder_padding_mask": raw_encoder_padding_mask,
}
decoder_output, _, _ = model.step_forward_decoder(
prev_decoded_tokens=prev_decoded_tokens, cif_outputs=cif_outputs
) # B x T x V
# Update previous decoded tokens
decoder_output = model.get_probs_from_logits(
decoder_output, log_probs=False
) # B x T x V
decoded_tokens = torch.argmax(decoder_output, dim=-1) # B x T
scores = torch.prod(decoder_output.max(-1)[0], dim=-1) # B
# Reform outputs to the common (tokens, scores, lengths) interface
prev_tokens = torch.unsqueeze(decoded_tokens, dim=1) # B x 1 x T
out_seqlens = torch.unsqueeze(out_seqlens, dim=1) # B x 1
scores = torch.unsqueeze(scores, dim=-1) # B x 1
return prev_tokens, scores, out_seqlens
def nar_batch_beam_decode(self, model, cif_outputs):
"""
:param model: the model in usage
:param cif_outputs: the outputs of cif module
:return: prev_tokens, out_seqlens, scores
"""
cif_out = cif_outputs["cif_out"] # B x T x C
cif_out_padding_mask = cif_outputs["cif_out_padding_mask"] # B x T
raw_encoder_out = cif_outputs["encoder_out"]
raw_encoder_padding_mask = cif_outputs["encoder_padding_mask"]
# Get the maximum length of decoding steps
batch_size, max_decode_length, cif_out_dim = cif_out.size() # B x T x C
out_seqlens = cif_out_padding_mask.sum(-1) # B
# Initialize all needed variables
cif_out = torch.unsqueeze(cif_out, dim=1).repeat(
1, self.beam, 1, 1
) # B x beam_size x T x C
prev_tokens = (
torch.ones([batch_size, self.beam, 1]).long().cuda() * self.eos
) # B x beam_size x 1
scores = torch.zeros([batch_size, self.beam]).float().cuda() # B x beam_size
cif_out_padding_mask = torch.unsqueeze(cif_out_padding_mask, dim=1).repeat(
[1, self.beam, 1]
) # B x beam_size x T
cif_out = cif_out.view(
[batch_size * self.beam, max_decode_length, cif_out_dim]
) # (B * beam_size) x T x C
prev_tokens = prev_tokens.view(
[batch_size * self.beam, 1]
) # (B * beam_size) x 1
scores = scores.view([batch_size * self.beam]) # (B * beam_size)
cif_out_padding_mask = cif_out_padding_mask.view(
[batch_size * self.beam, max_decode_length]
) # (B * beam_size) x T
for step_i in range(1, max_decode_length + 1):
# Get cif outputs of current step
cur_step_cif_outputs = cif_out[:, :step_i, :] # (B * beam_size) x t x C
cur_step_cif_out_padding_mask = cif_out_padding_mask[
:, :step_i
] # (B * beam_size) x t
cur_step_cif_out = {
"cif_out": cur_step_cif_outputs,
"cif_out_padding_mask": cur_step_cif_out_padding_mask,
"ctxt_cif_out": None,
"raw_encoder_out": raw_encoder_out,
"raw_encoder_padding_mask": raw_encoder_padding_mask,
}
# Get decoder outputs at step_i
decoder_output_i, extra_outputs, _ = model.step_forward_decoder(
prev_decoded_tokens=prev_tokens, # (B x beam_size) x t
cif_outputs=cur_step_cif_out,
# cif_out: (B * beam_size) x t x C, cif_out_padding_mask: (B * beam_size) x t
) # decoder_output_i has shape [(B * beam_size), t, V]
cur_decoder_output = model.get_probs_from_logits(
decoder_output_i[:, -1, :], log_probs=True
) # [B * beam_size, V]
tmp_scores = scores # Backup scores, with shape [B * beam_size]
scores = scores.unsqueeze(dim=-1).repeat(
[1, self.vocab_size]
) # [B * beam_size, V]
cur_score = cur_decoder_output
# cur_score, with shape [(B x beam_size) x V]
updated_scores = (scores + cur_score).view(
[batch_size, self.beam * self.vocab_size]
) # converted from shape [B * beam_size, V] to [B, beam_size * V]
# Handle the first timestep with special operation
if step_i == 1:
# For the first step, all beams share the same input token, so only one beam is considered.
topk_scores, topk_indices = torch.topk(
updated_scores.view([batch_size, self.beam, self.vocab_size])[
:, 0, :
],
k=self.beam,
dim=-1,
)
beam_indices = (
torch.zeros(batch_size, self.beam).long().cuda()
) # [B, beam_size] with all zero elements
fixed_topk_indices = topk_indices # [B, beam_size]
else:
# For all later steps, the beam inputs differ, so all beams are considered.
topk_scores, topk_indices = torch.topk(
updated_scores, k=self.beam, dim=-1
) # topk_scores shape [B, beam_size], topk_indices shape [B, beam_size]
beam_indices = torch.div(
topk_indices, self.vocab_size, rounding_mode="floor"
) # [B, beam_size]
fixed_topk_indices = topk_indices % self.vocab_size # [B, beam_size]
# Update previous decoded tokens and scores
prev_tokens = prev_tokens.view(
[batch_size, self.beam, -1]
) # [B, beam_size, t]
tmp_scores = tmp_scores.view(
[batch_size, self.beam]
) # previous scores, with shape [B, beam_size]
prev_token_tmp_list = []
scores_tmp_list = []
for n in range(batch_size): # n ranges from 0 to (batch_size - 1)
# Get the max length of current sample
cur_output_maxlen = out_seqlens[n]
# If a sample's decode length is shorter than the current step, keep its score and decoded results
if step_i > cur_output_maxlen:
cur_scores = tmp_scores[n, :] # beam_size
cur_prev_tokens = prev_tokens[n, :, :] # beam_size x t
else:
cur_scores = topk_scores[n, :] # beam_size
cur_prev_tokens = prev_tokens[n, :, :] # beam_size x t
cur_beam_indices = beam_indices[n, :] # beam_size
# Get reformed previous tokens
cur_prev_tokens = torch.index_select(
cur_prev_tokens, dim=0, index=cur_beam_indices
) # beam_size x t
scores_tmp_list.append(cur_scores.unsqueeze(dim=0))
prev_token_tmp_list.append(cur_prev_tokens.unsqueeze(dim=0))
fixed_prev_tokens = torch.cat(prev_token_tmp_list, dim=0)
fixed_topk_indices = torch.where(
step_i <= out_seqlens.unsqueeze(dim=-1).repeat([1, self.beam]),
fixed_topk_indices, # B x beam_size
torch.ones_like(fixed_topk_indices).cuda() * self.pad,
) # Mask positions beyond the cif output length with <pad>
fixed_topk_indices = fixed_topk_indices.unsqueeze(
dim=-1
) # B x beam_size x 1
prev_tokens = torch.cat(
[fixed_prev_tokens, fixed_topk_indices], dim=-1
).view([batch_size * self.beam, -1])
scores = torch.cat(scores_tmp_list, dim=0).view(
[batch_size * self.beam]
) # B x beam_size
scores = scores.view([batch_size, self.beam])[:, : self.nbest] # B x beam_size
prev_tokens = prev_tokens.view([batch_size, self.beam, -1])[
:, : self.nbest, 1:
] # B x beam_size x T
out_seqlens = torch.unsqueeze(out_seqlens, dim=-1).repeat(1, self.beam)[
:, : self.nbest
] # B x beam_size
return prev_tokens, scores, out_seqlens
| 40,532
| 43.057609
| 116
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_recognition/criterions/cross_entropy_acc.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("cross_entropy_acc")
class CrossEntropyWithAccCriterion(FairseqCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
def compute_loss(self, model, net_output, target, reduction, log_probs):
# N, T -> N * T
target = target.view(-1)
lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
if not hasattr(lprobs, "batch_first"):
logging.warning(
"ERROR: we need to know whether "
"batch first for the net output; "
"you need to set batch_first attribute for the return value of "
"model.get_normalized_probs. Now, we assume this is true, but "
"in the future, we will raise exception instead. "
)
batch_first = getattr(lprobs, "batch_first", True)
if not batch_first:
lprobs = lprobs.transpose(0, 1)
# N, T, D -> N * T, D
lprobs = lprobs.view(-1, lprobs.size(-1))
loss = F.nll_loss(
lprobs, target, ignore_index=self.padding_idx, reduction=reduction
)
return lprobs, loss
def get_logging_output(self, sample, target, lprobs, loss):
target = target.view(-1)
mask = target != self.padding_idx
correct = torch.sum(
lprobs.argmax(1).masked_select(mask) == target.masked_select(mask)
)
total = torch.sum(mask)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"correct": utils.item(correct.data),
"total": utils.item(total.data),
"nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
}
return sample_size, logging_output
def forward(self, model, sample, reduction="sum", log_probs=True):
"""Computes the cross entropy with accuracy metric for the given sample.
This is similar to CrossEntropyCriterion in fairseq, but also
computes accuracy metrics as part of logging
Args:
logprobs (Torch.tensor) of shape N, T, D i.e.
batchsize, timesteps, dimensions
targets (Torch.tensor) of shape N, T i.e batchsize, timesteps
Returns:
tuple: With three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
TODO:
* Currently this Criterion will only work with LSTMEncoderModels or
FairseqModels which have decoder, or Models which return TorchTensor
as net_output.
We need to make a change to support all FairseqEncoder models.
"""
net_output = model(**sample["net_input"])
target = model.get_targets(sample, net_output)
lprobs, loss = self.compute_loss(
model, net_output, target, reduction, log_probs
)
sample_size, logging_output = self.get_logging_output(
sample, target, lprobs, loss
)
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
total_sum = sum(log.get("total", 0) for log in logging_outputs)
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
nframes = sum(log.get("nframes", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
# if args.sentence_avg, then sample_size is nsentences, then loss
# is per-sentence loss; else sample_size is ntokens, the loss
# becomes per-output token loss
"ntokens": ntokens,
"nsentences": nsentences,
"nframes": nframes,
"sample_size": sample_size,
"acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
"correct": correct_sum,
"total": total_sum,
# total is the number of validate tokens
}
if sample_size != ntokens:
agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
# loss: per-sentence loss here (sample_size == nsentences)
# nll_loss: per output token loss
return agg_output
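# Unit sketch for the aggregation above: loss_sum is accumulated in nats, and
# dividing by math.log(2) converts to bits, e.g. a per-token loss of 0.6931
# nats becomes 0.6931 / ln(2) = 1.0 bit per token.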
| 5,372
| 40.015267
| 85
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_recognition/criterions/ASG_loss.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from examples.speech_recognition.data.replabels import pack_replabels
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("asg_loss")
class ASGCriterion(FairseqCriterion):
@staticmethod
def add_args(parser):
group = parser.add_argument_group("ASG Loss")
group.add_argument(
"--asg-transitions-init",
help="initial diagonal value of transition matrix",
type=float,
default=0.0,
)
group.add_argument(
"--max-replabel", help="maximum # of replabels", type=int, default=2
)
group.add_argument(
"--linseg-updates",
help="# of training updates to use LinSeg initialization",
type=int,
default=0,
)
group.add_argument(
"--hide-linseg-messages",
help="hide messages about LinSeg initialization",
action="store_true",
)
def __init__(
self,
task,
silence_token,
asg_transitions_init,
max_replabel,
linseg_updates,
hide_linseg_messages,
):
from flashlight.lib.sequence.criterion import ASGLoss, CriterionScaleMode
super().__init__(task)
self.tgt_dict = task.target_dictionary
self.eos = self.tgt_dict.eos()
self.silence = (
self.tgt_dict.index(silence_token)
if silence_token in self.tgt_dict
else None
)
self.max_replabel = max_replabel
num_labels = len(self.tgt_dict)
self.asg = ASGLoss(num_labels, scale_mode=CriterionScaleMode.TARGET_SZ_SQRT)
self.asg.trans = torch.nn.Parameter(
asg_transitions_init * torch.eye(num_labels), requires_grad=True
)
self.linseg_progress = torch.nn.Parameter(
torch.tensor([0], dtype=torch.int), requires_grad=False
)
self.linseg_maximum = linseg_updates
self.linseg_message_state = "none" if hide_linseg_messages else "start"
@classmethod
def build_criterion(cls, args, task):
return cls(
task,
args.silence_token,
args.asg_transitions_init,
args.max_replabel,
args.linseg_updates,
args.hide_linseg_messages,
)
def linseg_step(self):
if not self.training:
return False
if self.linseg_progress.item() < self.linseg_maximum:
if self.linseg_message_state == "start":
print("| using LinSeg to initialize ASG")
self.linseg_message_state = "finish"
self.linseg_progress.add_(1)
return True
elif self.linseg_message_state == "finish":
print("| finished LinSeg initialization")
self.linseg_message_state = "none"
return False
def replace_eos_with_silence(self, tgt):
if tgt[-1] != self.eos:
return tgt
elif self.silence is None or (len(tgt) > 1 and tgt[-2] == self.silence):
return tgt[:-1]
else:
return tgt[:-1] + [self.silence]
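# Worked example for replace_eos_with_silence, with silence id S and eos id E:
#   [a, b]    -> [a, b]       (no trailing eos, unchanged)
#   [a, S, E] -> [a, S]       (eos dropped, silence already present)
#   [a, b, E] -> [a, b, S]    (eos replaced by silence)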
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
emissions = net_output["encoder_out"].transpose(0, 1).contiguous()
B = emissions.size(0)
T = emissions.size(1)
device = emissions.device
target = torch.IntTensor(B, T)
target_size = torch.IntTensor(B)
using_linseg = self.linseg_step()
for b in range(B):
initial_target_size = sample["target_lengths"][b].item()
if initial_target_size == 0:
raise ValueError("target size cannot be zero")
tgt = sample["target"][b, :initial_target_size].tolist()
tgt = self.replace_eos_with_silence(tgt)
tgt = pack_replabels(tgt, self.tgt_dict, self.max_replabel)
tgt = tgt[:T]
if using_linseg:
tgt = [tgt[t * len(tgt) // T] for t in range(T)]
target[b][: len(tgt)] = torch.IntTensor(tgt)
target_size[b] = len(tgt)
loss = self.asg.forward(emissions, target.to(device), target_size.to(device))
if reduce:
loss = torch.sum(loss)
sample_size = (
sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / nsentences,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
return agg_output
| 5,870
| 33.333333
| 85
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_recognition/criterions/__init__.py
|
import importlib
import os
# ASG loss requires flashlight bindings
files_to_skip = set()
try:
import flashlight.lib.sequence.criterion
except ImportError:
files_to_skip.add("ASG_loss.py")
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_") and file not in files_to_skip:
criterion_name = file[: file.find(".py")]
importlib.import_module(
"examples.speech_recognition.criterions." + criterion_name
)
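# E.g. "ASG_loss.py" (when flashlight is available) is imported as
# "examples.speech_recognition.criterions.ASG_loss", which triggers its
# @register_criterion("asg_loss") decorator.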
| 510
| 27.388889
| 87
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_recognition/models/vggtransformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import math
from collections.abc import Iterable
import torch
import torch.nn as nn
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqEncoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
LinearizedConvolution,
TransformerDecoderLayer,
TransformerEncoderLayer,
VGGBlock,
)
@register_model("asr_vggtransformer")
class VGGTransformerModel(FairseqEncoderDecoderModel):
"""
Transformers with convolutional context for ASR
https://arxiv.org/abs/1904.11660
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--vggblock-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one vggblock:
[(out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
use_layer_norm), ...]
""",
)
parser.add_argument(
"--transformer-enc-config",
type=str,
metavar="EXPR",
help=""""
a tuple containing the configuration of the encoder transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ...]
""",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="""
encoder output dimension, can be None. If specified, projecting the
transformer output to the specified dimension""",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--tgt-embed-dim",
type=int,
metavar="N",
help="embedding dimension of the decoder target tokens",
)
parser.add_argument(
"--transformer-dec-config",
type=str,
metavar="EXPR",
help="""
a tuple containing the configuration of the decoder transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ...]
""",
)
parser.add_argument(
"--conv-dec-config",
type=str,
metavar="EXPR",
help="""
an array of tuples for the decoder 1-D convolution config
[(out_channels, conv_kernel_size, use_layer_norm), ...]""",
)
@classmethod
def build_encoder(cls, args, task):
return VGGTransformerEncoder(
input_feat_per_channel=args.input_feat_per_channel,
vggblock_config=eval(args.vggblock_enc_config),
transformer_config=eval(args.transformer_enc_config),
encoder_output_dim=args.enc_output_dim,
in_channels=args.in_channels,
)
@classmethod
def build_decoder(cls, args, task):
return TransformerDecoder(
dictionary=task.target_dictionary,
embed_dim=args.tgt_embed_dim,
transformer_config=eval(args.transformer_dec_config),
conv_config=eval(args.conv_dec_config),
encoder_output_dim=args.enc_output_dim,
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted
# (in case there are any new ones)
base_architecture(args)
encoder = cls.build_encoder(args, task)
decoder = cls.build_decoder(args, task)
return cls(encoder, decoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
DEFAULT_ENC_VGGBLOCK_CONFIG = ((32, 3, 2, 2, False),) * 2
DEFAULT_ENC_TRANSFORMER_CONFIG = ((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2
# 256: embedding dimension
# 4: number of heads
# 1024: FFN
# True: apply LayerNorm before (dropout + residual) instead of after
# 0.2 (dropout): dropout after MultiheadAttention and second FC
# 0.2 (attention_dropout): dropout in MultiheadAttention
# 0.2 (relu_dropout): dropout after ReLu
DEFAULT_DEC_TRANSFORMER_CONFIG = ((256, 2, 1024, True, 0.2, 0.2, 0.2),) * 2
DEFAULT_DEC_CONV_CONFIG = ((256, 3, True),) * 2
# TODO: replace transformer encoder config from one liner
# to explicit args to get rid of this transformation
def prepare_transformer_encoder_params(
input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout,
):
args = argparse.Namespace()
args.encoder_embed_dim = input_dim
args.encoder_attention_heads = num_heads
args.attention_dropout = attention_dropout
args.dropout = dropout
args.activation_dropout = relu_dropout
args.encoder_normalize_before = normalize_before
args.encoder_ffn_embed_dim = ffn_dim
return args
def prepare_transformer_decoder_params(
input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout,
):
args = argparse.Namespace()
args.encoder_embed_dim = None
args.decoder_embed_dim = input_dim
args.decoder_attention_heads = num_heads
args.attention_dropout = attention_dropout
args.dropout = dropout
args.activation_dropout = relu_dropout
args.decoder_normalize_before = normalize_before
args.decoder_ffn_embed_dim = ffn_dim
return args
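# Usage sketch: each per-layer tuple in the *_TRANSFORMER_CONFIG constants is
# unpacked into one of the helpers above, e.g.
#   enc_args = prepare_transformer_encoder_params(*DEFAULT_ENC_TRANSFORMER_CONFIG[0])
#   layer = TransformerEncoderLayer(enc_args)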
class VGGTransformerEncoder(FairseqEncoder):
"""VGG + Transformer encoder"""
def __init__(
self,
input_feat_per_channel,
vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
encoder_output_dim=512,
in_channels=1,
transformer_context=None,
transformer_sampling=None,
):
"""constructor for VGGTransformerEncoder
Args:
- input_feat_per_channel: feature dim (not including stacked,
just base feature)
- in_channels: # input channels (e.g., if 8 feature vectors are
stacked together, this is 8)
- vggblock_config: configuration of vggblock, see comments on
DEFAULT_ENC_VGGBLOCK_CONFIG
- transformer_config: configuration of transformer layer, see comments
on DEFAULT_ENC_TRANSFORMER_CONFIG
- encoder_output_dim: final transformer output embedding dimension
- transformer_context: (left, right) if set, self-attention will be focused
on (t-left, t+right)
- transformer_sampling: an iterable of int, must match with
len(transformer_config), transformer_sampling[i] indicates sampling
          factor for i-th transformer layer, after multihead att and feedforward
part
"""
super().__init__(None)
self.num_vggblocks = 0
if vggblock_config is not None:
if not isinstance(vggblock_config, Iterable):
raise ValueError("vggblock_config is not iterable")
self.num_vggblocks = len(vggblock_config)
self.conv_layers = nn.ModuleList()
self.in_channels = in_channels
self.input_dim = input_feat_per_channel
self.pooling_kernel_sizes = []
if vggblock_config is not None:
for _, config in enumerate(vggblock_config):
(
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
layer_norm,
) = config
self.conv_layers.append(
VGGBlock(
in_channels,
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
input_dim=input_feat_per_channel,
layer_norm=layer_norm,
)
)
self.pooling_kernel_sizes.append(pooling_kernel_size)
in_channels = out_channels
input_feat_per_channel = self.conv_layers[-1].output_dim
transformer_input_dim = self.infer_conv_output_dim(
self.in_channels, self.input_dim
)
# transformer_input_dim is the output dimension of VGG part
self.validate_transformer_config(transformer_config)
self.transformer_context = self.parse_transformer_context(transformer_context)
self.transformer_sampling = self.parse_transformer_sampling(
transformer_sampling, len(transformer_config)
)
self.transformer_layers = nn.ModuleList()
if transformer_input_dim != transformer_config[0][0]:
self.transformer_layers.append(
Linear(transformer_input_dim, transformer_config[0][0])
)
self.transformer_layers.append(
TransformerEncoderLayer(
prepare_transformer_encoder_params(*transformer_config[0])
)
)
for i in range(1, len(transformer_config)):
if transformer_config[i - 1][0] != transformer_config[i][0]:
self.transformer_layers.append(
Linear(transformer_config[i - 1][0], transformer_config[i][0])
)
self.transformer_layers.append(
TransformerEncoderLayer(
prepare_transformer_encoder_params(*transformer_config[i])
)
)
self.encoder_output_dim = encoder_output_dim
self.transformer_layers.extend(
[
Linear(transformer_config[-1][0], encoder_output_dim),
LayerNorm(encoder_output_dim),
]
)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
bsz, max_seq_len, _ = src_tokens.size()
x = src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
x = x.transpose(1, 2).contiguous()
# (B, C, T, feat)
for layer_idx in range(len(self.conv_layers)):
x = self.conv_layers[layer_idx](x)
bsz, _, output_seq_len, _ = x.size()
# (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) -> (T, B, C * feat)
x = x.transpose(1, 2).transpose(0, 1)
x = x.contiguous().view(output_seq_len, bsz, -1)
input_lengths = src_lengths.clone()
for s in self.pooling_kernel_sizes:
input_lengths = (input_lengths.float() / s).ceil().long()
encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
input_lengths, batch_first=True
)
if not encoder_padding_mask.any():
encoder_padding_mask = None
subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5)
attn_mask = self.lengths_to_attn_mask(input_lengths, subsampling_factor)
transformer_layer_idx = 0
for layer_idx in range(len(self.transformer_layers)):
if isinstance(self.transformer_layers[layer_idx], TransformerEncoderLayer):
x = self.transformer_layers[layer_idx](
x, encoder_padding_mask, attn_mask
)
if self.transformer_sampling[transformer_layer_idx] != 1:
sampling_factor = self.transformer_sampling[transformer_layer_idx]
x, encoder_padding_mask, attn_mask = self.slice(
x, encoder_padding_mask, attn_mask, sampling_factor
)
transformer_layer_idx += 1
else:
x = self.transformer_layers[layer_idx](x)
        # encoder_padding_mask is a (T x B) tensor, its [t, b] element indicates
        # whether encoder_output[t, b] is valid (valid=0, invalid=1)
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": encoder_padding_mask.t()
if encoder_padding_mask is not None
else None,
# (B, T) --> (T, B)
}
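    # Editor's note on the length bookkeeping above: each VGG block pools the
    # time axis, so valid lengths shrink by ceil-division per block, e.g. with
    # pooling_kernel_sizes == [2, 2] a 100-frame utterance keeps
    # ceil(ceil(100 / 2) / 2) == 25 valid encoder frames.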
def infer_conv_output_dim(self, in_channels, input_dim):
sample_seq_len = 200
sample_bsz = 10
x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim)
for i, _ in enumerate(self.conv_layers):
x = self.conv_layers[i](x)
x = x.transpose(1, 2)
mb, seq = x.size()[:2]
return x.contiguous().view(mb, seq, -1).size(-1)
def validate_transformer_config(self, transformer_config):
for config in transformer_config:
input_dim, num_heads = config[:2]
if input_dim % num_heads != 0:
msg = (
"ERROR in transformer config {}: ".format(config)
+ "input dimension {} ".format(input_dim)
+ "not dividable by number of heads {}".format(num_heads)
)
raise ValueError(msg)
def parse_transformer_context(self, transformer_context):
"""
transformer_context can be the following:
- None; indicates no context is used, i.e.,
transformer can access full context
- a tuple/list of two int; indicates left and right context,
any number <0 indicates infinite context
* e.g., (5, 6) indicates that for query at x_t, transformer can
access [t-5, t+6] (inclusive)
* e.g., (-1, 6) indicates that for query at x_t, transformer can
access [0, t+6] (inclusive)
"""
if transformer_context is None:
return None
if not isinstance(transformer_context, Iterable):
raise ValueError("transformer context must be Iterable if it is not None")
if len(transformer_context) != 2:
raise ValueError("transformer context must have length 2")
left_context = transformer_context[0]
if left_context < 0:
left_context = None
right_context = transformer_context[1]
if right_context < 0:
right_context = None
if left_context is None and right_context is None:
return None
return (left_context, right_context)
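    # Editor's sketch of the parsing above:
    #   parse_transformer_context(None)      -> None        (full context)
    #   parse_transformer_context((5, 6))    -> (5, 6)
    #   parse_transformer_context((-1, 6))   -> (None, 6)   (infinite left)
    #   parse_transformer_context((-1, -1))  -> None        (both sides infinite)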
def parse_transformer_sampling(self, transformer_sampling, num_layers):
"""
parsing transformer sampling configuration
Args:
- transformer_sampling, accepted input:
* None, indicating no sampling
* an Iterable with int (>0) as element
- num_layers, expected number of transformer layers, must match with
the length of transformer_sampling if it is not None
Returns:
- A tuple with length num_layers
"""
if transformer_sampling is None:
return (1,) * num_layers
if not isinstance(transformer_sampling, Iterable):
raise ValueError(
"transformer_sampling must be an iterable if it is not None"
)
if len(transformer_sampling) != num_layers:
raise ValueError(
"transformer_sampling {} does not match with the number "
"of layers {}".format(transformer_sampling, num_layers)
)
for layer, value in enumerate(transformer_sampling):
if not isinstance(value, int):
raise ValueError("Invalid value in transformer_sampling: ")
if value < 1:
raise ValueError(
"{} layer's subsampling is {}.".format(layer, value)
+ " This is not allowed! "
)
return transformer_sampling
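    # Editor's sketch of the parsing above:
    #   parse_transformer_sampling(None, 3)      -> (1, 1, 1)   (no subsampling)
    #   parse_transformer_sampling((1, 2, 2), 3) -> (1, 2, 2)
    #   parse_transformer_sampling((1, 2), 3)    -> ValueError  (length mismatch)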
def slice(self, embedding, padding_mask, attn_mask, sampling_factor):
"""
embedding is a (T, B, D) tensor
padding_mask is a (B, T) tensor or None
attn_mask is a (T, T) tensor or None
"""
embedding = embedding[::sampling_factor, :, :]
if padding_mask is not None:
padding_mask = padding_mask[:, ::sampling_factor]
if attn_mask is not None:
attn_mask = attn_mask[::sampling_factor, ::sampling_factor]
return embedding, padding_mask, attn_mask
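    # Editor's shape sketch: with sampling_factor == 2, a (10, B, D) embedding
    # becomes (5, B, D), while padding_mask strides (B, 10) -> (B, 5) and
    # attn_mask strides (10, 10) -> (5, 5), keeping all three consistent.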
def lengths_to_attn_mask(self, input_lengths, subsampling_factor=1):
"""
create attention mask according to sequence lengths and transformer
context
Args:
- input_lengths: (B, )-shape Int/Long tensor; input_lengths[b] is
the length of b-th sequence
- subsampling_factor: int
* Note that the left_context and right_context is specified in
the input frame-level while input to transformer may already
go through subsampling (e.g., the use of striding in vggblock)
we use subsampling_factor to scale the left/right context
Return:
- a (T, T) binary tensor or None, where T is max(input_lengths)
* if self.transformer_context is None, None
* if left_context is None,
* attn_mask[t, t + right_context + 1:] = 1
* others = 0
* if right_context is None,
* attn_mask[t, 0:t - left_context] = 1
* others = 0
            * otherwise
* attn_mask[t, t - left_context: t + right_context + 1] = 0
* others = 1
"""
if self.transformer_context is None:
return None
maxT = torch.max(input_lengths).item()
attn_mask = torch.zeros(maxT, maxT)
left_context = self.transformer_context[0]
right_context = self.transformer_context[1]
if left_context is not None:
left_context = math.ceil(self.transformer_context[0] / subsampling_factor)
if right_context is not None:
right_context = math.ceil(self.transformer_context[1] / subsampling_factor)
for t in range(maxT):
if left_context is not None:
st = 0
en = max(st, t - left_context)
attn_mask[t, st:en] = 1
if right_context is not None:
st = t + right_context + 1
st = min(st, maxT - 1)
attn_mask[t, st:] = 1
return attn_mask.to(input_lengths.device)
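    # Editor's worked example: with transformer_context == (1, 1),
    # subsampling_factor == 1 and max(input_lengths) == 5, query t == 2 may
    # attend to [t-1, t+1], so row 2 of the returned mask is
    # [1, 0, 0, 0, 1] (1 == blocked). Note that the last rows additionally
    # clamp st to maxT - 1 before masking the right side.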
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs.
Default: ``False``
left_pad (bool, optional): whether the input is left-padded. Default:
``False``
"""
def __init__(
self,
dictionary,
embed_dim=512,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
conv_config=DEFAULT_DEC_CONV_CONFIG,
encoder_output_dim=512,
):
super().__init__(dictionary)
vocab_size = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_tokens = Embedding(vocab_size, embed_dim, self.padding_idx)
self.conv_layers = nn.ModuleList()
for i in range(len(conv_config)):
out_channels, kernel_size, layer_norm = conv_config[i]
if i == 0:
conv_layer = LinearizedConv1d(
embed_dim, out_channels, kernel_size, padding=kernel_size - 1
)
else:
conv_layer = LinearizedConv1d(
conv_config[i - 1][0],
out_channels,
kernel_size,
padding=kernel_size - 1,
)
self.conv_layers.append(conv_layer)
if layer_norm:
self.conv_layers.append(nn.LayerNorm(out_channels))
self.conv_layers.append(nn.ReLU())
self.layers = nn.ModuleList()
if conv_config[-1][0] != transformer_config[0][0]:
self.layers.append(Linear(conv_config[-1][0], transformer_config[0][0]))
self.layers.append(
TransformerDecoderLayer(
prepare_transformer_decoder_params(*transformer_config[0])
)
)
for i in range(1, len(transformer_config)):
if transformer_config[i - 1][0] != transformer_config[i][0]:
self.layers.append(
Linear(transformer_config[i - 1][0], transformer_config[i][0])
)
self.layers.append(
TransformerDecoderLayer(
prepare_transformer_decoder_params(*transformer_config[i])
)
)
self.fc_out = Linear(transformer_config[-1][0], vocab_size)
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the last decoder layer's output of shape `(batch, tgt_len,
vocab)`
- the last decoder layer's attention weights of shape `(batch,
tgt_len, src_len)`
"""
target_padding_mask = (
(prev_output_tokens == self.padding_idx).to(prev_output_tokens.device)
if incremental_state is None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
# embed tokens
x = self.embed_tokens(prev_output_tokens)
# B x T x C -> T x B x C
x = self._transpose_if_training(x, incremental_state)
for layer in self.conv_layers:
if isinstance(layer, LinearizedConvolution):
x = layer(x, incremental_state)
else:
x = layer(x)
# B x T x C -> T x B x C
x = self._transpose_if_inference(x, incremental_state)
# decoder layers
for layer in self.layers:
if isinstance(layer, TransformerDecoderLayer):
x, *_ = layer(
x,
(encoder_out["encoder_out"] if encoder_out is not None else None),
(
encoder_out["encoder_padding_mask"].t()
if encoder_out["encoder_padding_mask"] is not None
else None
),
incremental_state,
self_attn_mask=(
self.buffered_future_mask(x)
if incremental_state is None
else None
),
self_attn_padding_mask=(
target_padding_mask if incremental_state is None else None
),
)
else:
x = layer(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
x = self.fc_out(x)
return x, None
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(
utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
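    # Editor's sketch: for dim == 3 the buffered mask is
    #   [[0., -inf, -inf],
    #    [0.,   0., -inf],
    #    [0.,   0.,   0.]]
    # so each target position can attend only to itself and earlier positions.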
def _transpose_if_training(self, x, incremental_state):
if incremental_state is None:
x = x.transpose(0, 1)
return x
def _transpose_if_inference(self, x, incremental_state):
if incremental_state:
x = x.transpose(0, 1)
return x
@register_model("asr_vggtransformer_encoder")
class VGGTransformerEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--vggblock-enc-config",
type=str,
metavar="EXPR",
help="""
    an array of tuples each containing the configuration of one vggblock
    [(out_channels, conv_kernel_size, pooling_kernel_size, num_conv_layers, use_layer_norm), ...]
""",
)
parser.add_argument(
"--transformer-enc-config",
type=str,
metavar="EXPR",
help="""
    a tuple of tuples, each containing the configuration of one Transformer
    layer:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
      relu_dropout), ...]""",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="encoder output dimension, projecting the LSTM output",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--transformer-context",
type=str,
metavar="EXPR",
help="""
either None or a tuple of two ints, indicating left/right context a
transformer can have access to""",
)
parser.add_argument(
"--transformer-sampling",
type=str,
metavar="EXPR",
help="""
either None or a tuple of ints, indicating sampling factor in each layer""",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
base_architecture_enconly(args)
encoder = VGGTransformerEncoderOnly(
vocab_size=len(task.target_dictionary),
input_feat_per_channel=args.input_feat_per_channel,
vggblock_config=eval(args.vggblock_enc_config),
transformer_config=eval(args.transformer_enc_config),
encoder_output_dim=args.enc_output_dim,
in_channels=args.in_channels,
transformer_context=eval(args.transformer_context),
transformer_sampling=eval(args.transformer_sampling),
)
return cls(encoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (T, B, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
# lprobs is a (T, B, D) tensor
        # we need to transpose to get (B, T, D) tensor
lprobs = lprobs.transpose(0, 1).contiguous()
lprobs.batch_first = True
return lprobs
class VGGTransformerEncoderOnly(VGGTransformerEncoder):
def __init__(
self,
vocab_size,
input_feat_per_channel,
vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
encoder_output_dim=512,
in_channels=1,
transformer_context=None,
transformer_sampling=None,
):
super().__init__(
input_feat_per_channel=input_feat_per_channel,
vggblock_config=vggblock_config,
transformer_config=transformer_config,
encoder_output_dim=encoder_output_dim,
in_channels=in_channels,
transformer_context=transformer_context,
transformer_sampling=transformer_sampling,
)
self.fc_out = Linear(self.encoder_output_dim, vocab_size)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
enc_out = super().forward(src_tokens, src_lengths)
x = self.fc_out(enc_out["encoder_out"])
# x = F.log_softmax(x, dim=-1)
        # Note: this line is not needed because model.get_normalized_probs
        # already applies log_softmax
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": enc_out["encoder_padding_mask"], # (T, B)
}
def max_positions(self):
"""Maximum input length supported by the encoder."""
        return (1e6, 1e6)  # arbitrarily large numbers
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
# nn.init.uniform_(m.weight, -0.1, 0.1)
# nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True, dropout=0):
"""Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
# m.weight.data.uniform_(-0.1, 0.1)
# if bias:
# m.bias.data.uniform_(-0.1, 0.1)
return m
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
"""Weight-normalized Conv1d layer optimized for decoding"""
m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m, dim=2)
def LayerNorm(embedding_dim):
m = nn.LayerNorm(embedding_dim)
return m
# seq2seq models
def base_architecture(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", DEFAULT_ENC_VGGBLOCK_CONFIG
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", DEFAULT_ENC_TRANSFORMER_CONFIG
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.in_channels = getattr(args, "in_channels", 1)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128)
args.transformer_dec_config = getattr(
args, "transformer_dec_config", DEFAULT_ENC_TRANSFORMER_CONFIG
)
args.conv_dec_config = getattr(args, "conv_dec_config", DEFAULT_DEC_CONV_CONFIG)
args.transformer_context = getattr(args, "transformer_context", "None")
@register_model_architecture("asr_vggtransformer", "vggtransformer_1")
def vggtransformer_1(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 14",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args,
"transformer_dec_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 4",
)
@register_model_architecture("asr_vggtransformer", "vggtransformer_2")
def vggtransformer_2(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args,
"transformer_dec_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 6",
)
@register_model_architecture("asr_vggtransformer", "vggtransformer_base")
def vggtransformer_base(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 12"
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args, "transformer_dec_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 6"
)
# Size estimations:
# Encoder:
# - vggblock param: 64*1*3*3 + 64*64*3*3 + 128*64*3*3 + 128*128*3*3 = 258K
# Transformer:
# - input dimension adapter: 2560 x 512 -> 1.31M
# - transformer_layers (x12) --> 37.74M
# * MultiheadAttention: 512*512*3 (in_proj) + 512*512 (out_proj) = 1.048M
# * FFN weight: 512*2048*2 = 2.097M
# - output dimension adapter: 512 x 512 -> 0.26 M
# Decoder:
# - LinearizedConv1d: 512 * 256 * 3 + 256 * 256 * 3 * 3
# - transformer_layer: (x6) --> 25.16M
# * MultiheadAttention (self-attention): 512*512*3 + 512*512 = 1.048M
# * MultiheadAttention (encoder-attention): 512*512*3 + 512*512 = 1.048M
# * FFN: 512*2048*2 = 2.097M
# Final FC:
# - FC: 512*5000 = 256K (assuming vocab size 5K)
# In total:
# ~65 M
# CTC models
def base_architecture_enconly(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(32, 3, 2, 2, True)] * 2"
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", "((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2"
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.in_channels = getattr(args, "in_channels", 1)
args.transformer_context = getattr(args, "transformer_context", "None")
args.transformer_sampling = getattr(args, "transformer_sampling", "None")
@register_model_architecture("asr_vggtransformer_encoder", "vggtransformer_enc_1")
def vggtransformer_enc_1(args):
    # vggtransformer_enc_1 is the same as vggtransformer_enc_big, except the
    # number of layers is increased to 16
    # keep it here for backward compatibility purposes
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
| 37,258
| 35.564279
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_recognition/models/w2l_conv_glu_enc.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules.fairseq_dropout import FairseqDropout
default_conv_enc_config = """[
(400, 13, 170, 0.2),
(440, 14, 0, 0.214),
(484, 15, 0, 0.22898),
(532, 16, 0, 0.2450086),
(584, 17, 0, 0.262159202),
(642, 18, 0, 0.28051034614),
(706, 19, 0, 0.30014607037),
(776, 20, 0, 0.321156295296),
(852, 21, 0, 0.343637235966),
(936, 22, 0, 0.367691842484),
(1028, 23, 0, 0.393430271458),
(1130, 24, 0, 0.42097039046),
(1242, 25, 0, 0.450438317792),
(1366, 26, 0, 0.481969000038),
(1502, 27, 0, 0.51570683004),
(1652, 28, 0, 0.551806308143),
(1816, 29, 0, 0.590432749713),
]"""
@register_model("asr_w2l_conv_glu_encoder")
class W2lConvGluEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--conv-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one conv layer
[(out_channels, kernel_size, padding, dropout), ...]
""",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
encoder = W2lConvGluEncoder(
vocab_size=len(task.target_dictionary),
input_feat_per_channel=args.input_feat_per_channel,
in_channels=args.in_channels,
conv_enc_config=eval(conv_enc_config),
)
return cls(encoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = False
return lprobs
class W2lConvGluEncoder(FairseqEncoder):
def __init__(
self, vocab_size, input_feat_per_channel, in_channels, conv_enc_config
):
super().__init__(None)
self.input_dim = input_feat_per_channel
if in_channels != 1:
raise ValueError("only 1 input channel is currently supported")
self.conv_layers = nn.ModuleList()
self.linear_layers = nn.ModuleList()
self.dropouts = []
cur_channels = input_feat_per_channel
for out_channels, kernel_size, padding, dropout in conv_enc_config:
layer = nn.Conv1d(cur_channels, out_channels, kernel_size, padding=padding)
layer.weight.data.mul_(math.sqrt(3)) # match wav2letter init
self.conv_layers.append(nn.utils.weight_norm(layer))
self.dropouts.append(
FairseqDropout(dropout, module_name=self.__class__.__name__)
)
if out_channels % 2 != 0:
raise ValueError("odd # of out_channels is incompatible with GLU")
cur_channels = out_channels // 2 # halved by GLU
for out_channels in [2 * cur_channels, vocab_size]:
layer = nn.Linear(cur_channels, out_channels)
layer.weight.data.mul_(math.sqrt(3))
self.linear_layers.append(nn.utils.weight_norm(layer))
cur_channels = out_channels // 2
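    # Editor's channel bookkeeping sketch: F.glu splits the channel dim in half
    # and gates one half with the other, so a conv emitting out_channels == 400
    # leaves cur_channels == 200 for the next layer; the same halving applies
    # after the first linear layer.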
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
B, T, _ = src_tokens.size()
x = src_tokens.transpose(1, 2).contiguous() # (B, feat, T) assuming C == 1
for layer_idx in range(len(self.conv_layers)):
x = self.conv_layers[layer_idx](x)
x = F.glu(x, dim=1)
x = self.dropouts[layer_idx](x)
x = x.transpose(1, 2).contiguous() # (B, T, 908)
x = self.linear_layers[0](x)
x = F.glu(x, dim=2)
x = self.dropouts[-1](x)
x = self.linear_layers[1](x)
assert x.size(0) == B
assert x.size(1) == T
encoder_out = x.transpose(0, 1) # (T, B, vocab_size)
        # need to debug this -- find a simpler / more elegant way in the PyTorch APIs
encoder_padding_mask = (
torch.arange(T).view(1, T).expand(B, -1).to(x.device)
>= src_lengths.view(B, 1).expand(-1, T)
).t() # (B x T) -> (T x B)
return {
"encoder_out": encoder_out, # (T, B, vocab_size)
"encoder_padding_mask": encoder_padding_mask, # (T, B)
}
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
        return (1e6, 1e6)  # arbitrarily large numbers
@register_model_architecture("asr_w2l_conv_glu_encoder", "w2l_conv_glu_enc")
def w2l_conv_glu_enc(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.in_channels = getattr(args, "in_channels", 1)
args.conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
| 6,077
| 33.338983
| 87
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_recognition/models/__init__.py
|
import importlib
import os
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
model_name = file[: file.find(".py")]
importlib.import_module("examples.speech_recognition.models." + model_name)
| 276
| 29.777778
| 83
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_recognition/datasets/asr_prep_json.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import concurrent.futures
import json
import multiprocessing
import os
from collections import namedtuple
from itertools import chain
import sentencepiece as spm
from fairseq.data import Dictionary
MILLISECONDS_TO_SECONDS = 0.001
def process_sample(aud_path, label, utt_id, sp, tgt_dict):
import torchaudio
input = {}
output = {}
si, ei = torchaudio.info(aud_path)
input["length_ms"] = int(
si.length / si.channels / si.rate / MILLISECONDS_TO_SECONDS
)
input["path"] = aud_path
token = " ".join(sp.EncodeAsPieces(lable))
ids = tgt_dict.encode_line(token, append_eos=False)
output["text"] = lable
output["token"] = token
output["tokenid"] = ", ".join(map(str, [t.tolist() for t in ids]))
return {utt_id: {"input": input, "output": output}}
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--audio-dirs",
nargs="+",
default=["-"],
required=True,
help="input directories with audio files",
)
parser.add_argument(
"--labels",
required=True,
help="aggregated input labels with format <ID LABEL> per line",
type=argparse.FileType("r", encoding="UTF-8"),
)
parser.add_argument(
"--spm-model",
required=True,
help="sentencepiece model to use for encoding",
type=argparse.FileType("r", encoding="UTF-8"),
)
parser.add_argument(
"--dictionary",
required=True,
help="file to load fairseq dictionary from",
type=argparse.FileType("r", encoding="UTF-8"),
)
parser.add_argument("--audio-format", choices=["flac", "wav"], default="wav")
parser.add_argument(
"--output",
required=True,
type=argparse.FileType("w"),
help="path to save json output",
)
args = parser.parse_args()
sp = spm.SentencePieceProcessor()
sp.Load(args.spm_model.name)
tgt_dict = Dictionary.load(args.dictionary)
labels = {}
for line in args.labels:
(utt_id, label) = line.split(" ", 1)
labels[utt_id] = label
if len(labels) == 0:
raise Exception("No labels found in ", args.labels_path)
Sample = namedtuple("Sample", "aud_path utt_id")
samples = []
for path, _, files in chain.from_iterable(
os.walk(path) for path in args.audio_dirs
):
for f in files:
if f.endswith(args.audio_format):
if len(os.path.splitext(f)) != 2:
raise Exception("Expect <utt_id.extension> file name. Got: ", f)
utt_id = os.path.splitext(f)[0]
if utt_id not in labels:
continue
samples.append(Sample(os.path.join(path, f), utt_id))
utts = {}
num_cpu = multiprocessing.cpu_count()
with concurrent.futures.ThreadPoolExecutor(max_workers=num_cpu) as executor:
future_to_sample = {
executor.submit(
process_sample, s.aud_path, labels[s.utt_id], s.utt_id, sp, tgt_dict
): s
for s in samples
}
for future in concurrent.futures.as_completed(future_to_sample):
try:
data = future.result()
except Exception as exc:
print("generated an exception: ", exc)
else:
utts.update(data)
json.dump({"utts": utts}, args.output, indent=4)
if __name__ == "__main__":
main()
| 3,775
| 28.968254
| 84
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_recognition/new/__init__.py
| 0
| 0
| 0
|
py
|
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_recognition/new/infer.py
|
#!/usr/bin/env python -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import hashlib
import logging
import os
import shutil
import sys
from dataclasses import dataclass, field, is_dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import editdistance
import torch
import torch.distributed as dist
from examples.speech_recognition.new.decoders.decoder_config import (
DecoderConfig,
FlashlightDecoderConfig,
)
from examples.speech_recognition.new.decoders.decoder import Decoder
from fairseq import checkpoint_utils, distributed_utils, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
FairseqDataclass,
)
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.logging.progress_bar import BaseProgressBar
from fairseq.models.fairseq_model import FairseqModel
from omegaconf import OmegaConf
import hydra
from hydra.core.config_store import ConfigStore
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
config_path = Path(__file__).resolve().parent / "conf"
@dataclass
class DecodingConfig(DecoderConfig, FlashlightDecoderConfig):
unique_wer_file: bool = field(
default=False,
metadata={"help": "If set, use a unique file for storing WER"},
)
results_path: Optional[str] = field(
default=None,
metadata={
"help": "If set, write hypothesis and reference sentences into this directory"
},
)
@dataclass
class InferConfig(FairseqDataclass):
task: Any = None
decoding: DecodingConfig = DecodingConfig()
common: CommonConfig = CommonConfig()
common_eval: CommonEvalConfig = CommonEvalConfig()
checkpoint: CheckpointConfig = CheckpointConfig()
distributed_training: DistributedTrainingConfig = DistributedTrainingConfig()
dataset: DatasetConfig = DatasetConfig()
is_ax: bool = field(
default=False,
metadata={
"help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume"
},
)
def reset_logging():
root = logging.getLogger()
for handler in root.handlers:
root.removeHandler(handler)
root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper())
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
root.addHandler(handler)
class InferenceProcessor:
cfg: InferConfig
def __init__(self, cfg: InferConfig) -> None:
self.cfg = cfg
self.task = tasks.setup_task(cfg.task)
models, saved_cfg = self.load_model_ensemble()
self.models = models
self.saved_cfg = saved_cfg
self.tgt_dict = self.task.target_dictionary
self.task.load_dataset(
self.cfg.dataset.gen_subset,
task_cfg=saved_cfg.task,
)
self.generator = Decoder(cfg.decoding, self.tgt_dict)
self.gen_timer = StopwatchMeter()
self.wps_meter = TimeMeter()
self.num_sentences = 0
self.total_errors = 0
self.total_length = 0
self.hypo_words_file = None
self.hypo_units_file = None
self.ref_words_file = None
self.ref_units_file = None
self.progress_bar = self.build_progress_bar()
def __enter__(self) -> "InferenceProcessor":
if self.cfg.decoding.results_path is not None:
self.hypo_words_file = self.get_res_file("hypo.word")
self.hypo_units_file = self.get_res_file("hypo.units")
self.ref_words_file = self.get_res_file("ref.word")
self.ref_units_file = self.get_res_file("ref.units")
return self
def __exit__(self, *exc) -> bool:
if self.cfg.decoding.results_path is not None:
self.hypo_words_file.close()
self.hypo_units_file.close()
self.ref_words_file.close()
self.ref_units_file.close()
return False
def __iter__(self) -> Any:
for sample in self.progress_bar:
if not self.cfg.common.cpu:
sample = utils.move_to_cuda(sample)
# Happens on the last batch.
if "net_input" not in sample:
continue
yield sample
def log(self, *args, **kwargs):
self.progress_bar.log(*args, **kwargs)
def print(self, *args, **kwargs):
self.progress_bar.print(*args, **kwargs)
def get_res_file(self, fname: str) -> None:
fname = os.path.join(self.cfg.decoding.results_path, fname)
if self.data_parallel_world_size > 1:
fname = f"{fname}.{self.data_parallel_rank}"
return open(fname, "w", buffering=1)
def merge_shards(self) -> None:
"""Merges all shard files into shard 0, then removes shard suffix."""
shard_id = self.data_parallel_rank
num_shards = self.data_parallel_world_size
if self.data_parallel_world_size > 1:
def merge_shards_with_root(fname: str) -> None:
fname = os.path.join(self.cfg.decoding.results_path, fname)
logger.info("Merging %s on shard %d", fname, shard_id)
base_fpath = Path(f"{fname}.0")
with open(base_fpath, "a") as out_file:
for s in range(1, num_shards):
shard_fpath = Path(f"{fname}.{s}")
with open(shard_fpath, "r") as in_file:
for line in in_file:
out_file.write(line)
shard_fpath.unlink()
shutil.move(f"{fname}.0", fname)
dist.barrier() # ensure all shards finished writing
if shard_id == (0 % num_shards):
merge_shards_with_root("hypo.word")
if shard_id == (1 % num_shards):
merge_shards_with_root("hypo.units")
if shard_id == (2 % num_shards):
merge_shards_with_root("ref.word")
if shard_id == (3 % num_shards):
merge_shards_with_root("ref.units")
dist.barrier()
def optimize_model(self, model: FairseqModel) -> None:
model.make_generation_fast_()
if self.cfg.common.fp16:
model.half()
if not self.cfg.common.cpu:
model.cuda()
def load_model_ensemble(self) -> Tuple[List[FairseqModel], FairseqDataclass]:
arg_overrides = ast.literal_eval(self.cfg.common_eval.model_overrides)
models, saved_cfg = checkpoint_utils.load_model_ensemble(
utils.split_paths(self.cfg.common_eval.path, separator="\\"),
arg_overrides=arg_overrides,
task=self.task,
suffix=self.cfg.checkpoint.checkpoint_suffix,
strict=(self.cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=self.cfg.checkpoint.checkpoint_shard_count,
)
for model in models:
self.optimize_model(model)
return models, saved_cfg
def get_dataset_itr(self, disable_iterator_cache: bool = False) -> None:
return self.task.get_batch_iterator(
dataset=self.task.dataset(self.cfg.dataset.gen_subset),
max_tokens=self.cfg.dataset.max_tokens,
max_sentences=self.cfg.dataset.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,
seed=self.cfg.common.seed,
num_shards=self.data_parallel_world_size,
shard_id=self.data_parallel_rank,
num_workers=self.cfg.dataset.num_workers,
data_buffer_size=self.cfg.dataset.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
).next_epoch_itr(shuffle=False)
def build_progress_bar(
self,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
default_log_format: str = "tqdm",
) -> BaseProgressBar:
return progress_bar.progress_bar(
iterator=self.get_dataset_itr(),
log_format=self.cfg.common.log_format,
log_interval=self.cfg.common.log_interval,
epoch=epoch,
prefix=prefix,
tensorboard_logdir=self.cfg.common.tensorboard_logdir,
default_log_format=default_log_format,
)
@property
def data_parallel_world_size(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 1
return distributed_utils.get_data_parallel_world_size()
@property
def data_parallel_rank(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 0
return distributed_utils.get_data_parallel_rank()
def process_sentence(
self,
sample: Dict[str, Any],
hypo: Dict[str, Any],
sid: int,
batch_id: int,
) -> Tuple[int, int]:
speaker = None # Speaker can't be parsed from dataset.
if "target_label" in sample:
toks = sample["target_label"]
else:
toks = sample["target"]
toks = toks[batch_id, :]
# Processes hypothesis.
hyp_pieces = self.tgt_dict.string(hypo["tokens"].int().cpu())
if "words" in hypo:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, self.cfg.common_eval.post_process)
# Processes target.
target_tokens = utils.strip_pad(toks, self.tgt_dict.pad())
tgt_pieces = self.tgt_dict.string(target_tokens.int().cpu())
tgt_words = post_process(tgt_pieces, self.cfg.common_eval.post_process)
if self.cfg.decoding.results_path is not None:
print(f"{hyp_pieces} ({speaker}-{sid})", file=self.hypo_units_file)
print(f"{hyp_words} ({speaker}-{sid})", file=self.hypo_words_file)
print(f"{tgt_pieces} ({speaker}-{sid})", file=self.ref_units_file)
print(f"{tgt_words} ({speaker}-{sid})", file=self.ref_words_file)
if not self.cfg.common_eval.quiet:
logger.info(f"HYPO: {hyp_words}")
logger.info(f"REF: {tgt_words}")
logger.info("---------------------")
hyp_words, tgt_words = hyp_words.split(), tgt_words.split()
return editdistance.eval(hyp_words, tgt_words), len(tgt_words)
def process_sample(self, sample: Dict[str, Any]) -> None:
self.gen_timer.start()
hypos = self.task.inference_step(
generator=self.generator,
models=self.models,
sample=sample,
)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
self.gen_timer.stop(num_generated_tokens)
self.wps_meter.update(num_generated_tokens)
for batch_id, sample_id in enumerate(sample["id"].tolist()):
errs, length = self.process_sentence(
sample=sample,
sid=sample_id,
batch_id=batch_id,
hypo=hypos[batch_id][0],
)
self.total_errors += errs
self.total_length += length
self.log({"wps": round(self.wps_meter.avg)})
if "nsentences" in sample:
self.num_sentences += sample["nsentences"]
else:
self.num_sentences += sample["id"].numel()
def log_generation_time(self) -> None:
logger.info(
"Processed %d sentences (%d tokens) in %.1fs %.2f "
"sentences per second, %.2f tokens per second)",
self.num_sentences,
self.gen_timer.n,
self.gen_timer.sum,
self.num_sentences / self.gen_timer.sum,
1.0 / self.gen_timer.avg,
)
def parse_wer(wer_file: Path) -> float:
with open(wer_file, "r") as f:
return float(f.readline().strip().split(" ")[1])
def get_wer_file(cfg: InferConfig) -> Path:
"""Hashes the decoding parameters to a unique file ID."""
base_path = "wer"
if cfg.decoding.results_path is not None:
base_path = os.path.join(cfg.decoding.results_path, base_path)
if cfg.decoding.unique_wer_file:
yaml_str = OmegaConf.to_yaml(cfg.decoding)
fid = int(hashlib.md5(yaml_str.encode("utf-8")).hexdigest(), 16)
return Path(f"{base_path}.{fid % 1000000}")
else:
return Path(base_path)
def main(cfg: InferConfig) -> float:
"""Entry point for main processing logic.
Args:
cfg: The inferance configuration to use.
wer: Optional shared memory pointer for returning the WER. If not None,
the final WER value will be written here instead of being returned.
Returns:
The final WER if `wer` is None, otherwise None.
"""
yaml_str, wer_file = OmegaConf.to_yaml(cfg.decoding), get_wer_file(cfg)
# Validates the provided configuration.
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.max_tokens = 4000000
if not cfg.common.cpu and not torch.cuda.is_available():
raise ValueError("CUDA not found; set `cpu=True` to run without CUDA")
with InferenceProcessor(cfg) as processor:
for sample in processor:
processor.process_sample(sample)
processor.log_generation_time()
if cfg.decoding.results_path is not None:
processor.merge_shards()
errs_t, leng_t = processor.total_errors, processor.total_length
if cfg.common.cpu:
logger.warning("Merging WER requires CUDA.")
elif processor.data_parallel_world_size > 1:
stats = torch.LongTensor([errs_t, leng_t]).cuda()
dist.all_reduce(stats, op=dist.ReduceOp.SUM)
errs_t, leng_t = stats[0].item(), stats[1].item()
wer = errs_t * 100.0 / leng_t
if distributed_utils.is_master(cfg.distributed_training):
with open(wer_file, "w") as f:
f.write(
(
f"WER: {wer}\n"
f"err / num_ref_words = {errs_t} / {leng_t}\n\n"
f"{yaml_str}"
)
)
return wer
@hydra.main(config_path=config_path, config_name="infer")
def hydra_main(cfg: InferConfig) -> Union[float, Tuple[float, Optional[float]]]:
container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True)
cfg = OmegaConf.create(container)
OmegaConf.set_struct(cfg, True)
if cfg.common.reset_logging:
reset_logging()
# logger.info("Config:\n%s", OmegaConf.to_yaml(cfg))
wer = float("inf")
try:
if cfg.common.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, main)
else:
distributed_utils.call_main(cfg, main)
wer = parse_wer(get_wer_file(cfg))
except BaseException as e: # pylint: disable=broad-except
if not cfg.common.suppress_crashes:
raise
else:
logger.error("Crashed! %s", str(e))
logger.info("Word error rate: %.4f", wer)
if cfg.is_ax:
return wer, None
return wer
def cli_main() -> None:
try:
from hydra._internal.utils import (
get_args,
) # pylint: disable=import-outside-toplevel
cfg_name = get_args().config_name or "infer"
except ImportError:
logger.warning("Failed to get config name from hydra args")
cfg_name = "infer"
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=InferConfig)
for k in InferConfig.__dataclass_fields__:
if is_dataclass(InferConfig.__dataclass_fields__[k].type):
v = InferConfig.__dataclass_fields__[k].default
cs.store(name=k, node=v)
hydra_main() # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
cli_main()
| 16,498
| 33.955508
| 103
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_recognition/new/decoders/decoder_config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import Optional
from fairseq.dataclass.configs import FairseqDataclass
from fairseq.dataclass.constants import ChoiceEnum
from omegaconf import MISSING
DECODER_CHOICES = ChoiceEnum(["viterbi", "kenlm", "fairseqlm"])
@dataclass
class DecoderConfig(FairseqDataclass):
type: DECODER_CHOICES = field(
default="viterbi",
metadata={"help": "The type of decoder to use"},
)
@dataclass
class FlashlightDecoderConfig(FairseqDataclass):
nbest: int = field(
default=1,
metadata={"help": "Number of decodings to return"},
)
unitlm: bool = field(
default=False,
metadata={"help": "If set, use unit language model"},
)
lmpath: str = field(
default=MISSING,
metadata={"help": "Language model for KenLM decoder"},
)
lexicon: Optional[str] = field(
default=None,
metadata={"help": "Lexicon for Flashlight decoder"},
)
beam: int = field(
default=50,
metadata={"help": "Number of beams to use for decoding"},
)
beamthreshold: float = field(
default=50.0,
metadata={"help": "Threshold for beam search decoding"},
)
beamsizetoken: Optional[int] = field(
default=None, metadata={"help": "Beam size to use"}
)
wordscore: float = field(
default=-1,
metadata={"help": "Word score for KenLM decoder"},
)
unkweight: float = field(
default=-math.inf,
metadata={"help": "Unknown weight for KenLM decoder"},
)
silweight: float = field(
default=0,
metadata={"help": "Silence weight for KenLM decoder"},
)
lmweight: float = field(
default=2,
metadata={"help": "Weight for LM while interpolating score"},
)
| 2,004
| 27.239437
| 69
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/examples/speech_recognition/new/decoders/decoder.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Union
from fairseq.data.dictionary import Dictionary
from .decoder_config import DecoderConfig, FlashlightDecoderConfig
from .base_decoder import BaseDecoder
def Decoder(
cfg: Union[DecoderConfig, FlashlightDecoderConfig], tgt_dict: Dictionary
) -> BaseDecoder:
if cfg.type == "viterbi":
from .viterbi_decoder import ViterbiDecoder
return ViterbiDecoder(tgt_dict)
if cfg.type == "kenlm":
from .flashlight_decoder import KenLMDecoder
return KenLMDecoder(cfg, tgt_dict)
if cfg.type == "fairseqlm":
from .flashlight_decoder import FairseqLMDecoder
return FairseqLMDecoder(cfg, tgt_dict)
    raise NotImplementedError(f"Invalid decoder type: {cfg.type}")
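# Editor's usage sketch (names as defined above):
#
#   cfg = DecoderConfig(type="viterbi")
#   decoder = Decoder(cfg, tgt_dict)  # -> ViterbiDecoder(tgt_dict)
#
# "kenlm" and "fairseqlm" expect a FlashlightDecoderConfig with lmpath set.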
| 943
| 28.5
| 76
|
py
|