repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
mmda | mmda-main/src/mmda/predictors/hf_predictors/bibentry_predictor/predictor.py | import os
import re
from typing import Dict, List, Optional, Tuple
from optimum.onnxruntime import ORTModelForTokenClassification
import torch
from transformers import AutoConfig, AutoTokenizer, AutoModelForTokenClassification
from unidecode import unidecode
from mmda.predictors.hf_predictors.base_hf_predictor import BasePredictor
from mmda.predictors.hf_predictors.bibentry_predictor import utils
from mmda.predictors.hf_predictors.bibentry_predictor.types import (
BibEntryLabel,
BibEntryPredictionWithSpan,
BibEntryStructureSpanGroups,
StringWithSpan
)
from mmda.types.document import Document
class BibEntryPredictor(BasePredictor):
    """Extracts structured fields from raw bibliography-entry strings.

    Wraps a HuggingFace token-classification model; when a ``model.onnx`` file
    is present in the model directory, the ONNX export is used instead of the
    torch checkpoint. Token-level label predictions are aggregated into
    word-level labels and mapped back onto character spans of the input
    strings, producing citation number, authors, title, journal/venue/event,
    year, DOI, and URL fields.
    """

    REQUIRED_BACKENDS = ["transformers", "torch"]
    REQUIRED_DOCUMENT_FIELDS = ["tokens", "pages", "bib_entries"]

    def __init__(self, model_name_or_path: str):
        """Load config, tokenizer, and model (ONNX if available) onto GPU/CPU."""
        self.config = AutoConfig.from_pretrained(model_name_or_path)
        self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
        # Prefer the ONNX export when it exists alongside the checkpoint.
        onnx = os.path.exists(os.path.join(model_name_or_path, "model.onnx"))
        if onnx:
            self.model = ORTModelForTokenClassification.from_pretrained(
                model_name_or_path, file_name="model.onnx"
            )
        else:
            self.model = AutoModelForTokenClassification.from_pretrained(model_name_or_path)
        self.model.to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
        if not onnx:
            # https://stackoverflow.com/a/60018731
            self.model.eval()  # for some reason the onnx version doesnt have an eval()

    def predict(self, document: Document, bibentries_per_run: int = 5) -> BibEntryStructureSpanGroups:
        """Predict bibentry structure for every bib_entries span group in `document`.

        Args:
            document: mmda Document annotated with tokens, pages, and bib_entries.
            bibentries_per_run: inference batch size, capped to bound VRAM usage.

        Returns:
            BibEntryStructureSpanGroups mapping predictions back onto the document.
        """
        # Recover the (approximate) raw bibentry strings from mmda document
        bib_entry_strings = utils.mk_bib_entry_strings(document)

        raw_predictions = []
        # Do inference in batches to not blow out vram
        for i in range(0, len(bib_entry_strings), bibentries_per_run):
            batch_strings = bib_entry_strings[i:i + bibentries_per_run]
            raw_predictions += self.predict_raw(batch_strings)

        # Map raw predictions back into valid annotations for passed document
        return utils.map_raw_predictions_to_mmda(document.bib_entries, raw_predictions)

    def predict_raw(self, bib_entries: List[str]) -> List[BibEntryPredictionWithSpan]:
        """Run the model over raw bibentry strings and aggregate field predictions.

        Args:
            bib_entries: raw bibliography-entry strings (may be empty).

        Returns:
            One BibEntryPredictionWithSpan per input string.
        """
        if not bib_entries:
            return []

        res = []
        tokenized_inputs = self.tokenizer(bib_entries, padding=True, truncation=True, return_tensors="pt")
        # put the data on the same device of the model.
        tokenized_inputs = tokenized_inputs.to(self.model.device)
        predictions = self.model(**tokenized_inputs)
        pred_ids = predictions.logits.argmax(2).tolist()

        for i in range(len(bib_entries)):
            # Combine token-level prediction into word-level prediction
            label_ids = BibEntryPredictor._get_word_level_prediction(tokenized_inputs.word_ids(i), pred_ids[i])
            word_ids = [wid for wid in tokenized_inputs.word_ids(i) if wid is not None]
            # word ids are monotonically increasing, so the last one bounds the count
            num_words = word_ids[-1] + 1 if word_ids else 0
            spans = [tokenized_inputs.word_to_chars(i, word_index) for word_index in range(num_words)]

            # Extract output fields from word predictions
            res.append(BibEntryPredictor._aggregate_token_level_prediction(bib_entries[i], spans, label_ids))

        return res

    @staticmethod
    def postprocess(pred: BibEntryPredictionWithSpan) -> Dict:
        """Normalize a prediction into a plain dict of cleaned output fields."""
        citation_number = pred.citation_number.content if pred.citation_number else None
        title = BibEntryPredictor._clean_str(pred.title.content) if pred.title else None
        doi = BibEntryPredictor._clean_doi(pred.doi.content) if pred.doi else None
        return dict(
            citation_number=citation_number,
            title=title,
            doi=doi
        )

    @staticmethod
    def _get_word_level_prediction(word_ids: List[Optional[int]], predictions: List[int]) -> List[int]:
        """
        If a word is split into 2 or more tokens, only take prediction for the first token.
        """
        res = []
        prev_word_id = None
        for word_id, pred in zip(word_ids, predictions):
            if word_id is not None and word_id != prev_word_id:
                # Tokenization process removes empty string and skips word id, so we're adding it back here
                # For example:
                # input string list: [' Anon ', '1934', ' ', 'University and Educational Intelligence', ' ', 'Nature', ' ', '133', ' ', '805–805']
                # tokenization removes empty string: ['[CLS]', 'an', '##on', '1934', 'university', 'and', 'educational', 'intelligence', 'nature', '133', '80', '##5', '–', '80', '##5', '[SEP]']
                # skipping empty string results in skipping word id: [None, 0, 0, 1, 3, 3, 3, 3, 5, 7, 9, 9, 9, 9, 9, None]
                # predictions: [0, 9, 9, 0, 8, 9, 8, 8, 9, 0, 13, 13, 13, 13, 13, 4]
                if prev_word_id is not None:
                    # Fill skipped word ids with the MISC label so indices line up.
                    for _ in range(word_id - (prev_word_id + 1)):
                        res.append(BibEntryLabel.MISC.value)
                res.append(pred)
            prev_word_id = word_id
        return res

    @staticmethod
    def _aggregate_token_level_prediction(text: str, spans, label_ids: List[int]) -> BibEntryPredictionWithSpan:
        """Assemble the per-field prediction object from word-level labels.

        Args:
            text: the original bibentry string.
            spans: per-word character spans (from tokenizer `word_to_chars`).
            label_ids: one label id per word, aligned with `spans`.
        """
        citation_number = BibEntryPredictor._extract_first_contiguous_label_group_token_level(
            text, spans, label_ids, BibEntryLabel.CITATION_NUMBER)
        authors = BibEntryPredictor._extract_author_token(text, spans, label_ids)
        title = BibEntryPredictor._extract_first_contiguous_label_group_token_level(
            text, spans, label_ids, BibEntryLabel.TITLE)
        journal = BibEntryPredictor._extract_first_contiguous_label_group_token_level(
            text, spans, label_ids, BibEntryLabel.JOURNAL)
        event = BibEntryPredictor._extract_first_contiguous_label_group_token_level(
            text, spans, label_ids, BibEntryLabel.EVENT)
        # Journal takes precedence over event when both are predicted.
        journal_venue_or_event = journal if journal else event
        year = BibEntryPredictor._extract_first_contiguous_label_group_token_level(
            text, spans, label_ids, BibEntryLabel.ISSUED_YEAR)
        doi = BibEntryPredictor._extract_first_contiguous_label_group_token_level(
            text, spans, label_ids, BibEntryLabel.DOI)
        url = BibEntryPredictor._extract_first_contiguous_label_group_token_level(
            text, spans, label_ids, BibEntryLabel.URL)

        return BibEntryPredictionWithSpan(
            citation_number=citation_number,
            authors=authors,
            title=title,
            journal_venue_or_event=journal_venue_or_event,
            year=year,
            doi=doi,
            url=url
        )

    @staticmethod
    def _extract_author_token(text: str, spans, label_ids: List[int]) -> Optional[List[StringWithSpan]]:
        """Collect every contiguous run of author labels as one StringWithSpan.

        Returns None (not an empty list) when no author span is found.
        """
        res = []
        author_span = None
        author_labels = (
            BibEntryLabel.AUTHOR_START.value,
            BibEntryLabel.AUTHOR_MIDDLE.value,
            BibEntryLabel.AUTHOR_END.value,
        )
        for word_index, label_id in enumerate(label_ids):
            # Beginning of new author
            if label_id == BibEntryLabel.AUTHOR_START.value and not author_span:
                author_span = spans[word_index]
            # Middle of current author
            elif label_id in author_labels and author_span:
                current_span = spans[word_index]
                author_span = author_span._replace(end=current_span.end)
            # End of current author. Close current author span and reset.
            elif label_id not in author_labels and author_span:
                res.append(StringWithSpan(
                    content=text[author_span.start:author_span.end],
                    start=author_span.start,
                    end=author_span.end,
                ))
                author_span = None

        return res if res else None

    @staticmethod
    def _extract_first_contiguous_label_group_token_level(
            text: str,
            spans,
            label_ids: List[int],
            target_label: BibEntryLabel
    ) -> Optional[StringWithSpan]:
        """Return the first contiguous run of `target_label` as a StringWithSpan.

        Scanning stops at the end of the first run; later occurrences of the
        label are ignored. Returns None when the label never appears.
        """
        res = None
        existing_span = None

        for word_index, label_id in enumerate(label_ids):
            if label_id == target_label.value:
                # Middle of label span
                if existing_span:
                    current_span = spans[word_index]
                    existing_span = existing_span._replace(end=current_span.end)
                # First label encounter
                else:
                    existing_span = spans[word_index]
            # End of label span
            elif existing_span:
                break

        if existing_span:
            res = StringWithSpan(
                content=text[existing_span.start:existing_span.end],
                start=existing_span.start,
                end=existing_span.end,
            )

        return res

    @staticmethod
    def _clean_str(s: str) -> Optional[str]:
        """Strip diacritics and hyphenated line breaks; None if nothing remains."""
        without_diacritics = unidecode(s.strip())
        # Raw string: "-\s+" was a deprecated non-raw escape sequence.
        subbed = re.sub(r"-\s+", "", without_diacritics)
        if subbed:
            return subbed
        else:
            return None

    @staticmethod
    def _clean_doi(doi: str) -> Optional[str]:
        """Lowercase and de-whitespace a DOI; None unless it starts with '10.'."""
        lower_trimmed = doi.strip().lower()
        if lower_trimmed.startswith("10."):
            # Raw string: "\s" was a deprecated non-raw escape sequence.
            return re.sub(r"\s", "", lower_trimmed)
        else:
            return None
| 9,946 | 43.806306 | 193 | py |
mmda | mmda-main/src/ai2_internal/vila/interface.py | """
This file contains the classes required by Semantic Scholar's
TIMO tooling.
You must provide a wrapper around your model, as well
as a definition of the objects it expects, and those it returns.
"""
import logging
from typing import List
import torch
from pydantic import BaseModel, BaseSettings, Field
from ai2_internal import api
from mmda.predictors.hf_predictors.token_classification_predictor import (
IVILATokenClassificationPredictor,
)
from mmda.types.document import Document, SpanGroup
from mmda.types.image import frombase64
logger = logging.getLogger(__name__)
class Instance(BaseModel):
    """
    One unit of inference work: a document's symbols plus its page images
    and token/row/page span groups, as produced by the client application.
    """

    symbols: str
    images: List[str]
    tokens: List[api.SpanGroup]
    rows: List[api.SpanGroup]
    pages: List[api.SpanGroup]

    def to_mmda(self):
        """Materialize this payload as a fully annotated mmda Document."""
        document = Document(symbols=self.symbols)
        # Annotate each span-group field in turn (tokens, then rows, then pages).
        for field_name in ("tokens", "rows", "pages"):
            converted = [group.to_mmda() for group in getattr(self, field_name)]
            document.annotate(**{field_name: converted})
        decoded_images = [frombase64(encoded) for encoded in self.images]
        document.annotate_images(decoded_images)
        return document
class Prediction(BaseModel):
    """
    Inference output for a single Instance: the predicted span groups.
    """

    groups: List[api.SpanGroup]

    @classmethod
    def from_mmda(cls, groups: List[SpanGroup]) -> "Prediction":
        """Build a Prediction by converting mmda SpanGroups to API span groups."""
        converted = [api.SpanGroup.from_mmda(group) for group in groups]
        return cls(groups=converted)
class PredictorConfig(BaseSettings):
    """
    Configuration required by the model to do its work.
    Uninitialized fields will be set via Environment variables.
    """
    # Batch-size cap for VILA inference; keeps peak memory bounded by limiting
    # how many subpages are sent through the model per forward pass.
    subpage_per_run: int = Field(
        default=2,
        description="The maximum number of subpages we can send to the models at one time. "
                    "Used for capping the maximum memory usage during the vila dep."
    )
class Predictor:
    """
    Thin adapter exposing the underlying VILA token-classification predictor
    through the batch-inference interface expected by the client application.
    """

    _config: PredictorConfig
    _artifacts_dir: str

    def __init__(self, config: PredictorConfig, artifacts_dir: str):
        self._config = config
        self._artifacts_dir = artifacts_dir
        self._load_model()

    def _load_model(self) -> None:
        """Instantiate the VILA predictor from artifacts, on GPU when available."""
        if torch.cuda.is_available():
            device = "cuda"
            logger.info("CUDA device detected, running model with GPU acceleration.")
        else:
            device = None
            logger.info("No CUDA device detected, running model on CPU.")
        self._predictor = IVILATokenClassificationPredictor.from_pretrained(
            self._artifacts_dir,
            device=device
        )

    def predict_batch(self, instances: List[Instance]) -> List[Prediction]:
        """
        Produce one Prediction per provided Instance, running the underlying
        predictor on each converted mmda document in turn.
        """
        results = []
        for instance in instances:
            groups = self._predictor.predict(
                instance.to_mmda(), subpage_per_run=self._config.subpage_per_run
            )
            results.append(
                Prediction(groups=[api.SpanGroup.from_mmda(sg) for sg in groups])
            )
        return results
| 3,302 | 27.721739 | 92 | py |
mmda | mmda-main/src/ai2_internal/layout_parser/interface.py | """
This file contains the classes required by Semantic Scholar's
TIMO tooling.
You must provide a wrapper around your model, as well
as a definition of the objects it expects, and those it returns.
"""
import logging
from typing import List
import torch
from pydantic import BaseModel, BaseSettings, Field
from ai2_internal.api import BoxGroup
from mmda.predictors.lp_predictors import LayoutParserPredictor
from mmda.types import image
from mmda.types.document import Document
logger = logging.getLogger(__name__)
class Instance(BaseModel):
    """
    Describes one Instance over which the model performs inference.

    Input is a list of page images, base64-encoded.
    """

    # NOTE(review): presumably one entry per PDF page, in page order — confirm
    # against the caller that encodes these.
    page_images: List[str] = Field(description="List of base64-encoded page images")
class Prediction(BaseModel):
    """Output is a set of bounding boxes with metadata."""

    # Box groups produced by the layout-parser models for the instance's pages.
    groups: List[BoxGroup] = Field(description="PDF Text Regions")
class PredictorConfig(BaseSettings):
    """
    Configuration required by the model to do its work.
    Uninitialized fields will be set via Environment variables.
    """
    # NOTE(review): left without a type annotation — adding one would likely turn
    # this into a pydantic settings field (env-overridable) instead of a plain
    # class-level default; confirm intent before annotating.
    weights_paths = ["lp://efficientdet/PubLayNet", "lp://efficientdet/MFD"]
class Predictor:
    """
    Interface on to your underlying model.

    Instantiated once at application startup as a singleton; loads the
    layout-parser models and serves predictions. If an artifacts.tar.gz was
    specified for the model, it will have been extracted to the
    ``artifacts_dir`` passed to the constructor.
    """

    _config: PredictorConfig
    _artifacts_dir: str

    def __init__(self, config: PredictorConfig, artifacts_dir: str):
        self._config = config
        self._artifacts_dir = artifacts_dir
        self._load_model()

    def _load_model(self) -> None:
        """
        Ready the model for inference.

        LayoutParser uses pre-trained PubLayNet and MFD models managed by the
        underlying layoutparser tool:
        https://layout-parser.readthedocs.io/en/latest/api_doc/models.html
        """
        if torch.cuda.is_available():
            device = "cuda"
            logger.info("CUDA device detected, running model with GPU acceleration.")
        else:
            device = None
            logger.info("No CUDA device detected, running model on CPU.")
        self._lp_predictors = [
            LayoutParserPredictor.from_pretrained(path, device=device)
            for path in self._config.weights_paths
        ]

    def predict_one(self, instance: Instance) -> Prediction:
        """Run every configured layout predictor over the instance's page images."""
        decoded = [image.frombase64(encoded) for encoded in instance.page_images]
        doc = Document(symbols="")
        doc.annotate_images(decoded)
        # Concatenate the regions found by each model, in predictor order.
        box_groups = [
            group
            for predictor in self._lp_predictors
            for group in predictor.predict(doc)
        ]
        return Prediction(groups=[BoxGroup.from_mmda(bg) for bg in box_groups])

    def predict_batch(self, instances: List[Instance]) -> List[Prediction]:
        """
        Produce a Prediction for each provided Instance.

        The underlying models gain nothing from explicit batching here, so each
        Instance is simply delegated to ``predict_one``. The batch size passed
        into this method is configurable via environment variable in the
        calling application.
        """
        return [self.predict_one(instance) for instance in instances]
| 3,946 | 32.449153 | 85 | py |
mmda | mmda-main/tests/test_recipes/core_recipe_fixtures.py | FIRST_1000_SYMBOLS = """Field\nTask\nDataset\nSOTA\nB ERT -Base\nS CI B ERT\nFrozen\nFinetune\nFrozen\nFinetune\nBio\nNER\nBC5CDR (Li et al., 2016)\n88.85 7\n85.08\n86.72\n88.73\n90.01\nJNLPBA (Collier and Kim, 2004)\n78.58\n74.05\n76.09\n75.77\n77.28\nNCBI-disease (Dogan et al., 2014)\n89.36\n84.06\n86.88\n86.39\n88.57\nPICO\nEBM-NLP (Nye et al., 2018)\n66.30\n61.44\n71.53\n68.30\n72.28\nDEP\nGENIA (Kim et al., 2003) - LAS\n91.92\n90.22\n90.33\n90.36\n90.43\nGENIA (Kim et al., 2003) - UAS\n92.84\n91.84\n91.89\n92.00\n91.99\nREL\nChemProt (Kringelum et al., 2016)\n76.68\n68.21\n79.14\n75.03\n83.64\nCS\nNER\nSciERC (Luan et al., 2018)\n64.20\n63.58\n65.24\n65.77\n67.57\nREL\nSciERC (Luan et al., 2018)\nn/a\n72.74\n78.71\n75.25\n79.97\nCLS\nACL-ARC (Jurgens et al., 2018)\n67.9\n62.04\n63.91\n60.74\n70.98\nMulti\nCLS\nPaper Field\nn/a\n63.64\n65.37\n64.38\n65.71\nSciCite (Cohan et al., 2019)\n84.0\n84.31\n84.85\n85.42\n85.49\nAverage\n73.58\n77.16\n76.01\n79.27\nTable 1: Test performances of all B ERT variants on all tasks and datasets. Bold indicates the SOTA result (multiple\nresults bolded if difference wi"""
BASE64_PAGE_IMAGE = "iVBORw0KGgoAAAANSUhEUgAAAlMAAANKCAIAAAAhjVxYAAEAAElEQVR4nOydeVxN+f/HP7dVm1KRLVJUshQlS9YRphQSytdYRonJlmXsZF9HWTIxtrRYKqKkEJWQEqJ931fdtruvn98fH3N+Z+5NqBvN3M/zjx7nfM77nPf7LJ33Ped8Pq83BUIIMBgMBoORGmR+dAAYDAaDwXxXcObDYDAYjHSBMx8Gg8FgpAuc+TAYDAYjXeDMh8FgMBjpAmc+DAaDwUgXOPNhMBgMRrrAmQ+DwWAw0gXOfBgMBoORLnDmw2AwGIx0gTMfBoPBYKQLnPkwGAwGI13gzIfBYDAY6QJnPgwGg8FIFzjzYTAYDEa6wJkPg8FgMNIFznwYDAaDkS5w5sNgMBiMdIEzHwaDwWCkC5z5MBgMBiNd4MyHwWAwGOkCZz4MBoPBSBc482EwGAxGusCZD4PBYDDSBc58GAwGg5EucObDYDAYjHSBMx8Gg8FgpAuc+TAYDAYjXeDMh8FgMBjpAmc+DAaDwUgXOPNhMBgMRrrAmQ+DwWAw0gXOfBgMBoORLnDmw2AwGIx0gTMfBoPBYKQLnPkwGAwGI13gzIfBYDAY6QJnPgwGg8FIFzjzYTAYDEa6wJkPg8FgMNIFznwYDAaDkS5w5sNgMBiMdIEzHwaDwWCkC5z5MBgMBiNd4MyHwWAwGOkCZz4MBoPBSBc482EwGAxGusCZD4PBYDDSBc58GAwGg5EucObDYDAYjHSBMx8Gg8FgpAuc+TAYDAYjXeDMh8FgMBjpAmc+DAaDwUgXOPNhMBgMRrrAmQ+DwWAw0gXOfBgMBoORLnDmw2AwGIx0gTMfBoPBYKQLnPkwGAwGI13gzIfBYDAY6ULuRwfw46FQKD86BAwGg/kBQAh/dAg/BvzMh8FgMBjpgiK1OR+DwWAw0gl+5sNgMBiMdIEzHwaDwWCkC5z5WoDP54s3stlsYhpCyOVyv2NEn40EIz0IhUIIoUAgILcIhcLWV+n4uDCiiJ8pTGcDZz5QW1v7008/rV+//ubNmydOnLh+/fq1a9dErtqwsLAVK1agaaFQ6Obm9vjxY8mG0dzcPHPmzPPnz/ft2/fy5cuzZ8/mcDjiZidPnjx27Fj73eXk5BgYGJw+fTowMHDr1q0lJSXiNjwe72s29QN/BEgP79+/37dv35kzZxYuXAgAYLFYO3fuvH379sWLF3fu3FlTU9PixbNu3bqGhobvE2FeXt6wYcOOHDly5MiRtWvXtvm+HxMTo6OjExAQsGHDhgsXLkg2yO+AyJn6HPX19U5OTsuXLz979uzSpUurqqq+W4QYgEc1AAB69Oihr6//008/zZ49m8ViNTY29ujRAy0SCoUsFktFRWXkyJF37txBjUwm09jYWOJhUCiUP//8s3///rt373ZxcZkyZYpQKKTRaEpKSnJycgAAOp2uqqpqZmb2/PlzPp8vEAjk5eVlZNr428XIyEhNTW3evHl9+vTJzs6eMWNGcnJy165dCY9HjhyZPXu2iYkJhLCpqUlDQwMAwOVyIYSKiooAABaLJS8vDyFct27dmTNnFBQUJHcwMKKEhIQMGzbMycnJ0NAQALBv376BAwfOnz8fAODr63vixAnxi4dKpebl5V28eHHLli3fIcJBgwbp6OjY2tqamppaWVl9+PDBxMRkx44dJiYmOjo6NTU1S5Ys2b1794ABA5qbm21tbaurq/Pz89++fTt58uTKyspNmzah7VhbWwsEgsWLFzs6Ovbv33/lypUAAC6Xy+fzlZWVAQBMJrNLly7oyqfRaGpqat9h774ekTMFALh+/bqsrCyDwairq9u4ceOxY8c0NTV/++23YcOGqamprV27dufOnTdv3tywYcOlS5dKS0vHjx//5s2b7du3BwQE5OXl9e3bV0dHZ/z48d7e3oqKimpqahUVFSdOnPixu/lvBz/zAQAAhUJ5/fr13bt3d+3apaqqam1
tXV1dnZSU9OTJE3d3d/Lj3aFDh5KSkpKTkyUeg5qaWv/+/YlZfX39M2fO5Obm/vrrrwCAM2fOFBUVXbx4ES198OBBaGhoOx+2iIGMxsbGgwYNio2NPXbsGPLI5/OfPXuWmppKpVJ37doVGhoaFhZWWVnp5+d35swZJpMZFhaWlJRka2ubl5f38uXLjIyM9kSC+SILFizYunXr2bNnbWxsAAB3796dNGkSWjR+/Pjo6GiRi0dJSSk8PDwoKMjPz6/Ft/cdAYVCefHixZ9//tmrV6/hw4crKipSKBQLCws7O7spU6bIy8vLyclZWFhYWVlduHBh4MCBK1eurK+vnzVrloODA3k7fD4/Kipq//79a9euBQDExcXduHFj27Zt5eXl4eHh7969++OPPwAA165di4+PRwek8yBypp49exYbG4se78aMGSMnJ6eurq6rqwsAoFAoWVlZQUFBGRkZc+bMAQD07NlTS0tr+vTpTk5OAIBevXqpq6v/+uuvR48elZeX37Rpk6am5qBBg9zd3X/oLv4XwJnvE0ZGRmPGjDE0NFRTU+vTpw8AwNfXVygUTp8+nUgwRUVFpaWlU6dOHTVq1HcIydXVlcfjlZSUsFis4uLisLAw9P/w9OlTCoWyaNGiLl26SMqXkpIShUIhPPJ4vJ49e44ePbpbt25Lly4FALx69YrP5/v7+0+aNKlLly5+fn4cDgf9u6qrq48YMUJSkWBaZPjw4UlJSTExMYsWLQJin6LFc5tQKExJSUlKSurRo0doaOh3i9PIyMjCwqKmpiY7OxsAQKFQnj175u3tXVZWhmYjIiICAwNXr14tkqrfvXsXGRmZlZUFAJCVlTU1NR09enR0dDSXyx0xYoSFhQWEMC0tTSAQnDp1ysnJKS0tLSkpSV5efuLEiSwW67vt4BcROVOxsbGTJ09GiyZOnAgAoFAoxI/OPn36mJubKykpJSQkoEUZGRk3b96MiopCs+np6Rs3bty1a1fXrl27deuG1howYMB3363/GjjzAQAAhLBr1649e/ZEr1bQdcnj8YYNG7Zo0SLitg4hrKmpQQYdNw6S2PKBAwf69++vqakpFAp37typpaXl6uoKADAzM/Px8WlubpaUx48fP+bl5U2aNInsEUWSl5cXGBhoYWEhFApVVVUDAwM3btz4/v17Op0+bdo0d3d3DQ0NPCT0O3Dnzh0dHZ2wsLCUlJS6ujobG5tXr16hRcnJycRzD3EuIiIifvvtt/Hjxx87dszb2/u7xamtrW1paWllZRUSEoJaJk6cuGHDhn79+qHZqVOnFhUVqaioiKzYpUsXVVVV9CJdRkamd+/eDg4OQqHw5cuX9+/fLygoMDAwEAqFkyZNWrt27axZs2RkZAQCwYwZMzw8PFr8Iv6jEDlT/fr1q6ys/Jxx165djY2Nly5deuXKFdQyZMgQZ2fn+fPn0+l0AMDQoUOFQiH63oGRIPiAgtra2rS0tK5du9ra2srLyzc0NOTn57979+7XX3+1t7e3t7d3cnLKzs4uKSnp3r07hULZsmVLZWUlhNDOzk7iwcTExDQ0NLx8+XLcuHFZWVmBgYF0Ov3Ro0epqamzZ8/mcrlpaWnNzc2urq4ODg5+fn7otUkbyMnJKSsrCw4O7tGjR2pqqr+/v7q6OtmjiYnJX3/9NWXKlBcvXmhra79+/To+Pp7H4y1atEhLS2vKlCk2NjZTpkxZvXq1trb29evX//e//0n2UGDI3L17t66urnv37pMmTdLW1j506NCePXuEQiGXy01PTz906BAgXTwGBga+vr7onOrp6eXn5/v7+y9ZsqRDI8zPzy8oKLh7925SUlJOTo63tzeHw8nPz4+Pj8/Pz3/69OmpU6dQv6o///xz2bJlly5d0tDQKCkp+fDhw9ixYwcPHjx48GAAwJMnT5qbmwMDAysrKw0NDceNG3f8+PHi4mJlZeWioqLi4mIzM7P58+ebmJgUFxe7urqOHj2a6H3WGRA5U4sWLVq
/fv3FixcNDQ01NDSGDBmSmZnZ2Ng4duzY169fKyoqduvWLTo6eseOHQCAjIyMnJyc+/fvR0ZG7t27Nz09PSsr6/Tp046OjkpKSlZWVunp6agr3I/ey389WMOlNTgcjoKCgoiwJ5PJVFRUlJWV7WjvQqEQdZCTlZWlUCgsFgt94f8+HmVkZLhcroKCAofDUVRURIcCvVWTl5cHALDZbPS6VSAQUCiUNve1wXwNHA5HXl6ewWCQO3QwGAxZWVkJvvTutJAvQvI/AnERdh5aPFMsFktRURH/j3QecObDYDAYjHSBf4NgMBgMRrrA3/kwGKkGddciv9JHsy02tj5LNLayhc/ZfL3T9gTWhuD/jYF9ffBS+84PP/NhMBgMRrrA3/kwGEwL1NbWOjs7Dxs2bOzYsWVlZX369Plu3XczMjKmTZu2c+dOTU3N2NjYXbt2RUdHu7m5tbIK6o3VEcFwudzNmzfn5eUtW7aMwWAUFhYePHiwIxyJI34KWCzWsmXLWule13HH4T8GftuJwWBaQFzVj8vlUigU1O+3sbFRTU1NVlYWCekBAGRlZeXk5AjBPwAAm80WCATiQ/e+yJAhQ5SVlX/55Rd1dfWffvpJRUVl2bJl4O++x1wul9hmc3Nz165dq6urT58+vX//fqFQqKCgwOVy5eTkZGVl2Ww2hUJB0+Bvvb1vHRunoKBgaWkpKyvr5OTE4XDq6uoghHw+XygUKioqMplMCoWipKQkFAqRzi2FQkG5B8XWnuPQirCiyDaRihuPx0NSghBCdC4AAPLy8mw2W1FRkc1mKykptfk4/MeQ6p3HYDCtgFT9IIQJCQlbtmwxMzNzc3MbNWpUVFTUtGnTbt265ebmlpWV1bNnzxMnTixbtqx///5oKN4vv/ySlZX1/v17fX39pqam48ePt8F1dHQ0hLCiosLR0dHJyenFixf/+9//LCwsMjMzHR0dx48ff//+/crKSg6H89NPP71586a4uHjdunU+Pj5RUVFNTU22tra//fabm5tbaGjogwcPwsLCunXrdvjwYX9//549e35rMIWFhffv379///7hw4cdHBxGjhyJlJ40NDSKioq0tbXRnr569aqhoeHQoUNEbBoaGu08DsQp2Lt3r7W1dWBg4O3bt8nbvHbtmpaW1rlz5wICApCUYGRkZK9evUaPHr1hw4bg4OAJEybs3r374sWLgYGBiYmJbT4O/yVw5sNgMJ8FqfrV1NTo6Oh0797dw8MjNze3uLh40qRJfD7/5MmT/v7+ycnJKioqCxYsWLZs2cKFC5Hgn5GREYvF2rp1K6Hd9a2MGjVKVlb2+fPnenp6SEXdwMBg0qRJU6ZMCQkJKS4ulpeXHzJkSHFxsa6uro6OzqBBg/r27QsAGDx48KtXr0xMTLp167Z8+fLbt28zmUw/Pz93d/c5c+bU19e34Y7fq1evMWPGVFVVKSoq6unp2dnZjRw5ctSoUa9fv2axWEOHDs3Pz29ubj569Gh0dLSfnx8R26BBg9p5HIhTQAgrko8tUnGbPXv2xIkTVVRUkJRgZmYmm81GygDdunXr3r27k5NTaWnp27dv23kc/jPgzIfBYFpGRNUPDcSWlZXNzc0FAPTo0UNZWZnH4/3+++8hISEMBgMJ/vXu3buysjItLQ1tREQI4utda2lpqaurI3lP8kZkZGRQwRA1NTU7O7uqqipUQoRYUaRAErJHenszZsxoRUuslWCUlJS0tbWRWAx65QsA+PjxI3rN2LNnTwqFsnnz5r1798rIyJBj+/DhQzuPg7iwIgGFQlFUVEQqbhMnTuRwOB16HP5L4MyHwWBaQETVr7q6uqioKCMjY8iQIRMmTLh3715aWtqWLVsOHjw4ZMiQ1NTUvLw8suBfVlZWfn5+Xl5eSUlJQ0MDobb8NWRkZFRVVcXExDg6OgIACgsLc3NzS0pK8vLycnJyhEJhQUHBhg0bbGxs3r59O23atEmTJqGaIRMnTjx8+HC/fv1KS0szMzO
rq6srKipKSkpycnLIenvfdBy4XG5CQkJubm5dXZ22tjaLxcrLy0tKSjI1NT1y5AgqTLFt27anT59mZmYCAE6cOPHbb79Nnz4dxdae4/A5YcX8/Hxim927dyeruCEpwVGjRq1evRqtUlJSUl5eXlhYmJ6eTqFQ2nwc/mPgvp0YDOabYTKZLWrptSj410EIhUI+n4+6kxB9Gnk8HuqDI27fEVJnXC5XRkZGvLcIObbvgLiUII/Hk5eXFwqF4oeiE0q+fX9w5sNgMBiMdIFHsmMwGAxGusCZD4PBYDDSBc58GAwGg5EucN/ONvY2xmAwmH87UtvPAz/zYTAYDEa6wH07MRgMBiNd4Gc+DAaDwUgXOPNhREGS82TpI4FAgBrB33r5IgbthM8XfusqbDa/laUCwT/eZJC3L/6Sg0bjfqv3H4LIGWndAMFms69ceXf6dNKJEy8l6B3885JA8Pn8zxlLCpHNIi2urzTuiBi+6AJVS+hokIgamhYIBHV1dSIGdDqdwWAQszwej8VifYfAOjO4h4uUkpeXN3fuXFRxrbKy8tSpU7Kyso8fP75///7o0aOrq6sTExNDQkKCg4MDAgKUlJSuX7/OZDJ37NhhYGDQo0ePmzdvrl69OiYm5s2bN66uriUlJT169Fi6dGlubu6MGTPWr19PpVKVlJR27txJeIQQ3r1719x84rJl9y0seh88+JOCgiwAoLaW8fJl2Zw5xuJBcrkCZCNCYWHDnDk3P3z4rcVd8/JKjI8vkZWl3L69oLmZe/z4Cysr3Zoaxq+/muXmUpctu/vypQuyDA/P+fiRaWdnKCvLf/78+fTp09t/YDsCKpUaEhIyYMCAysrKX3/91cfHZ+jQoa9evdq2bVuLBrm5uR4eHrKysn/88cfy5SOKihorK2mS8k6+JJABqhhgZWVVU1Pz66+/BgQEDBgwICYmZsuWLS3qvLQ/BgCAj49Pnz59ZsyYgQwuXLjw+PFjeXn5oqKixMREb2/v+Ph4WVnZ27dvS6oLm0gM4ruZkpKyd+9eNTW1goICf3//tLS0Dx8+lJSU+Pv7SySAFvHy8mpsbMzPz58/f76VlZWrq+uECRMUFBTWr1+PDJ4/fx4UFMRisVauXGlqahoUFLR3797g4GArK6uOi+pfAMRIK1OnTk1NTYUQjhs37u3btyUlJcOHD+fxeGipn5+fUChcvnz527dvUcu6desuX76MpisqKh4/fhwWFubq6opaXF1d//jjDwjhkCFDqqurBQJBjx49GhoaCHd+fn7l5eUQQheXe2FhWeRIOBw+eZbJ5PF4Ai5XsHJlBHmRUChsaGCh6fHjr7S4Ux8/MrKz6yCEy5ffS06u2Ls37vnzUgihnd11Op0LIZw48SqyfPKkcMuWx8SKRUVFwcHBX3fkvjdBQUEXL14UCoUrV66sqKhYtGgRhNDNzU0oFIobQAi3bt368OFDPv/Tobt2LVUgEErEu8glgdi7d+/z588hhHZ2dnQ6HdVqOHv27IcPH9rstJUYIIT79++/d+8e2aCsrAxCyOPxtm7d+vHjx+zsbAjh8uXLk5OTOygG8d1EMUAIN2zYIBAIamtrIYSOjo4CgUBSMYijoaFx7dq1U6dOmZiYbNu2zcnJ6cOHD6qqqjQaDRmMGTPG19f3zJkzkydPhhBWVVUBAND5kmbw207phUKhvHjxAknuDh8+/NGjR5aWloQC4dKlS1ks1qhRo1xcXLZv3w4AuH37NvETu3fv3tbW1uRf06tWrQoMDESbraysvHHjxujRozU0NAiDqKgoVGOFQqGQV7x3L2ft2ihiNiwsOymp3NY2KC+P+vJlWUbGR9ReX8/atSs2NDQzLCy7lZ3S1lY2MtICACgqyhoaar1+XdG/vzoAQFNTKSenDgAgI/PJtZdXora28qJFd2JiCgEAenp6V65cadOB7HDs7e0DAgJWrVrl6enZu3dvNTU1BweHJUuWEIeRbAAAMDc3/+uvv3766Sf0QpLPFxJ
73U7vIpcE4vXr16iigqamZk5OztKlS62trRUUFIYNG9au3f5MDEKh8K+//qqtrV2wYEFpaSkyQPWJnj59OnnyZG1tbSMjIwCAoqKioaFhR8QAABDfTRQDjUbr2rWrjIyMpqZmQEBAv379WhQRlRR9+/atqamBEFZVVaWkpMjLyysoKNDp9OzsbAAAhPDt27eoMSUlBQAgLy/fccH8i8CZT6oxMjKysLCoqanJzs5mMBjkTzUAAGVl5VWrVr1+/frx48ccDkfcgIySkhJxI46Pjz9+/HhwcDCxlMfjNTU1tbjikCHdhcL///bm55fK4QjQy0919S4jRnwqIaah0WXpUlMAwKtX5V/cr6ysupkzDdXVFYVCiG76MjIUkbdeZWXNv/8+7swZm3374lELUV68s8Hlcu3t7alUKnrm1tfXNzMz27dvX4sGAID58+eHhoaampo+e/YMALBkiamkvItcEsiAkEWWQUeZQnFycjpz5gx6vJAI5BgKCwstLS1dXV2dnZ0vXLhANouJiZk6dSqazsrKmjlzprq6ekfEAAD43G5GRETMnDkTACAjIzN16tT09HSiUFFHcPny5YKCgvDwcHNzc7JwNrrakXp1x3n/94IPilSjra1taWlpZWUVEhJibW394sULLvdTdw/iG7isrKyFhYW8vPyMGTMeP35MrCvykTw0NHThwoUAAAjhwoULlyxZ4ubmRiyVl5dns9niAaSn14okJDqdO22avrv7KA2NLpDUGyUvrz4w8IOFRW9ymmyRujpmeXnzzJmDaDTuyJG90CeuxkY2ehYk6NpVsaGBraWlRHSWUVFRQXXXOhvBwcEWFhbBwcGJiYnoN4qnp6eRkVF+fr64AbHWkCFDUE0cObl2/ZuLb5y4JNDsyJEjUbG3xsZGIyOjsLAwd3f3ffv2RUZGtsfv52Lo1atXeXk5AMDQ0JBcZE4gEAgEAhRVXV1deXn5zJkzia4fko0BAPC53UxKSho1ahQAgEKh9O7de/HixTU1NZKKQRxLS8sdO3akpqYePnx41KhRPB6PzWarqakZGxtHR0czmUyi0dLSEvzdwwtK/WA23MNFSsnPzy8oKLh7925SUlJOTo63t7eent7OnTvd3NxmzZolEAh69eolJycXGRk5atSouXPnysjInD592sPDg8FgDBgwgM1mT548OT4+PjMz89atW9XV1UpKSps2bcrJyamoqHjy5MmmTZvmz5+/fft2T09PVBJl4MCBEMKPH5lpaTUyMhQmk5eXR+VyBUOH9sjPr6fTuaqqCgCAKVP0bGyCpkzRW73aUltb+fr1tP/9bxgAoLKS9uJFmba28uvXFenptUVFDWVlzffv55qa6pSVvUxLSzt48CAAgEbjLlgQoq7exccn2d7ecM0aS1/fFIEA2tkZKivLFxQ05OTUZWXVDR6s7ek56dKlt3p6Gu7uowAAEMJ+/fr90HPyWWxsbC5evKiiomJra2tkZNTU1IQSgIGBga+vr6mpKdmARqP9/vvvtra2ysrKI0eOlKz3V69ekS8J5H3NmjW+vr4CgcDOzk5ZWdnU1DQ2NraystLBwaH93sVjUFFRcXR0vH//fmpq6vr161EM48aNi4uLmzhxIgCARqMtWLBAXV3dx8fH3t7e1dVV4jEAAMi7mZqaiq55Op2uoqICABAIBNu2bZs8eTKfz7e2tpZIAC1y9+7dhw8fopMyYMAAFxeXyMjI48ePy8jIODg4REREHD9+/OLFiwCAo0ePstlsPz8/AEBYWJipqamamlrHBdbJwSPZMaJQqVQtLS1iWkNDg/wkxGAwZGVl21Dfq7KyMiMjY9q0aV+0ZLP5XbrIAQAEAkih/P+XOQ5HoKgoi/6iFoEAyspSuFxubGws8Q1SBIEAcjh8ZeUWPm9wOAI+X6iiIg8AiIyMHDx4sL6+/rfu1/cBQshkMtFdFQBAp9NVVVUBAAKBAJ0dsgGNRpOXl5dgDTbyxsmXBOFdIBBwOByiJyeDwSB
C7YgYAADNzc3KyspycnJEDGw2W0FBoUNf7onEQN5NPp+PguHz+YqKigAAGo0mJyenpKTUcfEAUmFCAqJ0IofDQZGgz734Cx8ZnPkw34/y8nJNTU1J9XQnINJAm2EwGDU1NZ027WEwGMmCMx8Gg8FgpAvcwwWDwWAw0gXOfBgMBoORLnDm60SEh4cPGjQI9YFOTEycN2/ey5cve/fu7ePjc+PGDTc3t9LS0tDQUAMDg8uXL7u7u9+/f/9HhyzK1wgV8nhCICatCQDg84WoEQ0zaIOYpwgQQi63tfF54jF8vTE5PKEQksdaQAjbH7w4DAaj6W9QS3V1tcjXiubm5tYNJAUx+oVAfKxni+NYJAgU0+0kx9DK2FPJ0tG72X7ENVd/VCSdiO8pGINpHT6fP3369PHjx3O5XAihj48PhNDAwKCxsRFCWF1dTaPRPn78aGxsDCH88OHDiBEj2uPu9OnTw4cPz87O3rVr18yZM5HYlbe39+7duz98+NCrV6+QkBBkyeVyN2zYMGPGDC8vL1dX1+Li4oMHD44bN+7y5cvLly9PSEggtrl69er6+noRR+np6e/evYMQenklenrG3ryZvnnzowMH4sk2TU3siROv5ufX37mT9csvdyCEly695fPbrrklEAhdXcPv389Fs0wmb+XKiDlzbtbVMS9derts2d3CwobHjwsKCkSjbZH793MXLbo9fvyVkpJGCOGePbFPnxbt2xcHIUxKKj97NunPP1+/fl2BjI8cSbh7NxtCGBMT09zc3OZdEMHU9NOAdBMTk7KyMmtr67dv344YMQKpZEEIa2pq0ODIcePGtWjQNs6fP+/o6Ojs7Dx69Gikl/bixYuNGzcSBo2NjTt27IiMjLxy5QqEMCcnx8bGxs7ODkmISQTxGM6ePXvnzh0Gg9FiDMePH3/06NH27dsJES+JxyC+m69fv545c6azs/OoUaNSU1N//vlnZ2fnGTNmXLx4UVIxiOPu7u7k5OTk5LRs2TLxowQhvH///qJFi8aPH19SUgIhPHv2bGxs7JEjRzoupH8FOPN1Ivh8vp+f34YNG1avXg0hPH/+PIRw4MCBN2/eDAsLO3r0KISwrq5u4MCBhYWFmzdvRi1tJi0tberUqRDCmJiY6dOnb926FUKYmZn59OlTCKGBgQFZdfPGjRseHh4QQk9Pz927dyckJMyZMwdC+OjRo1mzZiGburq66dOnHzt2jOyFw+GcOHECQnjnTtbixXdQo0AgDAh4DyHk8QSEFOeyZXfz8+uLixtR5uPzhSjzCQRCJLkJIeRw+Gx2y0qeLBaPMIMQ0micP/54SWQ+COHFi2+2bn1cWtp09Ohz4r6AdEFZLB6EsLmZAyEkb4QAiX/6+78/fvxFXFzxwYPPIISenrHx8cW2tkFcroDN5v/8cyCEMC2tZtOmhyjzCQSCkydPfsWp+DJcLtfb2zstLe3cuXOHDh0KCQnp0qVLdnb22LFj37x5g2xu3rz5/PnzmpoaOp3eokHbIEtiQgibmprOnTuHLgaEiG6niGqoRBCJQVy3kxxDenr64sWLIYRBQUGnT5/uoBjEd5Os21ldXY1+v3p5eZWWlkoqBhGEQiHSwn3z5k1gYKBIhAh0WPz9/Y8fP96i6Kt0gt92djpOnDiRnZ199epVomXUqFEWFhY9e37S8eJwOOfPn6dSqVu3bm2PI7J+5tmzZ589exYSEiIjI4NGRImoa6JpKpWanJw8atQoCoVSU1Nz7969gICAFStWIJvw8PCgoCA/Pz/yi6bbt28PGjQIAHD7duaMGQNRo4wM5Zdfhr95U3XixMuXL8vIup0IGo1rbe1fXU1PSqp48qTI3T3y8ePCo0efr1x5//DhBGRDVvI8cyZp9eoHp0692rLlMQDg0KGEpKSK5OQKkf3NzaXu3v10y5ZxFApFKIS///44JCSzqYljaXkxKChtyZIwH5/kS5febt78SCQeKytdAACXK7Cw6E1ogfbvr5GSUllR0Sw
vL6OoKFta2sRk8oqKGgcP7v73bsq8ffuWSqW27QSRkZeX9/DwGDp0aFJSEnqSGDFihLm5ub29PTFcPTw8fPz48VOnTi0qKmrRoG2QJTEBAA8ePBApaiGi2ymiGioRyDG0qNtJjqGysrK4uBgAoKOjU1RU1BExADFxVPBP3U4dHR00fq60tFRXV1dSMYhAoVCQFm54eLidnZ1IhAhUk4HL5VpYWLQo+iqd4MzXiUDyS7Kysrdu3Tpx4kRDQwNq19LS6tu379KlS5FGvoqKytGjR6lUqgQVlhUVFW/fvr1z585WNAazs7Pv3bt38OBBe3t7AICGhoaZmZmxsXFERAQAQCgUpqSkJCUl9ejRIzQ0lFgrMzOze/fuAAAGgyfy9evkyZfz55vY2g5KTa0uKGggL1JTU+jTRw0A4Ov7WiiE06cbcLkCY2PtYcN67Ns3GdmQlTyNjLQNDbV27pyQnFxRVNRYWto0deqAUaN6i+yCgoJsWlrt3bs5AAAZGQrSM1NXV+zTp+u8eSYLFw6lUCjr149OTa0W330mk8fh8KdM0RPRAiXuIPLyMjdupI8e3YdG4zQ3c9DOGhgYZGZmfvH4fyUMBqOiokJfX5/L5Q4YMGDOnDm7du16/fo1Wurr65uUlKSvr3/48OEWDdoDksSMjo4eNmxYQ0MDi8Ui5OtEdDtFVEMlCIqhRd1Ocgza2tpz5sw5evSon5/fiBEjOiIGICaOSkDodgIAysvLUWbqaBobGwmFUrJ4KYLJZHI4nClTpsCWRF+lE5z5OhGhoaGPHj1iMplaWlo3b96Uk5PLyMioqqoKCgq6efPmqlWrCgoKnjx5UllZmZOT4+/vf/LkyVu3brXHI/pvQRm3V69e/v7+a9asETdLT0+HEBobGy9fvpx4gFBUVOzfv/+GDRuuXr3K4/EiIiJ+++238ePHHzt2zNvbm1hXW1u7trYWADBjhsHjxwVEO4vFl5WVyc2lAgB69lRVUhIV0kMZhccTDhvWY9GiYSNG9KRQKLKy/3/FtqjkSaFQIIQ1NXRimlgEIRwwoNvDh794esai+gwiP3uJjYv3fBEK4YMHeStXWpC1QCsqmkeO7NWrl6pQCPl8YZ8+XZubOb6+KfHxJQ8e5FVV0ckHWSLcvn179uzZAIDg4GAZGZnAwMB+/folJycjhcbq6mpLS8sbN27IycmJGLTTLyGJyWKxQkNDr1+//v79e6T9D8R0O1EjoRoqKYgYWtTtFIlh48aNGzZsqKqqQr/SJB4D0SK+m4RuJwDg9u3bEpRw+xzv378fPnz45yIUCoUPHjxYuXIljUZrUfRVOsG6nZ2I//3vf6hULABg+PDhJiYmcnJyRO81Z2dnNOHk5IQmMjIy2uzr1KlTTU1N//vf/zgcTkREhJ6e3qZNm8aMGXP48GG05aqqKj8/P01NzZcvX06dOjUhISEjI6O2trZHjx58Pv/Jkyf5+fnXr19/9erVoUOH6uvrfX19/f391dXV9fT08vPz/f39lyxZAgCYNm3aw4cPAQArVpjn5lJ37nxqadmHw+GbmfXcutXq3LnXFArFykpXVVWhoKA+La0GQlBS0lhW1pyfX//uXdWvv5rZ29+wtzd0chr6/n11cXEjoV5GVvLU0lIqKGjIy6svKWns1k2JQqFs2fK4spIGIbSzMwQAsFj8589Lq6vpMjIUD48xc+bcPH/e7t27KiUleVvbQeXlzXl51MzMj+XlzSUlTWVlTW/fZm7dup5Q6F658n5lJS0oKK1fP/XTp39+9KggNbWaQqFMmtSfzxf6+79XVpbfsWMCeil67dp7DY0uurpdAQB0Ot3YuIWiu20jKCgIlTmdNGlSREREdHS0mZmZnZ2dsbFxeHj4hg0bunTpsnTp0gMHDjAYDMJg3rx57fRLSGI6ODg4ODiUlJScOnVqwoQJ4rqdAoFg1apVElQNFY+hRd1OEe3QvLy8W7dunTt3TrLZlywNShZHFdftROTn5xsYGEgwgBY
JCwtbu3atSIQAADc3t9OnT69bt66ysjIoKKhfv37e3t5k0deODqwzgzVcpBQIIYvFalFIDCkQStDXzZs3HRwckIQgny9kMHjq6opEGGy2QPyBjwyHI1BQkGnxs4S4kicBk8lTVJSTlW37x4zw8PBZs2Z9bimNxlVT+6SXyGLx5eVlxEsiFBQU5OTkIIHjjoDFYiFZSEKhkc1mk+U6CYN28jlJzBZ1OyWuGtpiDOK6neQYKisre/cWfdct2RhEdlNctxN0jH6pOGQv5Ag/94/cfrW//wA482G+B7m5uRKsEfodEAqFbDa7nRKjaWlpEqzOisFgJAXOfBgMBoORLnAPl5b5bgIQnRChUIhUHr5V6wGJWfyXDh2H09oRIPesEem2KvKDsnUpma+HzWYTPX75fD4av0U24HA4hMHfQQqbm5vb71rkYviihgsUE1iReAziLsgGHaRU8sUYyDJG3+d/AQ1QI2Y/t+OdX2vme4IzH6ivr3dyclq+fPnZs2eXLl1aVVUFALh27dp/W+MnKyvLxMQkLi4OAFBaWmpnZ5efn89kMj08PM6dOxcaGurg4BAXF3fo0CErK6srV664uLg8f/6cz+cfOXLE3Ny8sLBQZIP+/v67d+8GAEhwrAUAICMjIzU1NSwse8CA0zdupF+8+PbAgWcMhsQGioHPpCWBAG7a9CgwsOUxHpmZH93dI62srkRF5QMALl9+FxWVf+DAs/p6FgAgLCx7xYoIwphKZTk6BgMA0tLSCgoKWtzg13Dw4ME///zTx8dn3759Hz9+HDt2bG5u7oQJE4gklJiYuG7dOiS+AwBgsVibNm06dOgQh8Nps1MAAJVKPX/+fExMDDHG9OXLl9u3bycMmpqadu7c+ejRI8LAx8fn7t27EiyVJx6DiAsRg4CAgMTExL179zKZzO8WA5vNvnDhwqpVq1Df5hMnTsTGxu7YsUPivwDIxMfHR0VFXbhw4c2bN+IRAgD4fP78+fPt7e3v37+fkpJiZ2e3cOFCS0vL7Ozsjouq84MzH9DU1Bw2bJipqenatWv79u178+ZNAMCyZcvQUj6f39jY+APD6yAGDx5sZ2fn4uJSWlrar18/GxubgQMHbt++ffjw4WvXrnVycjp37hyEcNKkST169Fi+fLmzs/OJEyfk5OSmTJnSq1cvkVJ2DAZj8ODBaJo4dOR/eOKxg8vlEjcjJpNJ/ECm0WjiQXK53KioKDMzsylT9OTkZBYuHLpixUh9/W7Ozp/GCzY2sokRCBBCGu3/H0QaGj79wkWaLyzWP359C4UQpc9nz0oCAj6Qn95oNC6fL5SVpQwfrvO5o9fQwP7zz5m3bs07dy6ZweDdv59rb284YUI/H59kAMD48f2amv4/2Tx9+mkw9bBhw8LDw9v8HHDz5s2MjAxzc/N3794FBQVRqdSpU6dmZGTcu3cPGRgZGV24cGHw4MEVFRXofkehUHbv3o3GU7aZhw8fysnJTZ8+PSkpCQDQ3NycmppKfrI5deqUra2tra0tkhM7cOBAv379HBwcJFiIUSQGcRciBoGBgaNHj9bW1m7PT41vjYFGo7m6uvr4+Lx69aqoqCgtLW3atGlDhw6V7G9BEa5fvz5o0KDx48e/fPlSJEJEcHDw1KlTg4KC5s2b17Nnz/v379+4cWP8+PES7HL8bwRnPgAAoFAoWVlZQUFBGRkZc+bModFo1tbW1dXVb968OXHixMuXL4lOw/8ljIyMvLy8HB0d2Ww26gN2+/ZtorJ57969ra2txbVaRLRdAADXr1+PjIx8+vQpAKC4uHjChAkAgDNnzhQVFV28eLGhoSEgIMDX13f//v1xcXE3btzYtm1beXl5eHj4u3fv/vjjDwDAtWvX4uPjbWxsRCIk9F/IHhcuHPrsWUljI3vXrqfv39esXfsgNbW6sZF9/PjL48dfLF16t66OuXnzo4KC+s2bHzU0sC0s/goOzrS3v15d/SkTk3Vhnj0rycmpIxLVsWMvcnOpv/5
6r/VDR0i6mJv3zsr6qKWlBP6WdAHg/4vIAwAePiywtv7/XwkqKipt1hnfsWNHYGCgh4fH5cuXdXV1qVSqQCCAEKK3FAAATU3NP/74Iysra9CgQS9evHjw4IFQKNy3b187n/ns7e0DAgJWrVrl6ekJvqThkpWVJS6w0n7IMbSo4SIS5NKlS62trRUUFCTYw+iLMXTv3p3BYGzfvn3BggVKSkodoSMjzrp165ydnYODg1euXClyEBADBgzIzc01NTUtLi4mq8x0XEj/CnDm+0SfPn3Mzc2VlJQSEhLU1NSQ8sLJkyfnz59va2ubmpoqwR+PnYfZs2fb2dmtXLkSfSdgMBjiTyQiWi0EMTExkZGRtbW1V69eXbBgAZKN0NPTQ93oi4uLw8LCnJycAgMDGQzGkCFDNDU1R4wYYWFhASFMS0sTCASnTp1ycnJKS0tLSkqSl5efOHEiIQuCIPRfyMjIULp0kUMKn5Mm9Xd0NDl5MvHlyzIuV7BixUhd3a7a2sobN45lMHipqdXdunXp3l3FyWnIjBkD3779lCHIujC6uupGRtrdun3qm+7qOpLHE5SUNIo8I7bIgwd5O3ZMEJF0IRsUFDTweAKhEHK5guZmDgBg0KBBaWlpX9xyizCZzDVr1tTX1y9btszOzm7r1q1btmxpamoyNzcnbFavXu3o6Lh169aUlBRNTc2TJ096eXmFh4e3zSOCy+Xa29tTqdTLly9/UcOlsrJSXGCl/ZBjaFHDhWwAAKBQKE5OTmfOnCF+FnyHGAAASkpKS5YsOXLkSIfqyJBhsVguLi6RkZEJCQkiBwExduxYLy+vkydPEo1klRmpBWe+T3Tt2tXY2Hjp0qXo1QS6hcnKyubm5gIAevbsKZFxUZ0KpDe4Z8+epqam6OhoAMCMGTOIsdsAAHRrE9FqIZaqqKioqqrKycl9/PhRKBQSgino0O3cuVNLS8vV1VVRUVFdXd3Ozg6NPi4oKDAwMBAKhZMmTVq7du2sWbNkZGQEAsGMGTM8PDxEnk4I/Rcyz56VWFj07tZNCem/9OihoqwsP22aQX09Kzu7bv36MXy+cMeOJ+PG/UMsUUaGQnQCIOvCgH/2RjlwIL5/fw1NTSXy+88WiYkpXLhwmIKCjK6uemMjG/wt6UK2aW7mvHlT5eOTXFradO9eDmpss6TLzp073d3d//jjj1evXikqKu7YsYPJZM6bN2/cuHFIw6W0tFRJSWnbtm0aGhq6urpcLpdCoWhqaoofw28iODjYwsIiODg4MTHxixouY8eOFRdYaT/kGFrUcCEbAADCwsLc3d337dsXGRn53WIAAMjLy5uamlpYWHC53A7SkRHB29vbzc0tKirK399f5CCQIWvNkFVmpBas4QLq6+tfv36tqKjYrVu36OjoHTt2NDQ05Ofnv3v3buvWrefOnaNQKFZWVh0xMPYHkpaWdv369ZkzZ+rq6gYEBGzbtg0AcPr0aQ8PDwaDMWDAADabPWXKFBGtFgqF8vjx49zc3MLCwrFjx6JNLVy4cNGiRUZGRkVFRTk5Obm5ueXl5RcvXpw9ezaXy509e/b06dPfvn07bdq0oqKi4uJiZWVlNGFmZjZ//nwTE5Pi4mJXV9fRo0cT4tcIQv8lJqaQSmXeuJHOYvHy8ur9/R20tJQmTOh/715OWlrNli1WOTl1ublUeXmZpibOjBkG795VX7uWWlfHTE6uKC9vLixsSE+vpVDAzJmDAABkXRgjI62jR5//9NMAff1uAICsrLrAwA90Ovfhw/z376sBAByOwN098vLlWZMnTw4MDETviwICPvj6vtbRUeXxBCEhC6ys+j17VpKUVLFu3WgAQHx8cVlZU3U1fcSInii5pqZWL148HACQk5Mjoqn49SxZsiQ4OJjH4+3du7eysvLq1as6Ojre3t5sNtvBwSEsLMzFxWXixIljx469ePHiwIEDL126FBYW1q9fv19++aWNVwkAAAAbG5uLFy+qqKjY2tp+UcOle/f
uZIGV9vj9XAwtariQDQAApqamsbGxlZWVEhQPayUGpOEye/bsgICAcePG2djYdJyOjAjz588PCAjo06fPvHnzhg0bRj4ISMNl37595ubmVCp15cqVAAARlRmpBY/n+wIQQjab/d974BOBLPfAYDBkZWW/SYODyWQqKioiKQ0EWSNGKBTy+XwFBQXwt9oIh8NRUFAgi8iIiI8QkPVfWvLLU1aWBwBcv542aZIelyuIispbtGi4kpKcgkLL2i4Isi4MlytQUPhkJhRC1GVGVpZCfK7j84VycjLPnj0bPny4hoZGixtsbuZ07dpykARsNtvf39/Nza11s1bgcDjy8vIyMjKEaAvRrqioKBQKGxsbNTU1SVE1S+SLDoSQyWSK3zFb1E8BJIGV9rv+XAziGi4iBh2hn/K5GMDf/0FUKlVDQwPF00E6MuKgh3uk1UmOEIXE5XJZLBbxpkFEZUZqwZkP09n5Gv2Xv/568/Ztlb5+t5Eje5F7lEgQGo2mpqbWni1kZGQMHjxYgn39MRhM28CZD4PBYDDSBf75icFgMBjpAme+HwAxJI4ihkijyCyPx6NQKAKBgLxUZIOtb+HrbT63Shu8tN9pW+OUa3UVGdKs7OecCgTw63ft+5wCGo32lavw+XxJOf2aVZC773PeIYQtrsJmszti11o80cR/omR3rXWnIjYMBoM8iw6LyCromBBUV1eTXUgnOPP9AAidPfhPbt68aWBg8Oeff7q5uSUkJDx58kRTUzMhIQFC6OXl5enpeefOnc2bNx85cgRC6OPjc/78+evXr2/YsKGkpITYoMhmW5wFAKSnp/fq1evs2bPXr193c3MrKCg4ePDguHHjLl++7OLigsYG7d69e8SIEcSKR48eHTlyZGlpKZrlcDi7du2aOXMmGgLo7e29e/fuDx8+9OrVKzQ0FNlwudwNGzb8/PPPXl5eK1asKC8vJyKJjY0tLy+/dy974MAzaIz5y5dljo63qFTmN+0Lh8MX2TUIIZ8v3LTp0aVLr1tcJSOj9rff7o8ZczEqKh9CeOnS2/DwrP374+vrWRDCsLBsF5d7yP7s2aSEhJIjRxIghB8+fEDFPMmRtD7b+in4oo34Kvfu3bt06RKTyYQQ0mi0gwcPJiQkoPHsAIBnz5798ccf1tbWSHVoz549CQkJ+/bta6dTCGFGRsaxY8eysrIAADk5OWPHjhVf5ciRI5GRkaixrq7Ozs6unU7JjefPn3d0dHR2dh49ejSEMCcnx8rKSmSVXbt22dnZof+OkJCQXbt2LV68+Ju8fHE2MzOTOA7+/v6JiYmenp4MBgPZFBQU+Pr62tvbP3v2DEJ4+fLl8PDw/fv3U6nU9jht/eqysrJCdxUTE5OCgoLRo0fLycmdPHmSWOXevXsmJibq6urx8fFlZWXW1tZVVVUjRoyora2F0vypC2I6DVVVVWZmZhDC2NjYCRMmQAi1tLQghHfu3CH+hwUCQUBAQFRU1KJFi1BLZmYmcSf6JgwMDBobGyGE1dXVNBotISFhzpw5EMJHjx7NmjULQvj48WNzc/N79+5BCDkczvLly2fOnEneQkxMzPTp07du3YrCePr0KdpsQ0MDYXPjxg0PDw8Ioaen5+7du1FjdnY2yo58vnD69IDx469wuQIIoY9PMo8nYLP5QqGQyxXweAIIIZPJQxMEAoGQTudCCOPjiy9deisQCIlFzc0cZOznl3rp0tsWd/z581IIYUlJ48yZQXQ6d86cmxDC2NiiffviIIS1tYx584IhhBUVzYsW3YYQurlFCIVCCKGXlxePx2vDoZYIT5482bJlC5rm8/mOjo7kXxIQwqqqKgjh/v373759GxcXd/DgQQihp6dnfHx8e/yisQF8Pp9omThxoohNWlrapk2b7t69i2aDg4Pt7Oza41QEpM3N4/HQxSYeQ3Fx8erVq9FPQIFAgG7rjo6O6GeBRBA5DtOnT+dyuWfPnv3w4QNqQcc/Pj7e29ubTqej/6bY2Fj
046Mj4HK53t7eaWlp586dO3To0PPnz4VC4ZYtW4YOHUrYJCQkMJnMiRMnrlmzJiQkpEuXLtnZ2WPHjn3z5k0HRfWvAD/zdSLQywc2m/3kyZPRo0cTLWRRMRkZmV9++eXu3buTJk1CLYMHD87Jyamrq2uDu+jo6Lt37/r5+amqqlJaEirbvHnziRMnAAB3796dO3eu+OuRs2fPPnv2LCQkREZGBvVaFHmLgqapVGpycjIxftbLy8va2hpN/+9/w0aN6r1hQzQAQE5O5v37mtGjL3K5wr/+epOVVRcWlp2UVG5rG/T9tcd691ZTU1N0cLi1ZIkp2ov2aI+1Hy8vL21t7UWLFsXExLx7966qqiogIMDV1ZWQ3enZs+eLFy/S09ONjIwIRbH+/fsTQ87bRlBQkKam5ubNm728vFCLSPdUJpNZVFRECLc+fPiQOLmSAg2jfPr06eTJk1uMAULYt2/fCRMmhIeHy8jIaGpqBgQE9OvXT4I9aUWOg7hAWs+ePYuLi8+ePTtr1qysrCwtLS0giePfCvLy8h4eHkOHDk1KSnJ2draysqJQKKqqquQxo+PHj1dSUpKTk5s6deqMGTNGjBhhbm5ub28/cuTIDorqXwHOfJ2L+vp6pCd77NgxolFcVExklkKhtE0HedSoURYWFj179kSz4kJlaLj6q1ev6urqCDNCtwwAoKioePv27Z07d3740HJZAwBAdnb2vXv3Dh48SIhZ5Ofnk6VMTpyYnp1dd/VqKgDA3LzX0KE9Cgsb1NQUhg3r4eeXyuEI5swxRmUQwHfUHoMQ6ut3MzPruW9fHGppj/ZY+ykrK/v999/PnDmzb9++1NRUZ2fnbdu2KSsrx8bGEjZDhw7V19e/dOkSWVGsnZ9z0GBtb2/vW7dutSgsfuPGjdGjR9NotObm5tzcXB6PJxQKuVyuRKojkYmJifmcDoCent62bduSk5NPnToFAJCRkZk6dWp6enorl+W3InIcKC0JpOno6CxatGjLli0SPP5fhMFgVFRUIBF5Op1eUFBw9OhRkcjHjBkzZ84cLpc7YMCAOXPm7Nq16/Xr1x0aVScHZ75OBIRQU1Pz119/nTFjBvm3qriomI2NzatXr9BsQUGBrq4ukZa+CS0trb59+y5duvTt27dATKhMIBAIBILNmzcvX74cvVxCaxG6ZcigV69e/v7+a9asEd9+eno6hNDY2Hj58uXk35hdu3ZF36IEAqFAIJSVpdy6Nf/EiRcNDSwAwJYtVnv3xvXooQIAoNO506bpu7uP0tD4lN6+m/ZYdjaVweB6ek4yMtLOz69HjW3WHms/Xbt2bWho0NLSYrPZenp6SEhWRD1LXV3d3d29pqaGUBSrqKho5697wpeurm59fb24QXNzs6+vb3x8/IMHDwoLC9+8eePj41NaWkoUkZAI6GJD47U/h46ODhr6SaFQevfuvXjx4pqaGkkFIHIcWhRIU1JSmjNnjpKSkrGxMbrC23/8v8jt27dnz54NAIAQHj16dPny5bdv387KykKadvX19devX581a9aJEyeCg4NlZGQCAwP79euXnJzcoVF1crB6WSfiyZMnpaWlWVlZ6MVRbGxsfX19QkLCihUrcnNzd+7caWlpyeFwzMzMHB0ds7KyvLy8+vXr9+TJE1RZ6ZvIyMioqqpCL3Di4uImT56cm5tLFioTCoURERF6enobN258+vTp0KFDDx06lJubW1ZWhh4EORwOMti0adOYMWMOHz5MbNbPz09TU/Ply5dTp05NSEjIyMiora3t0aMH4d3GxiY3N9fS0jI0NPPRowJn56FaWko3b8579KgAAIDKA02dqg8AmDJFz8YmaMoUvdWrLdG63017zMhIq6mJk5hY3quXqoFBN9A+7bH24+npeenSJT09PXd396lTp96+fTsxMTE/P//gwYNIp+r8+fN9+/al0+keHh7du3d/9OhRamoqhUIhXoy3DXd396NHj2pqahobG/fv37+goCAnJwddpUg
5bMOGDQCAa9euaWho/Pzzzz///DMAIDU1dfHixZLZcwAAAHFxcRMnTkTT4jFkZWXxeDw1NbV169YJBIJt27ZNnjyZz+dL8L2ryHEgC6ShGOrq6ioqKnR1dd3d3bt27WplZfXs2bOkpKR169ZJKoYWCQoK8vf3BwCgN7GHDh1SUlJCv37CwsL279+fmJh44sSJadOmnTp1KiIiIjo62szMbN68eR0aVScHj2T/18Dn8xkMBvmZQ7zlx0KWQGsdoVAYFBQkfmdEOmECAbx5M33Rok+fT9hsfpcu/9js99Qeo9O5qqoKQBLaY+2Hw+Hw+XxCPYtKpaKPSYROFZPJJO9j+3VnEEKhsKmpSVx/klAO+w6w2WwFBQXx73YoBgghlUrV1tZGjTQaTU5OTuKigyLHgRBII2Koq6sjVxeRlHpc2xCRuCNgsVj/eTnGL4IzH+bHwGAwGhsbUTUoMo8fF547l/zXX/bobaekwNpjGAyGAGc+DAaDwUgX+AcsBoPBYKQL3MPlB4A+UMG/dYbIiDS2OEs0tj77NVtov9NvX4UCAPxWpy3asFg8JSX5z62CPu+RVpEBQPj3rAwAwr9XkUHxSGLXPs0SPdp/+CkgPgS232mHxtkGpzwej6jL86P+C4gYOtvxbH0V8tdZqX3nhzPfD4C42lq87EQaW5wV2cLnZlvfwosXL27dumVpaXnw4MFp06bdv39/8+bNubm5t27dunz5cmFh4aJFixwdHePi4iCEpaWlM2fOPHny5JEjR0aOHFlYWIi2c+3aNVSEjOylsbHx/v37lZW08eOvTJmix+UKXF3Djx9/0dDASkmp1Nfvhrpifk2cHA6f6MBCtiksbLC0vPi5XfPySpw795aDw00IYWMj+/jxF1ZWujU1DADAnj2xkyfrJSSU7NkzKSGh9I8/XkRH54eGLpCXFzx69OhzB5DL5W7evDkvL+/jx48iNqtXr0aaNQCA9+/f79mz58yZM/Pnzw8ODv7Kk9iKDXnWy8tr9+7d+fn5YWFhDAbj4sWLvXv3Dg4OJvSrTp482djYmJ+fP3/+fC6X6+zs3L179zlz5oSFhbXZKQAgIyPj4cOHxLCZPXv2TJ48OSEhYc+ePeBvTS8fH593795FRUWhOGfNmoV6fLTZqUijSAyXLl2Kiorav3//6tWrkTFSDnvw4AFS+0PqZSUlJajT41d6aX02IyPD29v7+vXr4jGgmohsNvv8+fNv3rwZPHgw6u969uzZPn36zJgxA1UubP/REN9CcHBwaGhoXl6enZ2drq7uzp07IyMjr169Sl4FQmhjY3Pjxg1FRcVr166tWrXKy8sLRSi9QIxUUlNTM3jwYKQACSFEWmIBAQFIaYzH4yEhqN9//11fXx9N+/j4QAgTExPJGmYbN260sbER2fjhw4eRatSiRbejovJiY4tu3UonliKlTRaLx+MJmEwehBBJkZFpbuZACLlcwcqVEYQyJ4RQKBQ2NLDQ9PjxV1rctY8fGdnZdRDC5cvvJSdX7N0bh+TK7OyuR0fnHzz4DELo6RkbH19cVUWDEO7fH//2bRWEsKioKDg4+HNHLCAgYP369SKNdXV106dPP3bsGJrduXPnzZs3IYQPHjz43HbajIaGxrVr106dOmViYgIhRAOonz9/3qLBhw8fmpqa0FCzurq69vhdsmRJQUEBhHDMmDGxsbHiomgohpKSkpkzZ378+DE7OxtCuHz58uTk5Pb4/VwMNBpNXBiMrBzWQeplX4yhtraWz+dzOJwFCxZACPfv34+U/zoUpF9aUVHh4OCwd+9edC7s7OzodDphExkZOWfOnMbGRpEIpRn8nU9KefTo0ciRI1Hn5rKyMmNj48rKSgqFUlhYeP/+/f3790MIAQBGRkZeXl6Ojo5sNhuNWCCLkyUlJS1evJjJZGZnZxNbrqys/PjxI6FkdvdudlJSxYIFQwAAQiH8/ffHISGZubnUceMuBwZ+mD494NatjK1bH9+7l0Ns4dq19/H
xxTY2QU1N7JcvyzIyPqL2+nrWrl2xoaGZYWH/704cbW1lIyMtAICioqyhodbr1xX9+6sDADQ1laKj89E00irr2VP1xYuy9PRaZK+np3flypXPbVZElQ0RHh4eFBTk5+eHNHQWLFiwdevWs2fP2tjYfN15+Ab69u1bU1MD/8554mO6yQbDhg3r2rWrmpqaqakpeuHZZlRVVQsLCwEAcnJyz58/FxdFQw+dXC7X3NxcW1vbyMgIAKCoqPjFesJtiyElJUVcGIysHNZB6mVfjKF79+4MBmP79u0LFiwQCoV//fVXbW3tggULSktLJRWDOHQ6nclk6ujoFBUVEZJ1mpqaOTmf/qFycnJ69+6tqqoqEmHHhfSvAGc+KYXBYBC3Ti0trTFjxnTp0gUA0KtXrzFjxgwfPpy4ZcyePdvOzm7lypVQ7N3svXv3Kioqhg0bhiSjEFlZWcSwKgCAunqXq1ffIQ0UGRkKyjGGhlrq6l2WLTMbNqzH0KE93NzMnz0rQfZpabVJSeXy8rITJ/ZXUVFQV++CtFoAABoaXZYuNQUAvHpV/sUdzMqqmznTUF1dkaxPBqGoVtnQoT309btduvQWrcXn8wUCATo+kZGRkZGRaLZFhEJhSkpKUlJSjx49QkNDAQDDhw9PSkqKiYlZtGjRFyP8Vi5fvlxQUBAeHm5ubv6VBiEhIbdv326n3/Xr1yNxVxqNRoyoExflevDgwY4dO9B0VlbWzJkzJTjSlByDsrJyizEQymGgY9TLviYGJSWlJUuWHDlyJC8vz9LS0tXV1dnZ+cKFC5KKQZwjR47s3bv32LFjI0aMEJdM43K5z58/19XVZbPZVCqVHGHb9A7/M+DMJ6VYW1snJiZyuVwAgLKysra2NvpWoaSkpK2tPW/evKamJh6Px+PxAAB79uxpamqKjo4mb6G6utrY2Hj8+PF79+59/Pgx+r8CAGhrayNJTwAAhPCnnwb4+c2ZNetGeXkz+Lt3D4GsrAz6y+V+SjCKirICAZwxw8DDYwx6z0kY5+XVBwZ+sLDo/UVxsro6Znl588yZg2g07siRvSoraQCAxkb2tGkGaJrQKlNXV3R3H4U+AQIAVFRU0Md/GRkZVVVVJOQtvn2kThkREfHbb78hkVVvb28AwJ07d3R0dMLCwlJSUtqgId46lpaWO3bsSE1NRXI5kPTJB+lUiRj4+fmNHz++uLj47t277fFraGjo4+PD4XBcXFw+J4oWExOzcOFCBQUFOp1eV1dXXl4+c+bMFkU+2x/D54TBCOUw0DHqZV8Tg7y8vKmpqYWFhaamZnl5ORCTl5M4NjY2x48fz8vLW7VqFXF2Ghsb0ZM3g8EoLy/38fHJz8+/fPkyOUL0vy+14B4uUoqBgYGXl9fatWttbW0ZDIa5uTmXy01ISMjNzQ0JCWloaLh3797Ro0evX78+c+ZMXV3dgICAbdu28fn8x48f5+bm5ubm7t+/f+XKlerq6iwWS19ff/v27b6+vrKysoMHD0b/VJWVtIyMj3FxxQcP/mRh0XvqVH9/f4d376qUlORHjepdVUWrqqIXFja8e1fVpYtcaWkThyNA7yeLixtdXcNHj+67YsVIbW3l69fT/ve/YWiDL16UaWsrv35dkZ5eW1TUUFbWfP9+rqmpTlnZy7S0tIMHDwIAaDTuggUh6updfHyS7e0N16yx9PVNEQignZ3hzJmD4uOLU1OrKRTKpEn9vb1f9e3blU7neniMAQBACPv164eOj5KSEln0Cx2czMzMGzdu1NXVxcXF/fnnn76+vv7+/urq6np6evn5+f7+/jExMUjFY9KkSeQHX4lw9+7dhw8fRkZGjho1is1m+/n5AQDCwsIMDQ0dHBwiIiLodDphcO/ePRcXF6FQCAB48eJFe/yy2ewHDx5wudy1a9cCAMiiaEi1C/Uu0dHR4fF4V65ccXZ2VldX9/Hxsbe3d3V1lcSui8ZAFgYTVw7rIPWyVmJITU198uTJ7NmzAwICxo0bZ2N
j0717d0dHx/v37yOda0nFIE5dXd3t27cdHR0tLS379evn6+srEAjs7OyUlZWRpp2npycAoLi4eMuWLfn5+USEqNON9PKjPjBixLlz546ent7169f/+uuv/fv30+l0cv3YFStWlJSU3Lx5U19fn6he236nVCqVyxXtYNIiX1+aLjY2tri4uB1BQRbrky8+X0guv8dm84m/hAGEkMPhREdHf25rfL6Qwfj/fUTdZyCEHA6f6C8DIbx//z7qwtBm2Gy2QCBobm5uz0Y+B4fDacVv6wbtoa6uTmTLxA6Si/Z1KOIxNDU1kWMQCoWoVwsRIdF76zvEAP/+76irqyMfE/TiRLJhiFBVVYWKRyKQoiE5JBFEIpRasIZLJ6KxsXHUqFF5eXkAgKCgoJs3b0ZERAwcOPDNmzfq6uo1NTUqKip0Ot3Gxubdu3dxcXF79ux59uzZj466ZXJzcyXYweGL0Ol09A2/zTAYjJqaGlTqBYPB/LfB3/k6EeRPSgsXLnz27FljYyNFrH4s+Gf12s7J90x7AIB2pj0AgIqKCk57GIyUgL/zdVJkZGRQZ0sAwKhRoxQUFJqamtAsUb122rRpPy5ADAaD+beCn/k6Kc+ePbOwsEAVZ8j1Y+Fnqte2GdR7s5W++63zrW/LURUhicBmt9YtW8QRny9scfqL3US/BtSRhORaANpxSFvh48eP5A6T4nVi2Wx2Q0MDuQUNQWs/5N1p8aSL7G9HdB0UcSEeBtmgg3rtk120eIrJfr/PyIFvOizge0XVycGZrxMRExNDpVJv3Lhx5cqVqKgof39/on7szZs3V61aVVBQQFSvbb+7Fy9erFu37tatWwcPHrSxseFyuevWrfv5559v3bp15cqVXbt2ZWVlmZiYxMXFAQBKS0vt7OxycnKOHDlibm5O3E/9/f3J9eIRjY2NkZGRlZW0CROu7tkTy+UKVqyIOH78RWMjOza2qLCwAXw1xGgHEZB62efWQuplc+feghA2NXF27nz66FHB1aupAABPz7iEhNL9++MBAAkJpd7er6ZNC2hq4jCZzEePHn0+DO62bdumTp2KeskfPHhw+/btKOEVFRWh7nOIa9euXb58ed++feRGieDl5XX27NmVK1eGhYVxudyFCxd2797dwcGBMDh48OCff/7p4+Ozb98+1BIREWFhYdFOv5mZmd7e3sHBwTdu3AAA5ObmEnppCCqVev78+ZiYmKtXrxItjo6O7fTbSgx1dXWOjo6ZmZmfi+HEiROxsbE7duyg0+kdFIOXl9fcuXPnzp1LzjSenp4JCQn79+8HAFy+fDkqKurAgQMtFrKXFMHBwXfv3j169Gh6ejoAICwsbMWKFZ8zYDKZe/fuffr0KYpQqvlxnWswPxKsXvat6mVhYWEuLi5oOj09ndDuunDhwtChQ4kjOXny5PLyctgBAmZfFCcbMmTI8uXLIyMjZ8+eDSEsKytzcXHR0NBop1+yahfqRjhx4kSyQVBQ0MWLF4VCIZI7gBAGBwfb2dm102/rMbi7u6en//9FRY6hsLBw8eLFqPH06dMdEUNtba24SFtcXBwh7fbw4UNxebOOgKxeBiGsra2dN2/e5wyuXLly+fJlCOGKFSvev3/fcVF1fvAzn5SC1cvao14mJyeHBryzWKwuXbrMnTuXUEZ2cXGZMGFCbGysxAXMvihOtmPHjsDAQA8Pj8uXLwuFwqioKIl8CSardjU3NwMARF6z29vbBwQErFq1Cj3mPnz4UIKj6NoQg5KSUnFxMQAAaXp1RAwKCgriIm2EeFj//v2joqLE5c06ArJ6GRA7LCIGxC5I9sj8G8GZT0rB6mVtUC+D/5TMBwDcuHFDRkamT58+p06dQo2//PJLaGjounXrJK5Z9UVxMiaTuWbNmvr6+mXLll27dk1NTS01NZXH46FxMm2GrNrVoiAZl8u1t7enUqkoQh6PJxQKuVwuSlES4Zti6Nmz55w5c44ePern5zdixIiOi0FEpI0sHkb+26I
MkKQgq5d90cDOzo7JZP7555/R0dESPDL/RnDmk1Kwetm3qpfp6OgQMlR1dXU9evQAADQ2Ntrb2zs5ORkbG0dHRzc3N8fExIwcOTIqKurs2bNffTa+ilbEyZB62c6dO93d3f/4449Xr15xOJygoKDo6GgOh5OUlNQev2TVrhYNgoODLSwsgoODExMTm5ub37x54+PjU1paeu/evfb4bXMMAICNGzdu2LChqqrK3t6+g2IQF2kjS7tNmzatRXkziUNWL/uigZKSkpeX1+TJkwcMGEDIFUkneFSDlILVy75JvQwAMHr06K5du54+fbpPnz4NDQ3jx4/39fVlMBjoJ/+QIUP27NkTHR194MCBhoaGioqKlStXSvaUkdXLyOJkT58+tbW1jYiIWLJkSXBwMI/H27t376pVq1atWhUUFLRmzZpffvmlPX5FVLsKCgpycnKysrIGDx6MlMNsbGwuXryooqJia2s7YsQI9DCRmpq6ePFiiey4eAxUKjUjIyMlJcXExOT8+fMiMQAA8vLybt26de7cuW7dunVEDDQabcGCBYRIm4WFxZMnTzZt2kRIu9na2ubl5RHyZpKKQRyyehkAID4+vqysrLq6umfPnki9jMFgEAYQwhcvXsTExCDpO2kGa7hIO/X19WpqauIlb8Th8/noU98XiYuLGzBgAPrm0TbYbH6XLnIAAIEAUigAvaIEAKDsiP6iFoEAyspSuFxubGzsjBkzWtyaQAA5HL6y8qd9pNG4amoKAAAuV8Bk8jQ0Po2bjIyMHDx4cOvj2RsbG7t06UIMtRQHCVxRKBRFRcVv2+cvweVyFRQUPucUueNwOPLy8hIszQMAoFKpampqLbomqntDCJlMpoqKigT9tieGyspKVDD5+8QASP8dNBpNTU0NNTY3N3ft2lWyYYhQXV2to6PT4gtVFBLZgMlk8ng8CdbQ+PeCMx+mQ8DqZRgMptOCMx8Gg8FgpAvcwwWDwWAw0gXu4fIDQO/c0dcgkUUijS3OEo2tz37NFtrvVLKr/D0nA6HgW1aRBUDQbqciq8gAAJHSw2dsZIilPJ5AXl6WWEr0bkdwOHxFRUUABN//FBAfn9rvtEPjbINTHo+HPk538v+C7+wUXXtfv4rUvvPDz3w/ACQiQEwQhISEGBgYXLp06bfffouIiIAQBgQE6OvrX79+PTAwcMmSJbW1tVOmTNmyZQvqRgFIhbnFZ1v0QrZ59OjRunXrgoKCvLy85s2b9/Tp04ULF7a+CjFRV1dnbW3t7u5++PDh9evXi1T8Iq/C4XDIs0+fPi0vL09Nre7R40RkZG5zM2fKFL/g4AxSiTF44ULyV+4Lny/cuPHhpUuv23w0uFyBeNhNTeyJEy/n51NbXCUjo9bdPXLMmIsPHuQBAPbsiU1IKN23Lw5CyGbzDxyIv3DhDTJubuYcPPgsOblCIODFxMSgsnZff5pEZlevXu3k5OTk5PTrr79CCGk02sGDBxMSEpBcDrLJyMg4duwYErc7e/ZsQkLCkSNH2uO0xc3GxsaSN5uRkfHbb7+NGTMmKioKQlhXVzd37lwkptUep+TG8+fPOzo6Ojs7jx49GkJ4/Pjx2NjY7du3oxEFyObOnTtovAGaraurI3RkvtJL67Pnz5+fN28eEUNOTo6tra2dnR0Sc0E2e/bsefr0KRJtCQkJ2bVrF1KTabNTcmOLWzh48ODJkyf379+/d+/ekpKSXbt2aWtrr1+/nrzKkydPXFxcCFGhiIiIbt26EQZSCsR0Gj5+/GhsbAwh/PDhw4gRIyCEZWVlZmZmaGl+fj6E0MXFJSwsrP2+SkpKhg8fTtSu9PPzq66udnZ2ZrFYRBpDPcEghEKhkMPhoDqcHA4HVbJduXLl3bt3IYSTJk168uQJGryMSqQ2NDSgjcTHx1+6dAndmiGE2dnZSCYNQtinz0kWi/fXX2+QchiEkMPho5KzaBifQCDkcgVkYbPGRjYxzWTyeDwBhNDPL/XSpbdcroDLFfD
5QrQFoVDI4fCbmlC9Vj45vaEto80ePpyQkVFLXtTczEGbXbbsbn5+fYuHDmmhlZQ0zpwZFBdXTJZDgxAGB2f4+CRDCPl8oaPjrfLy5r+dCk6ePPk1p6ZFhEIhEkV78+ZNYGAgn893dHRELQSVlZUODg7oyFdUVCxatAhC6ObmRq5c2ga+uNnnz59DCEtKSghZOxFpsfZTVlYGIeTxeFu3bv2cOJmIcJfEFdTIMUAIt27d+vDhQ/JvPrJ6WWxsLKqU6+joSFz/HQFZso5KpUIIPTw8kHwdIjY2VlNTs6qqitgLiWja/dvBz3ydCPR6qqioyN/f38nJCbUgecagoCA0HpmsodUeHj16ZGlpSYxSWLp0KQCgvLz82rVrM2fOBACEhYUlJSXZ2tpWVFQsX7788OHD+/fv/+23327dujVv3jzw9zvbwsLC6upqPT09BweHI0eOBAYG7tq16/3792vXrk1NTX327FlOTg5RX8nLy4vQtaJQKFu3xhgaaiGJlufPS3/66ZqnZ+yHDzUTJlzl84XOzqHe3omrV0dGROQCAA4dSnjypHDs2MsZGR/DwrKTksptbYOqqz/pEScllS9ffq+xkT1y5AWBAC5fHn74cML+/fG//RZ561bGvHnBxI4nJVU8eVLk7h4ZHZ3/7FlJamo1/PuX77FjL3Jzqb/++oXx11ZWugAALldgbt6bkEZDcmgA/P8AjHfvqqqq6AEB711dw/l8oYyMzNu3b4nx/t8KhULp06cPACA8PNzOzu7du3dVVVUBAQGurq6E9H5QUJCmpubmzZu9vLx69+6tpqbm4OCwZMmSdl4wX9wsErDmcrmEuIxkh1UAAPr27QsAePr06eTJkz8nTkZ22hEKauQYAADm5uZ//fXXTz/9hNQewD/Vy96+faupqRkQENCvXz+JHw0yZMk6TU3Nx48fBwYGmpiYEAb79u0bOXLkzp07X7x4IUFNu387OPN1Ljgczvnz56lU6tatW1GLiorKmDFjzM3N2198lQyDwRAvVtK3b9+VK1cqKSk1Njb6+flxOJw5c+Y0NTUZGxuPHTt2y5YtdXV1ixcvlpWVRW+Z4uLiEhISHj9+rK+vr6enZ2dnN2HChOLi4kmTJjk6Op48eVJXV9fIyIgYTZyfn08eS9Sjh8qePbEsFh8AMHRoD01NpaNHrYcP11FSkpOTkzEw0Jw0SW/1asuEhBI6nRsbWzR37uCBAzWHDOnu55fK4QjmzDGur2ehTQ0Y0A0AoKWl1KOHiqwsxdhYe+xY3S1brOrqmIsXD5eVpdBon4rm+Pq+Fgrh9OkGAgHs2VN19Oi+xB3c1XUkjycoKWlEIbXOgwd5O3ZMIEujiSSY1NRqZ+eh27aNV1aWj40tBgAYGBiQywu0jcbGRnV19dTUVGdn523btikrK8fGxv7tMXX9+vXe3t63bt1qbm7W19c3MzMj6ja0ma/c7IMHD3bs2NFOX60TExMzderUL4qTdZCCGjkGAMD8+fNDQ0NNTU2fPXuGFpHVyygUioyMzNSpU9PT0z98+CDZGMiQJesAANbW1nv27Dl27BgqKgIASElJmT17tomJiZubmwQ17f7t4MzXiYAQqqioHD16lEqlEtLJcnJyWlpaxsbGkydPzs/PJ9ujTyltw9ra+sWLF0QRNRaLRSySk5MTCAR0On3atGnu7u6oRiAAAI0XRhPohefkyZOXLl2qq6sLAKBQKLKysrKysrm5uQCAHj16KCsrg39+Qu/atSuSdELtmzePc3QcPHfuLS5XQKFQkJIZ+KfCmYwMBUKgqqowdar+gwd5W7daAQDodO60afru7qOIQeiEI4Hg/0vlERskq6PxeMJhw3osWjQMPWuSwztwIL5/fw1NTaUvqqPFxBQuXDhMQUFm8ODuInJoBHp6GgUF9QAAQ0MtZAMAaOcg4vfv3w8fPhwAoKenV1BQAAAwNDQkNNWIRl1d3devXzMYDE9PTyMjI5HL5lv5ms3GxMQsXLhQQUF
BglWBRBAIBAKBAPVqaV2crIMU1ERiQAwZMoT4YUdWLxs5ciSFQundu/fixYuJJNQRkCXrqqqq+Hz+mjVr+vTpo6ysjDTtdHV1uVwu0hSUoKbdvx2c+ToRT548qayszMnJ8ff3P3ny5K1bt2JiYmpqam7evBkQEODg4MDhcNLS0h48eHDjxo39+/ffvHmzzb6GDBmyc+dONze3O3fuhISEvHnzJiMjo6CgoLy8vLCwMDMzc8qUKTY2NkePHlVSUsrOzs7Ly8vMzCwqKqqoqCgrK3v9+vWHDx9iY2NR7mSxWHl5eUlJSQYGBhMmTLh37969e/e2bNliZGR0//59opifjY0Nyovv39d8/Mh88qTQxWVkSUnj/PkhMTGFRUUNHz8yCwsbcnOpJSVNeXnUnJy6tLSagoJ6DkcQH1/8+HHh06dFLBZ/yhQ9G5ugo0efq6govH9fnZFR27OnakUFzdc3hcsVpKfXZmfX5eVRMzM/FhU1VFTQysqa8vI+vWb89Vcze/sbe/fGNTVxTEy6//XXGwbj09uqrKy6wMAPdDr37t3sgoL6tLQaLlfg4hIOAEC1h5BZQMCHPXti3dwi7O1vWFvr19YyCDk0Dkfw6lV5ZuZHBoM3deoADkeQmFien1/v6DgYAECn042Njdt8ygAAYWFhs2fPBgBMnTqVw+EkJibm5+c7Ojq6ubmxWCx3d/enT58+e/bM2Nh4ypQpTU1NiYmJvXr1MjAwaI/TVjbr6+v78uXLgICAPXv2uLm52dvby8rKEtJiUKIdKOLi4iZOnIim8/LyTpw4gcTJUAyAJNw1YsQIT09PT09PY2NjCSqokWOg0WirVq0KDw9XVlYeOXJkamrqyZMnra2ta2trkXrZ+PHjf//998jISD6fL/H3rmSQZF1paenevXvXrVs3duzYq1evnjlzRk5OzsHB4eXLlydOnEhOTs7IyDhy5MiqVasiIiI2b96sqqraTk27fzt4JLu0Q6VSiTI3IrDZ7FZkulqByWSiBz7wT80toVAYFBTUhpvR27dVdDpXX79baWlTczPn558HEvJmBEjnDJC+tH0ODkegoPBJQZ/LFSgoyP4dHkSV3GVlKcRG+HyhnJzMs2fPhg8fTjz+ikDIoYlDpbK0tJTA33KXSFWyzTAYDLJCGHHuiNELQqGwqamJeAppv7QN4nObJZTDvgNsNltBQQG9TiSLk/2oGGg0mry8PPEPIq5eRqPR5OTkUCGwDoUsWUelUjU1NdG1TWja8Xg8Pp//HSL5F4EzH+a7wmAwGhsbUWeNryc5ueLMmSRDQ62BAzXnzx8iL/+931WQxRjbRlpa2rBhwyQVDwaDaQ8482EwGAxGusDf+TAYDAYjXeDMh8FgMBjpAut2ilYJx2AwGClBar924Wc+DAaDwUgXuIcLBoPBYKQL/MyHwWAwGOkCZz4MBoPBSBc480k1AoGA+EtGXMy6dZBcvfh2vhscTmuuyTqcfL6QvIj8th8JuHR+kFw4glBeRYicgm89j1/pVygUtmjAZrNbDKMjYhDx2GJgIgfn+8Two2hoaEClxBDkU9/c3IwaBQIBYdBJwv5R4MzXiQgPDx80aBDSt01MTJw3b97Lly979+7t4+Nz48YNNze30tLS0NBQAwODy5cvu7u7379/vz3u9u7dGxAQsGLFisjISJFFV65c4XK569at+/nnn2/dunXlypVdu3bl5OTo6el5e3vv2rXr0KFDyPLx48fr168PCQnx8vJydnYmtgAhDAsLKy2l/vTTtS1bHhOC0e2hxY0IBHDTpkeBgS3L4WdmfnR3j7SyuhIVlQ8AuHz5XVRU/oEDz1CRh7Cw7BUrIpClj09yQkLJ0aPPAQBpaWlIo7mzER4efvnyZSaTiWZfvny5fft2YmlkZOTSpUsnTJhQWloKAPD09ExISNi/f3/7/WZmZh4/fryiooLNZl+4cGHVqlXe3t5kg927d9vb2x85cgQAEBAQkJiYuHfvXiJOiUDEAAD
Izc21tbW1t7cvKSkhlrq7u1tZWUVFRaEWKpXq6OgowQC+GENKSoqdnd3ChQstLS3fv39vY2OzcOHCn3/++dKlS5INQ4QnT564urq+evVq69atGhoaGhoaWlpadXV1aGltbS1qtLW1jYiI0Pibdt49/vX8gJqAmM/A5/OnT58+fvx4VAnBx8cHQmhgYNDY2AghrK6uptFo4tVr2waLxTIzM+Pz+Twe7/Hjx6iRRqOhCVTzPSAgwMPDA0LI4/FKSkoghEOGDKmurhYIBD169GhoaBAvb0ts38/PD9VNdXG5FxaWRbSz2Xw+X4gKyaKW+nrW3yHx0FJynEQV2fj44kuX3goE/7+UqCKLitO2uJvkKrJ0OnfOnJsQwtjYIlRCvbaWMW9eMISwoqJ50aLbEEI3twhUcNXLy4vYr07CkydPtmzZQsw2NTWdO3cOnSAEqhDr7+9//PhxcpXU+Pj49vglV6atra3l8/kcDmfBggWEQXFx8erVq9EVAiGcPn06l8s9e/bshw8f2uP3czHAlqrCilfHlXhl2i/GgErXQgg3bNhQXV2N/ou9vLxKS0slGIYI5MKzvr6+79+/f/nypY2NDWFw8+bN58+f19TU0On0W7duJScnp6enjx07lslkdlxUnR/8zNe5+N///jdq1KgNGzYAAJAALoVCiY6Ovnv3rp+fn6qqqnj12rbRpUuXsWPHTpw4sby83Nramk6nnzp1KiEhYcuWLcXFxRMmTECuCwsL79+/v3//fgghaqmsrLxx48bo0aM1NDRaLG+LiIqKQuKcIqV0T5x44eeXmpNTZ2sbBADYvPlRQUH95s2P3r2rnjzZLyjog739dcKYqCL7+HHhs2clOTl1TU0ctKgNVWSzsj4i8WjxKrK9e6upqSk6ONxassQURauiotLZfhR7eXlpa2svWrQoJiYGAPDgwYPp06eTDYgKsRYWFuQqqSkpKe3xS65M2717dwaDsX379gULFhAGEMK+fftOmDAhPDwcALB06VJra2sFBQUJ6pSSYwAtVYUVqY7bEZVpvxgDKl1Lo9G6du2qo6ODihmVlpaiMl4dBLnw7KpVq4YPH56dnT1//nzCIDw8fPz48VOnTi0qKlqwYMGoUaOUlJQMDQ2lXMAaZ75Ox4kTJ7Kzs69evUq0jBo1ysLComfPnmhWvHpt2/jzzz/Xrl07ZcqU169f+/v7Dx8+3MbGZt++fXp6esR/Ra9evcaMGTN8+HCirnR8fPzx48eDg4PBZ8rbAgB4PB5Rh10EVEJ28ODuaHbjxrEMBi81tdrEpHu3bkrLl4+gUChM5qdbCVFFlssV6OqqGxlpd+v2SRpfslVkIYT6+t3MzHru2xeHWgYNGpSWlvbFLX9PysrKfv/99zNnzuzbty86OnrYsGENDQ0sFotcW5HJZHI4nClTpohUSW2PX3JlWhqNpqSktGTJkiNHjhCnXk9Pb9u2bcnJyadOnQIAUCgUJyenM2fOVFVVtcdvKzGIV4VFoOq4HVSZ9itjiIiImDlzJpouLy//VnH2b4VceBa13Lp1a+7cuYSBr69vUlKSvr7+4cOHUYu/v//ChQs7NKrOD858nQhU91JWVvbWrVsnTpxoaGhA7VpaWn379l26dOnbt29hS9Vr28CLFy9qa2udnZ0PHDhw7do1RUXFrKwsAAC6UxD3SiUlJW1t7Xnz5jU1NaG3fwsXLlyyZAn6N/tceVt5efkWv5+np9cCACCEqH4sny/csePJuHH/+EWMStEiJFtFVldXvbGRDVqqIpudTWUwuJ6ek4yMtPPz61FjO6vISpyuXbs2NDRoaWmx2WwWixUaGnr9+vX3798Tj3RCofDBgwcrV66k0WgiVVLb45dcmba+vl5eXt7U1NTCwkKk/4iOjo6hoSEAICwszN3dfd++feLfjyUVA2okV4UFpOq41dXVHVGZ9mtiAAAkJSWNGjUKTd++fdvBwUFSAbQIufAsAKCkpERVVRVduqgybXV1taWl5Y0
bN4h3Mw8fPkRl5aUZrF7WiQgNDX306JGzs7OWltbNmzcfPXqUkZFRVVWFXrPExcVNnTo1Ly+PqF47btw4FRWVtr3zVFZWXr16NcqmTk5OQ4YMsbKy+vDhg4ODg6GhYW5ublFRUUJCQm5ubkhISENDw71797y8vCoqKp48ebJp06b58+dv377d09MTlbedNWuWQCDo1avX+PHj0fYHDhwIIfz4kZmWViMjQ2EyeXl5VC5XsGSJ6erVD+TlZRsaWNXV9Hfvqq9dS62rY0ZF5VVX0ysqaCUljTk5dSgzoSqy9vaGTk5DjYy0jh59/tNPA/T1uwFSFdmHD/Pfv68GAHA4Anf3yMuXZ02ePDkwMBC9egoI+ODr+1pHR5XHE4SELLCy6vfsWUlSUsW6daMBAPHxxWVlTdXVdCMjraYmTmJiea9eqgYG3QAAOTk5ne3u4OnpeenSJT09PXd3dwcHBwcHh5KSklOnTk2YMMHNze306dPr1q2rrKwMCgrq16/f6dOnHz16hKqkTpo0qT1+3d3djx49qqmpaWxszOPxPD09x40bZ2Njo6ys7Ovra2pqmpWVxePx1NTU1q1bBwAwNTWNjY1FX8UktOv/iEFTU3PVqlW2traoKiyKoaCgwNfXV0dHh8fjhYSEoJefqampEqxM+8UYxo0bR6fTyQUU8/Pz21kW+IucOHEiICCgX79+RPci1NGMxWI5ODiEh4dv2LChS5cuS5cuPXDgAADg+fPn5ubmRBaUWrCGS+eFqHXZEaB+5wwGQ01NDT3hQQh5PB5RRfabEC9vW1lZmZGRMW3aNHFjHk8oLy+D3j2iwrAcjkBRseXiohKvItvczOnaVbHFRXQ6V1VVAQDAZrP9/f2J10edBw6Hw+fzyfdWxOculfaXFUSQK9NSqVQNDQ1UDBa9ooAQUqlUbW1twl6kgq5EIMdArgr7PSvTfjEGgUDA5/NRPVjQMcdBnM8VniUq07a5xPR/GJz5MB1FeXm5pqYmUZz9u9H+231GRsbgwYOJT5sYDOY/Bs58GAwGg5EupP1tLxkejycvL09+cSErK9sR71KIt4vine5EGlucJRpbn/2aLbTfqWRX+SFxtsEpPgU/3Om/5RT8EKdfv4rUPvngzAcAAAUFBcePHx87diyE8PTp0+/evfP39+dwOFVVVTwe7+DBg5J1R1xtIpcd0me5dOnSmzdvbG1t7ezsAgIC9PX1Dx48KBQKHz16VFtbO2XKlC1btnA4HPRBDm1BZIMim21xFv0lsruIDY/HI383+qIX1O2T/CuhzYF9zmnrqyD1ss85JcYzAAB4PIGc3P+/xhQKhcRdQCCA33oA27On7Tka4ltAogTfdNbaHKf4iRZfBfX87LjzTr5oP7enrdziJXgKyF/RIITEeBLChjDoiPPOYDAaGxvRtLq6OoPBqK6uFlnl48ePzc3NampqAoGATqc3NTU1NjYqKipK9cc/KPUIhUJzc/Pc3Fw0GxISwmAwJk+ejCRIHjx48N0iEddnKSsrMzMzQ0vz8/MhhC4uLmFhYRJx5+npefXqVVdX13v37oksunDhAofDWbt27YwZM27evHn58uWdO3dmZ2f379/fy8tr586dSB8EQvjo0aN169YFBQWdPHly3rx5xBaEQuGdO3dKSuqmTPH7/fdHHA4ftpsWN8LnCzdufPg5DZeMjNrffrs/ZsylBw/yIISXLr0ND8/Zvz+eSmVCCO/cyXJx+bTvZ88mxcYWHTmSACH88OEDOtr/Iv7444+ioqKO2PL58+cdHR2dnZ1Hjx5No9E2btw4ZcoU4gIQMUAKOHV1dZLVTxFxcfbs2djY2CNHjnzO4OPHj3Pnzk1PT++4GHJycmxsbOzs7LKzs5FBRkbGb7/9NmbMGHTTEDfoCExNTdGd3MTEJD4+ftasWSdOnPj9998Jg5MnT+7evXvhwoV37twJCwsj7vwhISEdF1XnB2c+mJeXh7rgkwkICBgwYMDTp0+/ZyR1dXUDBw4
sLCzcvHnz0aNHIYTl5eUDBgy4f/9+YGBgUFAQhNDV1fXu3bvt94XVy+C/Tb1MBBaLxWazCfUsd3d3CKFQKGxoaJCsIyTKxePxtm7dmpyczGazIYRmZmboQIkYoBaJK4eRXVRUVCxatAhC6Obm1koM7u7uks18Ii6+qKAmbiBxuFyut7d3WlrauXPnDh06NHv27F9//TU1NVVGRqa6uhrZaGhoXLt27dSpUyYmJli9jAD3XmtZiOSXX34JDQ1dt27dhQsXvmcw4vosKioqY8aMMTc3V1VVlaAjrF4G/m3qZWTevXs3efLkoKAge3t7AEBJSUm/fv3q6+t37doVGhpK/mnfftDIyKdPn06ePHnUqFGoo/yQIUOI00o2AB2jHEZ20bt3bzU1NQcHhyVLlnwuBgCAxLvmirj4ooKauIHEkZeX9/DwGDp0aFJSkrOzs66ubk1NDYRQKBQi4XsUNmqsqqrC6mUEOPOBwYMHQwhzc3PRLJ/Pr6+vj4mJGTlyZFRU1NmzZ79bJLAlfRY5OTktLS1jY+PJkyfn5+eT7dPT09vjDquXEcB/g3oZGRMTk27dui1fvpxCoTCZzJCQEEdHRw0NDfTj49WrVxL3GBMTQwztDwsL27dvX4sGHaQcRnYBIdTX1zczM/tcDJJ1+jkXrSuotWIgcRgMRkVFhb6+/rZt2wwMDE6dOqWkpDR48GC09PLlywUFBeHh4SgfA6xeBgDAPVwAAAoKCiEhIdu3b3dwcFBRUaFQKLNmzTpw4EBDQ0NFRcXKlSu/WyRPnjwR0Wdhs9k1NTU3b97k8Xh+fn5nzpxJS0uTkZFhMpl5eXlcLrfNvW9evHgxaNAgZ2dnPp9/7dq1ESNGZGVl/fTTT83NzUpKSuLqZenp6T179oQQLly4UCAQuLm5+fv7W1tbnzt3jsvlou42LBYLpcxvUi+7eHEW2UZcvax3b7XKStrjx4Xwn+plW7eOl6x62d69k9eujcrPrx84UBN0PvWyFpGRkYF/v7HPyckJCgqaO3duTk6OZL0gXT0kwZyenm5gYGBgYEAeN0kYNDc3v3nz5s2bN0g5TIISKoSLrKwsBoOxd+/etWvX5ufnDxw4UDzIDkLcxecU1Oh0OnpJIy5vJnFu3749e/ZsAECfPn1OnTo1fPjwEydOyMvLR0dHT5w40dLSsmfPnmZmZg8fPkT2Dx8+3LVrV4eG1PnBmQ8AAEaNGnX79u2GhgZ1dXX0cPPo0SMIIYVCIeQYvgNOTk6EFFlGRgaaIF4hopuIpH7OY/Uy8G9TLyOTlZVVXV1dUVFRUlISFxfXr18/AEBlZeWLFy+0tbVfv37d1NQkwcwdFxc3ceJEAEBaWpqLi0ufPn04HM6BAweSk5ORahdhMGLEiBEjRgBJK4eRYzAyMmpqakpMTOzVq5eBgQGhHEYYAACoVGpGRkZKSoqJiYn42KH2x0Cj0X7//fdWFNSuXLmyefNmwkBSAbRIUFCQv78/AODNmzdBQUFbtmxZsmQJUi+LiIig0+kPHz6MjIxEaqJYvQyBR7JLKVi9TJzOr17WIkgoHD1tI8EqQrZKUrDZbAUFBfEvZ8Twhs8ZdFwMxEPVj4rhiwpqZIPvA/H2BYEuA5FGDAJnPkxHgdXLMBhM5wRnPgwGg8FIF9L+tveHgNXLvtVph8bZBqf4FPxwp/+WU/BDnH79KlL75IMz3w8A/lOLSHwpoSAqbgP/1m0iL4WfFzf63CzRKBQKRWy+XoqslX8h8irEBr8msM/F2YZVvmYLbDaf/IlR7OD8/yz6iEjMErJnEEJC9uybwm7nriHZgc+tQhztNgT2Oac0Gu3v/W1BvQyNbyFW6SD1MiIGNIZdZBWRf4qvvD6/NTAiBjRKvfVVyJ/ZOuKSFlEvEwgEHz9+FFmFUC9DLdXV1WTRPukEf8noXHh7e+/du/fOnTu///77kSNHkJQMqpaOuHbt2uXLl/ft2+fp6dl+d0KhcNmyZcTs48eP169fHxIS4uX
l5ezsnJGR0bt379DQ0M+t7u/v//jxYzSdk5Ojp6fn7e29a9euQ4cOtbhBYkUIYVhYGIvFev26UkvreFxccfv3pRW4XIF4o0AAN216FBj4ocVVMjM/urtHWlldiYrKBwB4esYlJJTu3x8PAGCz+QcPPrtw4Q2y9PFJTkgoOXr0OQDgyZMn6LbYoWRmZh4/fryioiIlJcXOzm7hwoWWlpbZ2dnEUm9v7+Dg4Bs3bkjcI4PB2LRp07Rp04hTDABoamrauXPno0ePrl69ilqoVKqjo6OkvIvEwGazL1y4sGrVKm9vb/JS8l7X1dU5OjpmZmZ2UAwAgMuXL0dFRR04cICoz37hwoV58+YtXLhwzJgxKPF0xHEQwcrKSkNDQ0NDY9y4cbW1tQ4ODlevXj19+jRh4OXldfbs2ZUrV4aFhZWXl0+bNq2qqsrc3BwlSOkFYjoNd+7cWbx4MZoWCAQBAQFlZWWmpqZkG8kKikZFRVlZWSUlJUEIW5QiMzAwaEUNa+PGjTY2NsRsG7TNIIQ6OiegmKoZh8PncgVIq6yxkU2syGTykGKZQCDkcgVI2AzZs9n/kImSoOxZXFzxwYPPIISenrHx8cUQwuDgDB+fZCgmeyYQCE6ePPm5wyURUK1z9LSB9LQghBs2bCAMlixZUlBQACEcM2YMIe4lKY8tqpft3bsXCXfZ2dnR6XTYAepl5Bhqa2v5fD6Hw1mwYAFhIL7XElcvI8dAp9PnzJkDIYyNjd23bx8y+A4qbiKIqJdt27bNycnpw4cPqqqqhBIhWb0sJCSkS5cu2dnZY8eOffPmTccF1vnBz3ydiNu3b8+YMQNNy8jI/PLLLyLSXwAAFxeXCRMmxMbG2tjYtN9jcXHxgQMH0G/nFqXIxAMgSEpKWrx4MZPJJJ42KN+ubQb+/upJVjWrqWGYmZ0/dy45MbH80KGEJ08Kx469nJHxMSwsOymp3NY2qLy82dk51Ns7cfXqyIiI3MpKmp9f6pkzSYT4i2Rlz16/rujfXx18heyZjIzM27dvqVTq15+CbyUoKEhTU3Pz5s1eXl5ozCKNRuvatSthoKqqWlhYCACQk5OTiIoK2WOL6mWvX7/u378/AEBTUzMnJ6cj1MvIMXTv3p3BYGzfvn3BggWEgfheS7xrLjmGrKwsNIynf//+KSkpyOA7qLiJIKJelpKSIi8vj8bRE/+VZPWyGTNmjBgxwtzc3N7evqNHGXZycObrRHxODIyMBAVF8/LySktLGQxGbGxsWVnZ13iPiYmJjIysra0FANy7d6+iomLYsGGnTp0iDNqsbUZWNdPRUeneXcXDY4ypqU5sbNHcuYMHDtQcMqS7n18qhyOYM8e4uZljYKA5aZLe6tWWCQklfL7Q3//9pEn9u3T5lGV/oOyZgYGBxF+ykUlNTV2/fr23t/etW7fQm9WIiIiZM2cSBuvXr797966fnx+NRpPIYHZxjyLqZURdHhkZmYqKio5QLxOJQUlJacmSJUeOHCEuMInvdesx0Ol0YpdFLonvoOImAqFeRh6fTkRFVi/jcrkDBgyYM2fOrl27Xr9+3aFRdXJw5utEzJgxg/hsBv4eoUzA4XBSU1MlKCgaFRW1ZcuWCRMm/P777z4+PtbW1i9evEAdE8S9AwDS09NVVFRUVVXl5OSqq6uNjY3Hjx+/d+/ex48fo6ccCOHChQuXLFmCxoB/boMi2mbq6p/GXMO/Vc3A3w9VqqoKU6fqP3iQt3WrFQCATudOm6bv7j5KQ6MLYQYhUFVVCAycu3Hjw/fvP6n0ItmzRYuGjRjRE/yzR8CBA/H9+2t8k+zZ4MHdKytp4POyZ56ek4yMtPPz6//eow6UPdPT0ysoKAAA6Orqoi9MSUlJSJ4DYWho6OPjw+FwXFxcOsIjWb0MGYwcObKyshIA0NjYqK2t/ebNGx8fH6ReJpEAxGOQl5c3NTW1sLAgri6J73XrMXTv3h31K6moqCA/PImouEn8OLQIoV42atQoHo/HZrP
V1NSMjY2jo6OZTKalpeWOHTtSU1MPHz4cHBwsIyMTGBjYr1+/5OTkDo2qk4P7dnYiVqxYkZubu3PnTktLSw6HY2ZmlpiYWFVVFRgYyGazQ0NDAwMD169fLxFB0ZSUlOfPn7u5uXXp0sXU1HTu3LmLFy8WkSLr1q1bVVWVn5+fpqbmy5cvp0+f7uDgAADg8Xi//vrrypUr1dXVWSyWvr7+9u3bPTw8vlXbLCmp4u7dbCenoQCAUaN6E6pmubnUoqKGjIyPRkZa8fHFtbWMAQM0DAw0p0zRs7EJmjJFb9Uqi7w8ak5OnVAICwrqk5Mrmps5ixYNR6UYQAfIniUklKSmVlMolEmT+nM4glevyplMHoPBE5c9o9PpxsbG7Tk1rePu7n706FFNTU1jY+P+/fvT6XQVFRW0CGlojRw58sGDB1wud+3atRL32NzcLK5etmbNGl9fX4FAYGdnN3bs2LFjxwJJq5eRY+DxeJ6enuPGjbOxsVFWVm5xrztCvYwcA1L7e/bsWVJS0rp168QV1DpOxU0cQr1s3bp1Li4ukZGRx48fl5GREVcvU1FRiYiIiI6ONjMzmzdvXodG1cnBI9k7HXw+n8FgfO65AVXOo3SkoKi4FJnEN0homzEYPBWVT/q/ZFUz1PL2bRWdztXX71Za2tTczPn554FsNp94pUkAIeTzIQBAXv7/32FIXPaMRuOqqbWsAkXInhUUFOTk5Nja2n71sWkLQqGwqakJ6SALBAI+n48uBjTYgEqlqqmpSVawiuyRDDG8QSAQcDicDtXrIcdApVI1NDQI1x20163HAABobm5GX1i/p4LaF2EymehEfE69jFCWl2Zw5sP8GL5G2yw5ueLMmSRDQ62BAzXnzx9CTmwdQftlz9LS0oYNGyapeDAYTAeBMx8Gg8FgpAvcwwWDwWAw0gXOfBgMBoORLnDfTiDl+nUYDEZqkdqvXfg7HwaDwWCkC/y2E4PBYDDSBc58GAwGg5EucObDSAxUHU0EslCZ+CxGUvD5/C/KrrYTQq4M1XRsxQAAQOiKdVAMCJHL6XOBdVwM4rtJjgGVBfgOIQEAGhoaAABMJrOpqampqYn4Z2QwGE1/g1p4PJ64NqG0gTOflJKdna2rq3vixIlNmzZdvHixDVvgcrnr1q2zsbG5devWlStXdu3aFRsbi/TyCQoLCy0tLYlZf3//3bt3tzd0TEucPn26vLy8gzbeem088M/CdaCD6/MBAHJzc21tbe3t7UtKStBScmBsNtvGxmbhwoU///zzpUuXOigGILabIgcnPj4+KirqwoULb968kWAM4jx58sTV1fXVq1cAAEtLSw0NjZ49ezIYDLSUXMCPyWRevHhRT0/v7du3HRrSv4DvXhcJ01kwMjJqbGxkMBgaGhpMJlMoFBKl+NhsdlNTE1GArbm5GU1wOBwul4vqfUMIAwICPDw8kD2qt4fE1SCETU1NaGL8+PFogk6nJycnb968+TvsmjTAYrHYbDYqFwchdHd3hxCST6Kk+GJtPLIBokPr80EIt27d+vDhQ7JHcmDV1dVcLhdC6OXlVVpa2kExQLHdFDk4bm5uubm5aWlpZ86ckVQM4sTGxmpqalZVVUEIS0tLfXx8ysrKiGtApIAfhLCqqgoAgOopSjP4mU96oVAoQqEwPj5+4MCBLBZr165doaGhYWFhBw4ccHFx2bNnD9KfvHbtWnx8vI2NTU1NjZmZ2blz5xITE4ktFBYW3r9/f/369V27dv39999DQkIaGhoCAgJ8fX33799P+Lp+/XpkZOTTp09/zK7+53j37t3kyZODgoLs7e0BACUlJf369auvrydOogR9fbE2HtkAdExdOhEX5ubmf/31108//cTjfarISA5MR0dHXl4eAFBaWqqrq9tBMYjvpsjBWbdunbOzc3BwcDvF5Vtn3759I0eO3Llz54sXL6Kjo9esWWNmZvbkyRO0VKSAH2rpuGD+ReDMJ9XcuHGDTqfHxsZqaGigyrGvXr0
yMTGxsLBAVfcePXqUlJQkLy8/ceLErl27du/e3cPDw8rKithCr169xowZY25uLiMjY2RkBAAIDAxkMBhDhgzR1NQkzK5evbpgwYKpU6d+7z38j2JiYtKtW7fly5dTKBQmkxkSEuLo6Eg+iRL09cXaeGSDDx8+fIf6fPPnzw8NDTU1NX327BlhIxJYeXk5Uf1Y4jF8bjfJMbBYLFQ5ISEhQYJhiJCSkjJ79mwTExM3N7fFixdnZmauWLECvYkhbIgCfh0Xxr8OnPm+AQ6HQ0y32JtDUqBfsp9zgdrbHwCEcNGiRfPnz1dVVc3LywsMDLSwsEDf59Ho/m7duqmqqgoEghkzZnh4eHA4HBERegihkpKStrb2ihUrVFRU0FqKiorq6up2dnbkTyAfP34UCoUUCh4/KmFkZGQghHl5eQMHDhQ5iZLii7XxyAbl5eXfoT4fahwyZAi5fIRIYLdv30ZFtToihs/tJjkGb29vNze3qKgoVEKog9DV1eVyudra2rW1tdXV1QYGBkeOHBk9ejSFQkH1+QCpgB/4e+g6/jfEGi6grq5uxYoVffr08fHxQS329vYaGhq+vr6qqqqoRSAQbNmyxcTExMXFBVX9iI2N1dfXl/jPKG9v76ampsGDB6ekpKirq/fv33/Pnj0PHjwYPHgwMrh27RqHw6mqquLxeAcPHmyzo6ysrMrKyocPH6I3M5WVlS9evNDW1n79+vXw4cPT0tJCQkJGjhw5bty4ffv2ubq6jh492t7evqioKCMjY8iQIQAALpebkJCQm5tbV1enra3N5/PfvXunpKS0efPm6dOnv337dtq0aVwut6ioqKysbOHChYsWLTIyMioqKhIvm4L5VrKysqqrqysqKkpKSuLi4vr16wf+eRKbmpokVSD3i7XxyAa2trboJXnH1efT1NRctWqVra2tsrLyyJEjUQw9evQICAggAgMA5OfnGxgYSCoAkRhEdjM1NfXJkyezZ88mxzB//vyAgIA+ffp0aCW8EydOBAQE9OvX78iRI8ePHw8PD1+zZo2HhweLxUL1+aytrYkCfmw228/PDwAQFhZmamrazsok/25+4DfGzsO5c+cGDBhQXV0NIXz//r2tre2JEye4XC6Xy+Xz+Ww2G0Lo5+d36dKlqqqqbdu28Xg8SOrNISnu3LmzePFiNC0QCAICAsrKykxNTck2kydPRn1JHjx4IFnvaDdRCdxTp06hWQSLxfqmTaFqbSKNDAaD3DsAIymYTCaTyUTTxEmUrAuBQFBfX4+m6+rqiPNITJANOgiyi+bmZuKaJGIgBwYhpNPpHRqDCOieIBID6hEm8TBE4HK5xAVA/leV+GXwXwK/7QQAAEVFRTc3N/TMFxcXN2XKFAqFkpSUtHz58sbGxpEjRxKW+fn5b968KSkpQb05JBvG7du3Z8yYgaZlZGR++eUXCoUiIivq4uIyYcKE2NhYGxsbyXpH1U0VFRUzMjKysrLIb8y6dOnyTZuSkZERf6pTVlZG1TsxkkVJSYkoNEqcRMm6kJGRId4ramlpEeeRmCAbdBBkF2pqasQ1ScRADgwAQJSq76AYRJCTkxOPQUFB4Tv0KJGXlycuAPK/ascVr/4PgN92fmLVqlVmZmYuLi69evUqLS0FAAwYMAAAoKWl1aNHD8JMV1dXR0fHwMAA9eaQLOipqHWbX375xcTEZOnSpWvWrOmgPmN79uzpiM1iMBhMJwE/8wEAAI/H09DQcHBwcHFxmTNnDvz78y+aEOlLgho7osLDjBkzHj9+TMyK6CxwOJzU1NSYmJiRI0dGRUWdPXtW4gFgMBiMNIAzH6ipqbl161ZycvKGDRtsbGxYLFZiYuKrV6/U1NQqKip8fX25XG5GRsb79+8zMjJ0dHTy8vISEhLevXuXmpoq2UhWrFjRo0ePnTt33rt3Lzg4uKysLCYmpqqqKjAw8NKlS7Nnz+7bt++BAwdCQkJCQ0Ml8sD3uW6i5EdPoVBImLWhQymPJwQACAS
ifck4HMl3jmWzW3tiFomBz/+HytTnzDoh6EMFuUVcQ0viSmYip7511S7QMb0HvxiD+PUpWbU8CGF1dTWaFgqFSBKMUEshIJSMGhsbO+I4iMBms5F0GWhJvQzR1NREpVIBAHw+v6ys7DtE1dn5MZ8X/yXw+Xx0uyc3Srxjiwg8Hg/9w7QIm81G4h3td+Tp6Xn16lVXV9d79+6JLLpw4QKEkMFgrF+//syZMzdv3pwzZ05sbOzjx48LCgpgqwchPT393bt3EEIvr0RPz9ibN9M3b3504EA82ebp06KFC0PbFjaH03I3mYKC+mHD/vzcWidPvpw164aDw02hUNjYyN6x40lkZO6VK+8ghDk5dWPHXkJmt26lh4ZmHjmSkJZWw2AwHj582LYgO464uLjIyEhfX9+UlBTU8uLFi40bNxIGjY2NO3bsiIyMvHLlikQ81tXV+fr6RkdHExsU8Zifn+/r62tvb//s2TMI4cePH+fOnZueni4R718Zg4hBTk6OjY2NnZ1ddna2pGKIiopavnz5gwcPfvrpJwgh8Zl/x44dZLPw8PBu3bpBCA8cOKCurq6np9fKv3P7OXDgwMmTJ/fv3793714IIep33aVLF0JECULI4/HGjBlz6tSp2tpaCwuLmJgYKyurjr6PdXLwM19ryMrKysjIiAxi6+ge+XJycq30R1dUVOzSpUv7v12z2ex79+4tXrzY19cX9QIHANDpdDSxbNkyAMD27duHDx++du1aJyenc+fO8fl8a2vrvn37VldXe3p6oqcKFotFfrzgcrlRUVFmZmZhYdnv3lXt3TvZyWnIsWPWenoaAAA+X9jYyAYAmJh0hxCw2XziAYtG46KHMKEQ8nhCBoMnEjCNxgUA8HjCdeuiuNz//z0LIUTb1Nfvpq7eck+cujrmzJmG9+45d+umlJJSderUK1vbQba2g+7cyWIweIaGWvLyn3olnDmT5Og4eMkS0z17YpWVlQ0NDSXej6mdXL9+fdCgQePHj3/58iUAoLm5OTU1lfy8derUKdTn/s6dO+KPI23g4cOHcnJy06dPT0pKatGjiorKqlWrNm/ejAQqtbW1e/bs2X6/3xSDiMGVK1c8PDzu3r0rwe/x9+/ff/PmzYgRIzIyMrhcbnV19fv37z9+/Ojp6UnYlJeXo9+RNBpt1qxZmZmZDQ0NMTExkopBnJs3b2ZkZJibm797966srOy3334rKyurqqrq2rUrYRMcHFxTUwMACAoKolKpU6dOzcjIkOBoy38jOPNJKV26dBk7duzEiRPLy8utra3pdPqpU6cSEhK2bNlSXFw8YcIE8M++pr179/7pp59Qj1bUwbWsrCwsLCwpKcnW1pZ4BXT79u1BgwYBAG7fzpwxYyBqlJGh/PLL8Ddvqk6cePnyZdnatVEAgPLy5mvX3s+cGQQAOHbsRW4u9ddf7/H5QmfnUG/vxNWrIyMicolor117Hx9fbGMT1NTEfvmyLCPjI2qvr2ft2hUbGpoZFpbdys5qaysbGWkBABQVZQ0NtV6/rujfXx0AoKmplJNThyJElnQ6l8nk6eioFhU1AgD09PSuXLkikQMuKUQ0sR48eDB9+nSywevXr/v37w8A0NTUzMnJab9He3v7gICAVatWoVu8uMeePXsWFxefPXt21qxZqEXkx+J3iEHEQFzerP2sXLmytrbWyMgoNDRUQUHhyJEjpqamrq6uxPd4oVAYFRU1bdo0AICamtrw4cO7deumpKREFm2XODt27AgMDPTw8Lh8+bK4ehkAICEhwdTUFP1W1tXVpVKp6CUWEvCUWnDmk17+/PPPtWvXTpky5fXr1/7+/sOHD7exsdm3b5+enh7qJC3S15TQJ0MdXAcMGODn58fhcObMmUPIamRmZnbv3h0AwGDwyB/SAAAnT76cP9/E1nZQamp1cXFj375dV640V1KSb2xku7qO5PEEJSWNPJ7QwEBz0iS91astExI+yfCnpdUmJZXLy8tOnNhfRUVBXb3
LiBGfHik0NLosXWoKAHj16stlCrKy6mbONFRXVxQKIUp1MjKig0aOHLHeuzfu2LHnhAv0xrsNh7eDIGtiRUdHDxs2rKGhgcVike+/KPHIyMhIpB8Wl8u1t7enUqno3iruEQCgo6OzaNGiLVu2tN9d22IgGwAAWpQ3aydMJnP27Nn6+voODg4cDufdu3f379/Pz8+/desWMrh27ZqamlpqaiqPx8vLywMA+Pv7X7hwQYLaoS1GtWbNmvr6+mXLlomrl3E4nLCwsPLycjqdnpWVNXPmzK1bt27ZsqWpqcnc3Lzjour84Mwnpbx48aK2ttbZ2fnAgQPXrl1TVFTMysoCACARQnTHFO9rStxJ0f8VnU6fNm2au7u7hoYGakcqSgCAGTMMHj8uIK3Ll5WVyc2lAgB69lTt0uXTcBo5ORmBAB44EN+/v4amppJQ+Onlp4wMhfgGr6goKxDAGTMMPDzGoI98xGbz8uoDAz9YWPQmVvwcdXXM8vLmmTMH0WjckSN7VVbSAACNjWz0LEhgYzPw+PFpeXn1q1ZZoBYVFZVONQyRrInFYrFCQ0OvX7/+/v37lJQUZDBy5MjKykoAQGNjo0Te9QUHB1tYWAQHBycmJrboEQCgpKQ0Z84cYlSZxPliDGQDYi0RebN24uXlZWFhERYWVldXl52dzWAwZs6ceerUKQjh69evy8rKOBxOUFBQdHQ0h8NJSkpKSUmh0WhaWlqSrZQkws6dO93d3f/4449Xr16Jq5c1Njbm5eX5+PjU19cnJCQoKCjs2LGDyWTOmzdv3LhxHRdV5weP55NSlJWVV69evXTp0rdv3zo5OQ0ZMsTKyurDhw8ODg6Ghoa5ubnl5eWnT5/28PBgMBgDBgxgs9lTpkxB+mTz5s3Ly8t7+fLllClTbGxspkyZsnr1arTZadOmPXz4EACwYoV5bi51586nlpZ9OBy+mVnPrVutzp17TaFQrKx0qVRmQUF9eXlzYWFDZubHrKy6wMAPdDo3Ojo/L4+ak1MnFMKCgnqBAMrKUgwNtYqLG11dw0eP7rtixUhtbeXr19P+979hAIDKStqLF2Xa2sqvX1ekp9cWFTWUlTXfv59raqpTVvYyLS0NCbzRaNwFC0LU1bv4+CTb2xuuWWPp65siEEA7O0NlZfmCgoacnLqsrLrBg7Xr6pi3b2c5Og62tOwDAIAQImGwzgNZE8ve3t7BwaGkpOTUqVMTJkxAOl5r1qzx9fUVCAR2dnbEF9z2YGNjc/HiRRUVFVtbWwcHB3GPdXV1FRUVurq67u7uAAAqlZqRkZGSkmJiYiKpwT9fjIFsQKPRfv/9d0LeTCIBAAAcHBwePXqkpKTk6uqKtjx16tSpU6e6uLhMmzZt4sSJBw4cWLVqVVBQ0Jo1a8aNGzdmzJiPHz8CAA4dOiSpGMRZsmRJcHAwj8fbu3dvi+plERERAIBBgwa5ublVV1dfvXpVR0fH29u7I8Zl/YvACsJSCnqDx2Aw1NTU0P8AhJDH44n332EwGLKysiIyLoT2JpvNFll08+ZNBwcH9F2BzxcyGDx19U/9cSCEbLZASUn095ZQCFFXF1lZCvHJTQQ2m4+eFAUCSKH8/5c5DkegqCiL/v69a1BWlsLlcmNjY4nvlGK7DzkcvrKyqL5GdTVdR0eFuClERkYOHjy4s4ncc7lcCoUiLg4iEAjQ4ylSj5NI2kNACJlMprgqCvIIIayrq0NvuTuO1mMQMaDRaPLy8t+qPfQ1MXC5XHRtCwQCoVCIzgKXy5WXl/9RuYTD4cjLy6NX3OT/Rw6HI9IVTrxFasGZDyN5cnNzDQ0Nf2wMdDqdEBxvGwwGo6amprOlPQwG035w5sNgMBiMdIF7uGAwGAxGusCZD4PBYDDSBe7b2SHa0xgMBtP5kdqvXfiZD4PBYDDSBe7hgsFgMBjpAj/zYTAYDEa6wJnv/xEpMIbBYFrkc/8pRDG8H6V0KvIGC0JIlB/pOI8iLkQqI37/l2riB79TCc9
2EnDm+3/WrVtHFHjEYDCICxcuzJs3b+HChWPGjGGxWBcuXFi1apW3tzfZZvfu3fb29keOHAEABAQEJCYm7t27l8lkdlAMNjY2Cxcu/Pnnn8mSmGFhYStWrCBmfXx87t69K8GqEeQYUD4Td+Hp6ZmQkLB//34AQF1dnaOjY2ZmpqQCaJHVq1c7Ozs7Ozv/+uuvmZmZ3t7ewcHBN27cIAwiIyOXLl06YcKE0tLSlJQUOzu7hQsXWlpaZme3Vt7kv893qwTYyamrq5s+ffqxY8cghDwej81mC4VCLpfL4/EghM3NzciMw+FwuVxU5qO+vp5Yvbm5WSgUomkmk4nWwmD+A6AS3jweb+vWrbW1tXw+n8PhLFiwgDAoLi5evXp1SUkJmp0+fTqXyz179uyHDx86Iobq6moulwsh9PLyKi0tJWxqa2vnzZuHpvfv3y9eb1mCMbToIi4u7uDBgxBCT0/P+Ph4CKG7u7tkK/SKIBQKy8vLIYRv3rwJDAxcsmQJKhw9ZswY4nb0/PlzCKG/v//x48fRLkAIN2zY0HFR/SvAz3yfCA8PDwoK8vPz4/P5+fn5pqamfD7//PnzhYWF165di4+Pt7GxqampMTMzO3fuXGJi4ubNmwsKCjZv3gwAOHfu3Nu3b0eMGPH48WPxknUYzL+avn37AgCePn06efLk7t27MxiM7du3L1iwgDCAEPbt23fChAnh4eEAgKVLl1pbWysoKAwbNqwjYtDR0UFqmaWlpeQCQMSzl1Ao/Ouvv2praxcsWFBaWtoRMbTogqiM2L9/f1REQuJ1CkWgUCh9+vQBAISHh9vZ2amqqhYWFgIA5OTkUNEVAICVlRUAgMvlWlhYoF2g0WjkurXSCc58AAAgFApTUlKSkpJ69OgRGhpqbGxsZWWVnZ3do0cPVHBEXl5+4sSJXbt27d69u4eHh5WV1caNGxkMRmpqKgDg1q1bY8eORers4iXrMJj/ADExMVOnTgUAKCkpLVmy5MiRI8QHLT09vW3btiUnJ586dQoAQKFQnJyczpw5I/Hap0QMAIDy8nJ00xensLDQ0tLS1dXV2dn5woULHRFDiy4kXhnx62lsbFRXV1+/fv3du3f9/PxoNJq6ujqxlMlkcjicKVOmoNmIiIiZM2d+z/A6ITjzAQBARETEb7/9Nn78+GPHjqEPGGvWrDl8+HDv3r0VFRUFAsGMGTM8PDw4HA66svl8/o4dO4gCVxs3brx27dovv/yipqYmXrIOg/m3IxAIBAIBetKSl5c3NTW1sLDgcrlkGx0dHSRTHhYW5u7uvm/fvsjIyA6KAQBw+/ZtBweHFi179epVXl4OADA0NESlCiUeQ4suiMqIFRUVEqyO9EXev38/fPhwFIyPjw+Hw3FxcSGWCoXCBw8erFy5kkajoZakpKRRo0Z9t/A6J1jDBdTU1Pj6+vr7+6urq+vp6eXn5/v7+y9ZskRBQWH8+PEUCqW4uNjV1XX06NH29vZFRUUZGRm6urrv3r27du0aqlH5+PFjGRmZmpoaXV1d8ZJ1GMy/nbi4uIkTJwIA8vPzAwICxo0bZ2Njo6ysjGrjZWVl8Xg8NTW1devWAQBMTU1jY2MrKys/l5naGQMiPz/fwMAAAIBiGDduXHx8fFlZWXV1dc+ePR0dHe/fv5+amrp+/fqOiEFFRYXsIjU19cmTJ5s2bXr06FFqaiqFQpk0aVJH1ClskbCwsLVr1wIA2Gz2gwcPuFwumnVzczt9+vS6desqKyuDgoL69et3+vRpOp0uXulJCsEj2T8LUfcLtFSFDhWo43A4HA4nLCzs559/rqure/369bJly8SNMZh/NWw2W0FBAb3woFKpGhoaRCFAVJ+PSqVqa2sT9gwGQ+K3V3IMZBfk/1Myzc3NysrKcnKS/HEvEgPZBZ/PRxM0Gk1NTU2CTr8IcSioVKqamhpRYpMIiYxAIODz+bhKH8587aWxsXH16tUDBgwYMGCAjY1N7969f3REGAwGg2kNnPk
wGAwGI13gHi4YDAaDkS5w5sNgMBiMdIEzHwaDwWCkCzyqAVemxWAwUorU9vPAz3wYDAaDkS5w304MBoPBSBf4mQ+DwWAw0gXOfBgMBoORLnDmw2AwGIx0gTMfBoPBYKQLnPkwGAwGI13gzIfBYDAY6QJnPgwGg8FIFzjzYTAYDEa6wJkPg8FgMNLFf1a38/jx4xQKhcVi8fn8bt261dfXHzhwAC2qqKjYvn37pUuXUPFioVDo5+fH5XJXrVrV4qaoVOqZM2f27dsnvojP52/YsMHQ0LCsrExbW5vL5fbr12/JkiUS3JHq6uqnT58mJCT8+eefnURiNDIy8sOHD+b/x95Zx0WxNQz4LN2dgiIKUoogioIIdiKIBTYoKhZe64rXunYLJrYoJgKKEqKEII000o3AEksvu2zN98d53/nmnQ0x8Hp1nj/4DbNnTs+cyedYWEyfPh27Hq3JpUuXnj17dvr06TY2NgAANpvt7++fmppqYWFRX18/c+ZMS0vL/s7k+fPn7e3thw0b1t8JQd6/fx8UFHT+/HnBfQlHb2/vpUuXDAwM5s6d+9VJIwji6+urpKTk4uKCXR8VFfX+/Xue/fY70tDQcPjw4bKystmzZ1dWVlpbW+OyAQA4f/58RETEixcv4NTh1dXV+/fv19DQOH36NAwQGxvb3Ny8ePFinkmgBfnubfr06dOOjo5169YJCNPb23v79m0hIaGWlhZHR8cRI0ZwOJx79+6JiIh8+vTJxsZmwoQJ/I42r169mjhx4jdO0V5dXX3w4MHLly/LyMhw/xoTEzN58uS+xAO7qI+PD7qGZ312dnZid95flV/2mk9CQmLXrl3i4uI0Gm3btm0qKiroT1paWtnZ2ai2TUhISEtLq7i4mF9UbW1t165d4/lTT0/P6NGjt2zZkpeXZ2Jisnfv3vb2dn7xvH79+isKcuzYsfHjx586deonGfaampqeP3/u5eVlbW2N+wmtSTk5OSaTSaFQ4HphYWErK6tXr165u7tv3rx5zpw5hYWFvb29b9++jfwvTCaztLQ0MjIyKiqquroaJhSJoaur64vy6e7uPnTo0M+WJSsr64ui5YelpWVcXBx3XxKchLi4OIlEqq+v/5akSSSSjIwMrDQsw4YNS0tLw63kcDhv377te+Sf7bSampq6urpRUVFOTk6GhoZLlixJSUnBhRk0aFBUVBSLxYL/6ujoMBgMbLVMmDBh5syZ/JJAC9KXNu3jXgaDKSoqFhYWCg557do1Dofj4eFhbW1dUlICAHB1dZWWll6xYoWXl9f169cfPXrE72hz8uTJO3fu9CU/AtDR0SkuLmaz2dxt5+fnZ2xs3Md4YBcFmD7Jsz5xO++vyi97zbdhwwbcv8nJye3t7YqKiuPGjRMWFk5KSoqNjV2xYoW+vr6IyH/qoba29s2bN0JCQq6uruhIo6enJycnB5epVOqDBw/Wr18P/5WVlV22bBmaColE8vDwCA8PLyoqmjt3rr6+fl5eXlNTE4lEEhMT27p1682bN21tbQEAHR0dz549MzAwePPmzcKFC0eOHIlNOjMzk0wml5SUjBgxIiUlJSoqatasWQwG482bN3Q63dHRUUFBITIyUkJCIikpacqUKfX19RwOp7KycvXq1bdv37aysrK1tW1pacnPz6+pqXFxcamtrY2JidHV1c3Jydm4caOkpGReXl5RUZGEhMTcuXN7enpevnxZV1e3bt06WVnZxMRECoViYGBgYGAAAGCxWIGBgWw2e+TIkUZGRv7+/tXV1bGxseiZJrZi0ZpEFyASEhKwPhUVFRUUFAAA4uLiSUlJqqqqrq6ur1+/bm9vHzJkyOzZs5OSkg4cOGBnZycuLq6pqfn69evBgwfLyckVFhbCK8Xw8PDq6uo1a9bk5+dTKBRzc3O0mI2NjTExMfLy8tLS0j09PQYGBoaGhmj2xowZ8+LFCxUVlcTExEmTJllaWrq5uZmbm2tpaTU2NsJmQgvV0tKC1raQkFB
AQIC5uXlkZKSrq+vAgQNhmIKCgpaWFgqF4uTkxLPgbDYbTaKlpQUNTKPRUlNT6+rqHB0dYfiGhobg4OCFCxcyGIzKysqGhgZnZ2c0JwLaMT8/v6KiIjMzU1NTE02XQqHExcW1tLRw5/PUqVOpqalaWlpqampotGw2G82PjIwM2oEbGxthpx0zZgw2AG5fExYWhp2fw+HIyMgMHjwYQZDr16/Lysp2dnZ6eHiguxKLxTp69Gh7e3tLSwv2TO7Dhw9dXV2jR4/G1TO2IAwGIzY2FrYptvdi6yctLQ3dy7C7Ia5LJyQkwGDCwsIsFuvKlStycnIrVqyAvQvdCuZNSUnJx8fHyclp8uTJtbW1+fn5b9++vXfvHizy1q1bFy9eDEdE7NEGAJCUlOTp6blv374tW7YICf3nGqOrqyswMFBfX7+4uHjNmjU8M0mn0zMyMpYuXfro0SMzM7NRo0bBToK2HRztGhsbCwsLXV1dq6qqIiIiYL2tW7cuPDxcSEjIzc3t5cuX0tLSw4cPf/jw4apVq+B+h/ZJNTW1Dx8+GBgYaGpq4o5FaB9OT09PS0szMjLq42Xlv4hf9ppPVFQU+y+TyQwLC5s5c+Zff/0F17BYrAkTJkybNg09FWUymRcuXJg3b15YWFhAQADPaEkkkoSEBPZf3CH+0aNHcnJyNjY2Dg4OAIAjR45MmTKFRqOZm5vLycnBYQ8AICYmdvXq1Y6Ojvnz57u4uOCSLisr8/HxGT9+vJWVlYqKytSpU9XV1ZcuXbp48eJJkyZNmjSJzWa/fv06Ojp62rRpEhISZ86cmTBhQmlp6Z07dzw9PXfv3g0A8PPz09fXb29vDwsL09LS+vvvv/X19dva2l68eFFVVfXo0aNFixalpKR0dnYeOnRo4sSJQkJCf/75Z3V1dUhIyIwZM2pra2FWt2zZYmpqunjx4mXLltXW1trY2Ghra6N7ApVKxVUsP7q7uy9fvjxp0qTJkycbGRkBAGRkZOTl5aWkpAYMGKCqqiosLCwjI6Oqqqqjo9PY2Dh37txx48ZJSkrKysrOnz9/1KhRMJ7Ro0f7+vqKiYnV19cbGhpiiykqKurl5TVo0KBBgwY9f/68vLwcmz1hYeHc3Nzw8HAPD4/NmzcLCwsbGxtbWVmpqamhzQSTYLPZ2NqWk5N7/Phxc3OzlZXV0aNH0RLduHHD1tb2wYMHra2tPIuMTQIbOCAggEqlDh8+vKOjAyYXFRW1YMECdXX148ePm5iYYEcXAe1YX1/v4+Pj4OCgoaGBhkcQZM2aNQ4ODqamptz5NDU11dfXNzY2xkaLzY+fnx/agdFOi8swT/z9/U+dOrV37141NbVr165dv3592bJlcAENc+HChVu3bh07dgw77CEIkpOT8+bNGwUFBWw94wqCIAhsU1zvxRYEzTC2FNxdGrszVlVVrVq16v79+zU1Ndit0OwtWbJk8uTJpqamfn5+AwcOzMnJ0dXVRfM/ZMiQqqoqBoOBrQr4GCU1NdXZ2dnIyOjly5fYLnHlypX29vZRo0bxy+TgwYN9fX0lJCTa2trS09PRbdG2g/9GRUXp6uoCALS0tI4fPy4nJ6enp7dhw4bly5eHhoZWVVUhCPL69Wt1dfXY2Fj0NAjtkyoqKrA+ccciNLni4uLExMTFixe7ubnV1dXxa/d/Kb/syIdDWlra2dn53bt3ZDIZrrG1tYXDxqdPn+CawsLC5ubm/Pz8LVu2jB07lmc8UlJSq1atEpDQq1evGAwGjUaDN0gNDAymTZtmYmKCCyYpKSkvLz9mzBhzc3MFBYWYmBhs0tra2np6epaWlvC5CACgvLy8t7dXREREV1eXyWR++vRJXV3d1NR07NixcnJyqqqqGhoaenp6WlpakpKSLBaLwWB4enrm5ubW1dXV19dLSEjIycnp6OgYGRnV1NS8evXKzMwMAHDs2DE5ObnU1NTi4uJRo0atX79eS0srKytr+/bt6M3MsLAwPT09UVFRa2vrmJi
Yz1YsP2RkZDZv3hwZGVlcXPzo0SO4Mi0t7f79+2/evIH/9vT0/PHHHy0tLRs3bsSdUqD/qqmpWVhYxMfHt7S0DBw4EFtMDQ0NFRWVUaNGGRgYqKqqcmdPTk5u2LBhSkpKuEMVrplwtV1dXS0rK2toaAhrD91qz549kZGR3d3dDQ0NgsuOC2xjY7N3796EhAQtLS0AwI0bN4SFheHoNXv27ClTpkhJSaEbCmjHly9fDh8+HACAXoYCAOC9VhEREXQlz3xio8XmB9eBIbgM82TFihV3797ds2fPmTNnIiIi4B0/NTW18PBwNMy7d+/09PSkpaWxTx9IJJK2tjZcwNYzriDi4uKwTXG9F1sQNE5sKbi7NBY9PT0ZGRkDA4PKykqeZRcRETlz5syLFy+8vLxevnypqqqKPdFpaGiQlpbGthe6Pisr6/r16xoaGt7e3uh6KSkpePvB3NycXyb53TjBUVNTA58gioqKysjIGBsb6+npqaqqiouL6+rqlpSUCI4HrU/csQi9bR4ZGYkgSEFBwf3799ED0S/DLz7ycTgc+DyvqKjowoULkyZNgndmUBQUFAYMGACXtbW1a2pqbG1t7ezs0NP/L01o0KBBHA7Hzs5OV1eXSqWuWbNm9+7d8NE9m83muS2JRBoxYgS/pBEEQRBERUUlNzcXxiAlJaWuri44PwiCrF692sjIaMiQIdwTUUlKSkZHRwMAuru74fFi+PDhdnZ2YmJiDQ0N4eHhurq627dvh4FVVVXhUwESiQRvZGEj5Fex3PmBC+Li4mZmZu/fv4f/Wlparly58s8//4Tn41JSUgcPHkxOTm5qahIQ26ZNm44ePQoPl/yKKbjdUWCVYpsJANCX2maz2S4uLtOmTVNSUhI81RebzcYFFhYWjo+Pf//+/ZMnTwAArq6uvr6+8IHTiBEjAgMDt2/fjp6QCWhHeBUL/ttJeK7kzicsFzZabH5wHRgGxmWYH4MHDwYANDQ0aGlpwVspDAYDNhNEXl6+ubkZYPYXCM8K5C4d/Ivrvbj6gRnGlqK8vBzXpdFguHRxZYcrT5w4AQCYMGHCnj17oqOjJ0yYwGQyKysr4a/v379fsmQJvATEFurhw4fXrl1bv379tWvXampqMjMzuQvIL5MkEglmj8Fg4HKIPYYMHjy4s7OTZ0OQSCQEQT4bD3e1k0gkNTU1uKyjo0OhUOzs7CwtLQW8vvAv5Zd9zgcAIJPJcXFx3d3d1dXVdDo9JSXl2rVrLBYrOjra2Nj48ePHDAZjz549oqKi79+/LywshOf4tra2tra22J0kJyfn06dPiYmJ48ePh2+gxcbGYhPKy8srKCgIDw+3s7NzdXVdsGDBlClTpk+fvmDBAg8PD09Pz8WLF0tLSwsJCd25c2f16tXohk+fPlVVVfXw8BgwYAA26dDQ0Ly8vMbGRjqdXlZW9vr16/Xr1x8+fPjcuXNqamqenp5CQkLZ2dlkMtnR0TEtLa2uro5MJufm5n769MnW1rahoSE3N5fD4Zw4cUJBQaGgoMDGxoZMJldXV6ekpLS3t1++fPnmzZtOTk7jx4/fsWPHqlWrbGxsZs+evXTp0ra2tocPH1pZWaFn9z4+Pvfu3SOTyerq6pMmTTpx4kR+fn5tbS08E8dWbFRUVEJCQmFhYV1dXXZ2NofDcXBwgLtfUFBQa2vr9evX29raSkpKrl692tvbm52dLS8vLysrm5SUZGdnR6VSq6qqWltbL168uHDhwvv378N3kVpbW+fNm4cdukaPHq2oqAjvuGKLOWLEiNra2oKCAk1NzZycHAkJCS0tLWz2cnNzFRUVKysra2pqioqK4B0nAwODo0ePwmaC8SspKWFrm8lkVlVVwSqtqalpa2tTVFRks9lkMvnUqVNUKjUoKGjChAlkMrmqqgr2pc7OTvhsGCYxZMgQbGAFBQVdXd1JkyYZGxu/efNGVlb2yJEjs2fPfvjwYVBQ0Lx585ycnNAbmALa8cKFC7dv396/fz8
cJhkMhpiYmL6+vq2trbu7++DBg5uamshkMjbplStXHjx4MDY2FhstjUYzMDCYNGmSubm5kZERtgPDTtvV1QUzbG5unpeXd+zYMXQIbGhogK9d+Pn5VVVVubi4HDx4sKury9nZ+eXLl0wm86+//rp8+TLcj7Zv3z5t2rRt27YVFxcjCNLQ0KCpqclisRISEvLy8j59+oStZxUVFWxBSkpKYJt6eXlhey+2IG5ubjDD2N1QQ0Pj+fPn2C6N7oyNjY15eXkNDQ15eXm6urq4nRcGrq6uPnDggIWFRUxMzPHjxyUlJV+8eHHs2LEpU6YwGIzCwsJz587hjjYtLS0hISGurq5SUlJUKnXYsGG7du2C7V5fX19dXf327dvly5fzy6SUlJSGhsaBAwfgVX5lZSVscWNjY9h2kyZNAgBMmjQJJl1eXt7Q0FBaWpqenl5QUFBfX19YWKimprZmzZr9+/efP3+eRqNlZWXV19fDzgP7JLqPzJkzB3ssYjKZcOfdu3fvwYMHS0tLx4wZs2PHjm8+Hv9kIL8NdDod/YsgSE9PD5vN5g5Go9H6Eo8A4M1GuMxms7u7u+Eyg8GAZ4WQiRMn1tTU9Pb29j3p3t5eJpMpOAx3VvlFi12PLsMLFNwmHA6np6dHcCqfrZbvC7YeBBdTcPZg/WObCfuT4NpmMplsNpvFYgkOBpPABmaz2XQ6nedWPHMiuIBUKpXFYnGvhG8DcucT7XJotLj8YDsw7LS4AH1paw6HQyaTudfTaLSOjo7PdnWeBcHFgy5j6wfdy9BS8OzSuJ0RBVt2bEJkMhlXyRQK5dv7vIBMMhgM7h6CPVwgCPL48eOWlhYB8TOZTA6H89l4uI9FKH1vqX8XxJzs/wy9vb1jxoy5cePGuHHj/um8EBAQ/FtJTk62srL6lhh+z2MRMfL9M7S2tjY3N5NIpB/2qTUBAQEBN7/nsYgY+QgICAgIfi9+8Xc7CQgICAgIcBAj33/o7Ow8cOBAQkKCgDA0Gm3//v2pqak8f+VwOHfu3MF+CRQVFXXw4EF+ye3atevjx4/omtjYWPj5PHSmbNq0CX2vHRfb+fPnoTMCDVlVVcVzk34FzTAkKytr165d2AAbNmwQcEeBQqH4+fndu3cPrYT3799v2LBh3bp1paWlAID6+nr4qV9NTc21a9dw3zn4+fl9uxfqB/DixYtXr14dO3YMXcPd9F8HhULZt2/fN0aCBd0FPn36JOALxZycHE9PzytXrtBotOfPn+/YseOLDHC4bvN1oLsA5OnTpzdu3PjGOL+avLw8d3f3fyp1gq+DGPn+A9ZWx0/9Jykp2dHRwe/TFm5nI09xIpochULp7u5G16DqwpCQECqVeurUKdxXw9z2QjTkhw8feG7Sr6AZfvfuHZ1ONzExiYqKwgbAqTpweHh4zJgxY8WKFevWraPT6QCA9PR0+BGIvr5+d3f34sWL58+fDwBgMpk9PT3oZ0YQOTk5OED2Ee42/Y7STn5ERES0tbXNnTt37dq16Erupv86lJWVsd/eoHx1udBdQFtbOyYmhl8/HzlyZEpKir6+vqSkZFZW1sqVK83NzfueimBFZx9Bd4G+6zcF83VOXciIESOgD/Mf50ulrL8zv+b3fGw2G9Va/vXXXxkZGah9rra2FnoRJ0+efOfOne3bt0NdMqpeRJ1+PF2FwsLCNTU1f//994QJE6ZMmYJ1Wg4fPhx1JeDEiTgd6Lt371paWnCn1VBdaGpqGhoaqqCgYG5ujtqneNoLFRUVYchBgwaFhYXBTfT09FADZ3t7O5RYysvLjx07Fl2fl5fX3Nzc3NwsKSkJpaNYCyK3ubStrc3X19fFxQV+Wrdy5cro6GhDQ8PGxkaY4VWrVp06dQp+/xQaGlpUVLRhwwYGgxEUFLRq1aoPHz7gkoMkJyfLy8sLCQmpqamlp6crKiqeOHEiNTUV6sGioqIsLCxkZGRERERSUlK49f/CwsJ0Ov3s2bOKioqrV68mkUivX7/
u7u6WlpaeNWsWAAD7L3eb2tvbQ3uhvLx8XFwc9Cg6OjqiBsj6+vrY2FhlZeXMzMwtW7YoKyvDdJlMZlBQEIlEmjBhwoABA3Jzc4uKihgMhouLS3V1NVaqyWAwHjx4oKWl9eTJk5ycnMOHD4uKiuKaHtU2qqio4JSVLS0tkZGRwsLC8+fPZ7FYWLEq3La8vDw5OXnJkiX8ZKS9vb2wKVesWPHmzRu4Ozg6Or569WrdunUIgrx69crV1TU2NrapqcnW1lZLSwvtwPPmzfvrr78uXLjAc/8SFxcXFhbev3//0qVLoYgO/K/xEvU92traRkRESElJJSQkmJubOzg48FN0cjichISErKwsPT09MzMzeA6Xm5sbFRW1atWqp0+f6ujozJo1y9/f38nJCe4CLS0t/PSbAAAWi4VLGmv4FBMTQz2Zzc3NWKcuh8O5du2akZGRnJxcbGysq6trU1NTU1OTjY0NVmCLPcLAenv69KmKisrkyZOxJ3z8dh91dXVcm/I7anHXIU6li24lJCSUlpaGij07OzufPXsGZTFQcJqTk7NhwwYZGRmsaxfbpdlsNnrQGDFiBKquRfv/L8Ovec2HIAiqtSwtLcXa51Avoqqq6pMnTxgMhqamJvb2y2ddhQ0NDUuXLl2zZk1RURHWaVlVVYWmjvUN4pycL1++LCoqWrhwIfK/DguoLlRXVzc0NDQzM0OHPYSPvRANOWnSJHQTrIETlVgOHjwYu55Op3t7ey9cuPDWrVv19fVYCyKFQuE2lyoqKqalpXV0dBgaGoaHh4uJiZHJ5EGDBqEZVlNTmzt3rpCQUFtbm5mZWU9PT3BwsLCw8JkzZ5hMJi45tMg2NjZxcXEcDodOpwsLCw8fPrympsbCwmLmzJlUKtXExERYWPjZs2eDBg3q7u6urKy8f/9+Tk4OtiHKysoWLVoUHBx869atp0+fVlRULFy48OnTpwEBAbh/udu0u7sb2gs1NDSwHkXUACkpKenl5WVkZKSurg49qJA///xz4sSJ5ubmQUFBlZWV586dW7x4MYvF8vT0xEk15eXlhwwZMnbsWBcXl5CQEA6Hg2t6rLYRp6xks9m7du1ydnYWEhJKTU3FNh+aE9iv+MlIFRUVsU2J7g4jRox4/vw5jUYjkUhycnLv37/v7Ow0MTFBJwyCSEtL5+fnC9jFtm/fTiaT0WEPWxas77G+vh7Ov7F27VovL6+amhqeik4AQHBwMJlMHjdu3NmzZ9ETTQMDg9u3b0MnQHl5uZCQEIvFEhcXh7sAP/0m3JZEImGTrq2txbYv1pOJc+oKCQkxGIzs7GxTU9OHDx8qKCg0NTUNHDgQt7OjVQpt1AUFBZKSklOmTMHd5+C5++jo6HC3Kb+jFq4Oa2tr+eVk5MiRWLGnrKzsq1ev8vLy3NzcFi1aNGLECHFx8cDAQJxrF9ul0YPGwIEDsepaDocjoDP8G/k1Rz4RERFUa4mzz2G9iPBMjZ8cj5+rcOzYscOGDZs5c2ZKSgpPpyXON4jTgT569Ah+N4N1LaLqQm742Qt5gjVwohJLXV1d7Hp5efmBAwcqKCjo6enV1tZiLYh1dXU8zaXr1q3z9/dvaGiorKxsamoSFRUVFhbmzrCioqK2traxsXFFRYWcnJy8vDwAAJccGvj06dMJCQm+vr7Z2dkWFhYAAElJyT///NPKyqqwsFBfX//YsWPz589/+/athYXFkSNHVq5cCcVXKMOHD9fR0VmxYkViYmJISAg8Cs+aNSs0NBT3r4A2xXoUsQZIdXV1JSUlfX39lStXYu9lpaena2hoDBs2bMuWLW/fvoW+rpkzZ4aGhuKkmtisiouLAwBwTY/VNuKUlQUFBRISEiIiIosXL54wYQK2+dA40f7DU0aK7XXW1tbo7gAA2Lx58927dyMjIx0cHOzs7JSUlD5+/Mg9UxKVShXg8Dt8+HB+fv7x48fhv9iyYPc4KJU1MzMbMGCAo6NjbGwsT0UnAKCxsbG4uNjY2Hj
kyJGw58B6mzNnTkREhIiISHBwcFlZmZGREc9dAKvfhGuEhYWxScfExGDbV7DMc+XKlc+ePevu7paUlIyIiKiurh46dCh2Z4+Li8NWKZPJ3Llz54wZM3jWFffuA09ocG3K76iFq8OYmBgBOcECK3n48OGDBg2SkpLS1NQcMmRISUkJTmaL7dLoQYPD4WDVtehp/S/DrznyYcHZ57BeRCjW4ie1E+wqJJFIxsbGOKcl/AnnG8TpQHm6FgHGoYdbz89eiFuDrkQNnLgM81uPtSCKiIjw1IfOnDkzPj6+qKho586dq1evhvIkbCahDQcXs+APZgYNGnTs2LGenh4vLy84MEAUFRUHDRoEM1ZfX6+srBwdHb1q1SoWi8XzqSFsCE1NTfgQFDYE7l/Ap01xJlWehkw6na6jo4P+W11dDV8jKi4uxqYCrfmCwTUlT0skRFJS8t27d/BEG77Kwd18AqqXzWYLkNA6OzvDa1AxMbFbt25VVlaOGjWKOzZxcXFJSUme8XM4HHl5+YiIiCdPnly5cgX8r4JSXV2dp++xp6dHV1eXX7ZdXV1pNFpKSgp2NgwAgLu7+5UrV2RlZfX19S9fvgwHKjQSfv5JHDBpbPvi/LS4nqCkpDR48ODDhw/fv3/fx8cHjsQ8d3aIqKjoihUrNm3axDN1nrsP4L9LAv7OTFgQATnhJweGQJ8nTmaL7dJoyC8VBf/r+DVHPhqNlp2dnZCQQKfTp0+fHhoa6uzsfOXKFR0dnUuXLjU0NEAvooODw5YtW2JiYmpqaqBqMi0tTUpKCjr9QkJC3r17B12FaMz6+vpRUVFPnz41MjKytLSETsuQkBDotITORnV1degb9PPza2pqYrFY0MkJJ3DZvn37pUuXTpw4UVlZib6JgKoL6+vr09PTExMT0YlYUQ0jjA3aC5OTkzs7O2FI7CbQwLljxw4ajZaWlgYllgAA7PqsrKyqqqr6+vqPHz/m5OS4uLjk5OQ4OTnBZxvYrKKlFhIScnR0tLa2XrBggbKyMvQRwwzDZzbHjx+PjY2tq6urqalJSkrKyckpKSmpra1NSkrCJYfG2d7efuvWLXFx8a1btwIAbty4sXfv3kePHk2fPh1N+tmzZwsXLpw8eXJ1dfXLly/t7OzQzeHLRK9evSooKNi8efOuXbtycnIiIiI+fPiwbds23L+opxHbpvD25vv376FHEfzXkFlRUREZGUmlUltaWoKDg2/fvo29E3j8+PHJkydv3769q6trzpw5WlpaQUFB169f9/b2Li4uRqWa+fn5VCo1Pz8/PT29tra2vr4+NTUV1/Surq4eHh7r169PT09H1aApKSk1NTXKysoTJ060srLat2+foqIitvnQnLx7966urg5ulZeXh5ORNjc3o00pJyeH7g4AAAkJiWnTpsEhhMViPXz4MCYmJj8/PycnB+4CCILQaDRDQ0MAwNKlS3E3mXNyckpLS8PDwyUlJT08PLZs2XLixAlnZ2e0LA4ODtg9DgDw9u3b8PBwRUVFa2trnKITlretrS0wMDAjIyMgIODmzZvoxGEAgGHDhikrKzs5Obm7u8NTora2NrgLoM2anp6O6jc/fPiAzS2atK2tLbZ9S0pKzp07N2bMGDs7OzQe7IYeHh46Ojp6enoDBgyAT46xO7uVlRVapfn5+fA+bU1Nza5duyoqKrS0tLAzu3LvPrhd8rNHLVxB+OXE2Ng4Pj4etQq3tbWVlZXl5eXl5ubW1NSUl5dnZGSUlpZ2dHRgJcbYLo0eNFB1rZ+fn6en5683V8Pv4u3EeimxXkQGg8FtBeTpKkSBz67Qf/k5LXG+Qaz+jsVi0el0bteiAPjZC7npi6VT8K88Q6K5xVkNBawUTF1dHbZuORxOQ0MDrk46OjoER0KlUvv4L8825RYVYg2QhoaGPT093HWOszXiEhUMrum5LZFYPtso/EDLxW+rz1pP4QMz5Et0rLiyoBH+/fffjx49+mwtXbt2rbGxsbS09O3bt7G
xsbiY4QJ3XfHTb/JLGqsqxXoyecYD08UmKlhgixIcHIyrT55F+GybCqhDfjnh6d7kBiezFWC1/SJR8L+IX/PdTm7Q6WSFhISw5y+4CWyxK0kkEvYuHIqIiAj20SCJROJ5Uwg3ZRd2PlthYWHBc/p8NjYBYBPqy3ruX3mGRDMsoMa+CHRyKAiJRMJOrwqBrzYIAFctAv7l2abcN5rgrxISEvX19a2trUwmk7txcb2i700DuJpecE/4bKPwAy0Xv62wHRgtMrrm06dPlpaW6HPlPiaKKwuMEEGQ+vp6SUlJfjdOUQIDA6uqqpSVlTU1NRcuXIiLGS5wdzMBHY9n0rjCogs844HpYn/it7PjGDt2LK7meRbhs20qoA755YTnvVNuYD2gjcuvlfsY278Rwl5GQMCDT58+UalUaWlpfm8eEfQFOp0OZzrV0dH57IEezvL62dOd/kj6Z+aXKchPBTHyERAQEBD8Xvyab7gQEBAQEBDw49d8zsdgMG7cuFFRUWFqasrhcMrKyjZv3jxgwIDIyEgGgzF37twfk42wsLDc3FwLC4vp06cLDhkaGhoZGXns2DE5Obn29vbr16+rqamNHj365s2bOjo6ysrKnZ2dkpKSTk5OV65cYTAYysrK4uLiK1as4L41RKPRcnJyvnGqLQ6H4+fnx2AwPDw8+rhJdXX1wYMHL1++XF5ebmho2PfnQ9/Oq1evwsPDR40aJSoqWl9fb21tPXHixJ+2rQsLC69fv66pqclms4cOHbpo0SIWi3Xjxo3y8vKRI0fC7rp+/fpXr16VlJSsXr0afm35pSAI4uvrq6SkhNPflJaWPnnyREVFhcFgdHR0rF+/Hn6W4OHhgT55/fTp09OnT2VkZEpLS8+ePYuL+bt0sK/g/fv3QUFBPj4+fQz/vTpkZ2fnkSNHXF1dTUxMvi6GPsKzGxcWFsbExHB/L8FisV68eBEbG7tnzx7BN+Rh7/r48eOoUaNoNJqzs3N7e/vt27fl5eVXr16trq4eFRWVmpqqrq7e2dmpoKCwevVqDodz7949ERGRT58+2djYTJgwoT/L/Q/xT75e05+EhoaOHz8eLqelpRUXF7e1tf31118nT578MRlobGxcs2YNh8Pp6urqS3g9PT0HBwf4gtm9e/dKSkoQBBk9evTbt29hgAcPHiAIsn///h07diAI4uPjM2vWLO54Tp8+/UVvjfLj9evXf/zxxxdtMm7cuPb2dg6Hc+XKlW/PQN+BDkz4EhqVSg0ODv7J23rChAmvXr1is9nz588/deoUwqu7hoaGjh49+ltyde/ePVwNZGVlWVhYUCgUBEG6u7sXLVqEIMjOnTt37tyJDTZv3ry8vDwEQfbv38895feXdrDY2NjvMq83nU43MzP7ok2+V4d0c3NLSUn5lhj6Anc3ZjAYz58/nzlzJnfgwMBAPz+/rq6uvrzvHRkZaW1tDRd0dHTYbLarq6uPjw+CIL6+vsuXL4cNWl5evmXLFgRBVqxY8fTpUwRBOBzOsmXLHj58+F0L+lPwa17zAQAkJCTQD585HA6cdHHIkCGoSxMLavAzMDCAvkddXV0LC4uWlhbUXCcmJhYYGAgFj0uWLME697CbwwjZbLa/v391dXVsbOzkyZOxDsmMjAwymVxSUrJgwQL4fRJk/fr1oaGhhw8fPnjwoLS0NHxjEC3Fp0+fZs+eDQAQFxeHHwCNGjXq4sWLuIKEhYXp6OgICwvTaDRUOlpQUEAmk2tqajgczqZNm0gkEqpqlJKSwukT0ajQ1/9Qk+fKlStv376tpqY2b96858+fm5iYaGtrY+sBbkIikYYOHfrs2bNFixbhsldVVRUREQGTW7duXXh4uJCQkJubGwAAKxLEKivz8vLQ6pKSkuIpEoSP/WFF5eXlOTk5fce2XrNmDdZI+dm2xpo8GxoaUAsi+v0ybEQSiSQkJGRmZgY/H8Z2VwkJiWHDhtXW1gp4nQGroKytrcX6QiUlJfPz8ysqKjIzMzU
1NbFbeXl5eXp6KikpAQCkpaX//vtvwOsNQyUlpbNnz167dg2OiJ/tYDIyMvwEpGJiYlDrunjxYlREaWdnh3WNWllZ4Wyo2Arn7pA9PT2w17m7uz99+vRbOiQAoKCgoKWlhUKhODk5JSUl4RyzPBW7/eTD5NmNR4wYwZ3nxsZG1O5rbGzMzyYKtWoAAHFxcbg8evTourq6jo4OCQkJCQkJFou1c+fOgoIC+OrpkCFDPDw88vPz3759e+/ePZiZrVu3Ll68eOnSpbw74r+WX/k5X319/ZUrV7Zt2xYSEiIgGNbgFxQURKVSHR0dHz58yGazseY6EomECh6xzj3s5micwsLCNjY22trakydPxjkky8rKfHx8xo8fj5PAioqKPnv2zM/P79WrV9j1L168uHTp0rJly3p6euCavLy8gwcPXrt27f79+7iyvHjxAuo9sdLRysrKixcvLly4MD09/fHjx1hVI7c+EQdWOhoYGKihoQFlYE1NTfr6+jx9kgAAMzOz58+fc8empaV1/PhxOTk5PT29DRs2LF++PDQ0tKqqCisSxCkr0epSUFAQLBK8cuXK0aNHeZbiW9oaa6T8bFvjTJ5YdSouJxEREdu2bauurj58+DBcA7vrgQMH+jLhDlZBifOF1tfX+/j4ODg4cH8lkpWVhR1LUMEjjpMnTzY0NJibmxcUFKAiMQjPDiZAQIpqXbEiSjKZjHWNgv9VR2Jj45k9tNd5eXl9Y4cEANy4ccPW1vbBgwetra04xyxPxS7oZx9mX7ox1u4r2CaK0tra+uzZMw8Pj4MHDyoqKsKVpaWlLBYLe/5tbGyck5Ojq6uLnocNGTIE7qEC8vNv5Fce+QYMGLBp0yZvb++pU6cKCIY1+D169MjGxkZUVPT8+fPl5eVYc11TUxMqeMQ69wQLAAEAOIektra2np6epaUltxZBVVU1KCho3bp1RUVF6Mp58+Zt2bLl6tWr6Ln5iBEjxMTEenp6uFOsqamB3nesoFJDQ0NfX19DQ2P58uXR0dFYVSNOn9jR0bFr165du3bl5eXBCHHSUXt7+w8fPrS2tsrKypJIJJ4+SQCAvLx8eXk5d1WIiorKyMgYGxvr6empqqqKi4vr6uriRII4ZSVaXWQyWbBIcNOmTfv27du2bdv3bWuskfKzbY0zeWLVqbiQ06dPLygoGD58OGoQhd318OHDuK/ZeIJVUOJ8oS9fvhw+fDj4XzEsRF5evrGx8bORq6qqvn79euvWrdOnT8eF59nBBAhI0Q1x+lycaxSrjsTGxt0hwf/Kab+xQwIA9uzZExkZ2d3d3dDQgHPM8lTsgn72YfalG2Ppo8NTWlra0tLSz88PO6ejvLx8b29vW1sbNqSqqmprayv6L/zU5Is+Wv1X8MuOfPD2N1yePHmygJBYgx8qsSwvL1dUVBRgrkOdezgBIAq8mwwA4HZI8gQam0aNGnX27NlDhw6hkcAFExOT2tpaCoUCywWvjeDdKiyDBw/u7OwEfKSjUPonQNUoJye3b9++ffv2QXMVAACnfxQSElq+fPnatWuhzwnwcQ92d3dzH3Z5wi0S5FZWQgSIBGFgWJzv3tZYI2V5eXnf21qAyZPD4QgJCT169OjSpUtQnIZtCzs7u+joaFzr4OZd46kYhfATwwIAnJ2dsdOSwFENG6anpyc+Pv7cuXMkEsnDw2PKlClJSUnYGHh2MAECUgibzeYnooRg1ZHY2ISFhXEdEoL2um/skGw228XFZdq0aUpKSl9Ukzi+iw+z790YmyUBDk9sYDExMR0dHXQAg5sPGDBgwoQJz549Q0M2NjZOmDCByWSi4u/3798vWbJEwESb/1J+zed8DAYjLCysrKwsNzcXne6nq6srJSWlp6enpaVFRUUFDVxTU/Pw4UMrKystLS0TE5MFCxbExMTMnDlz4cKF0Fynpqbm6enZ0dEBBY/Lly+Hzr3Zs2cvXbq0ra0N3RyNE0EQ6EKsra3dtWvX9u3boUNyz549t27
dysvLa2xsxHb6ly9fvnjxYuHChTo6OsuWLcvMzAQA5Ofnl5aWBgUF1dTUNDc3wzAJCQk0Gq2hoeH+/ftjxoyRk5P7448/UEPEvHnzsrOzjYyMQkJCdHV1oaCSTCbn5eWFhYWlp6f/+eefT548efr0KY1Gy8/Ph1LN3NxcDodTU1MDH1HA/EMHKTw5tbW1tbW1hYd7Nze3srIyGAxbD1VVVVBZOWPGjNzcXHt7++bmZjMzs9zcXPS+bnl5eUNDQ2lpaXp6ekFBQX19fWFhoZqamrq6OioSrK6uhsrKadOmbd26NSQkBK0ubHNgr5iDg4MBAEFBQc7Ozt+9rV1dXRcsWDBlypTp06draGg8f/5cQFvPmTMnJCQkKCjo48eP3t7eqAURe1+xsLCwuLg4MjJyypQpT58+nTt37u3bt9++fVtdXX3r1i0OhxMfH29nZ1dQUFBRUQHr5P379/ApLwpUUCooKBQUFNjY2KC+0Pb29gsXLty+fXv//v3wsMtgMNBhYN++fbt37968ebONjU1vby8UQiYmJrLZ7Js3b3Z2dgYEBAQEBOzZs0dCQkJJSYnJZOJeVeXZwYyMjNAqmjJlCrZHtbW1Qa3rtm3bDh48WFpaOmbMmG3btuXm5ioqKqKuUaiOtLe3X7p0KbbC4aSPkJiYGFhMbK/7xg7JZrPJZPKpU6eoVGpQUJCWlhbWMbt9+3YPD4+6ujroWUVfZ0V9mKKiop/1YcJCYXtC37sxh8OJjo6urq4uLy+HM/FCoLNXSUlpwYIF0OFJJpOhw9PX15dMJjs6OqK3iFgsVmhoaGlpaU5OzsiRIwEAZWVl6enp8NHm48eP//jjDzKZbGBgAG8jqaurv3jx4tixY1OmTGEwGIWFhefOnQO/Hv3z4sy/CZzBD529AcLPXIcNj92cH19qd/zq9zOvXLmCE1S+e/fOw8MDayvlqWoUAC4ktkJ4RnLy5Ek4ewO3wJAfOJEgv62+UST4dW2NGin7o62/GsGNSKVS+XUhFovV1NQkIGYajcZischkMs9fuTsY8jkBKfqTgKrDqiMFx8Ydz7d0SCaTyWazWSwWv6b/IsXuP+LD7KNNVABUKpXbkUuhUPpubf3XQThcfjWYTCb8sAxd8+jRo6dPnz579uzHWPgyMzN1dXXhOXh9fT3Oz0nwb4e7g/3kEB2SgBti5Pv1gZ8GqqqqwtfZCQgICH5ziJGPgICAgOD34pd9t5OAgICAgIAnv+a7nRDBgsHg4ODS0tLdu3f3PcKvsFmissGcnJyvlgf23RxIoVC8vb0FfwbLDU/Nqaqq6p07dyZOnIjKSiB9FwYCANhstp+fX1tbm7KyckNDw5gxY9LS0uzs7GxsbPqevYiIiJcvXxoZGZFIpIkTJ/K0Wnxfzp8/b29vP2zYsO7u7sjISPiCKPY9Qxxox1i6dOnZs2enT5/elwLm5+ffuHED62Vdu3at4E2ioqKCgoJGjBghJSUlIyPj5OQkLCzMHc/48eOh+lVBQWHo0KEzZ8784iroN1CdpoyMDLqyrq4OvvbM81G0AGMnv10jJibGxMSEW0kKKS8vb2lpQT966+3tvX37tpCQUEtLi6Ojo6KiorCwME5/81kQPqJUfnR2dnJ3la+uB36pZ2VlaWtrP3/+PDc3183NzcLCori4+MaNG/b29lAq1B9V8a/gV77mu3z58pgxYwAAr1+/5v5VR0cnOzv7iyIUEhLS0tLCfp3zWSwtLePi4gAApqamt2/f/qLkUOTk5CgUCtT6CUZZWRn3qVlfEBMT09XVTUtLc3V1Xb16tZOTU3d3N4VCefbsGfenVyEhIVQq9dSpU9g3+/mxbNkyNpu9c+dONzc3Y2Pj6upqKpVKoVC+KHtTpky5du3ahg0bli1bZmNjU1payi8kh8PBfff2dbi7u8OXyI8cOQI/ixYs7UU7hpycHJPJ7GMBhw8fnpycPHLkSFdXV09Pz758LDx58uS7d+/CV/+FhYUtLS2bm5u54zE
2Nk5PT7e0tNywYcPRo0fv3LnTx4L3B+/evaPT6ei/Ojo6xcXF8IM2FC0trezsbH5PXtCdiBueu4afn5+xsbG6ujqNRqPRaNyvtNy5c+fkyZPov9euXeNwOB4eHtbW1iUlJdra2jExMdw9XzAkEklGRgZOpNcXeHaVr64Hnqmnpqa2t7erqqoOHz48JiYGvpRkYGAgJyeHuvT6oyr+Ffyy13yoYDAhIWHr1q03b960tbXF+jOh049Go927d8/Ozs7IyIife/BbbJbo4UyAPBDrYPzw4UNfzIH8BJjl5eXJycnLly/n5wycMGECavmDpg8IT80p97neFwkDU1JS0tLS0E/p582bV1BQUF1d3dzcfPTo0UmTJo0fPx51MK5bt45CofAsFDz/FRISUlJSGjJkSGZmZkJCAirVxLbpqVOnUlNTtbS00O/namtrKysrGxoaFixYEBERISUllZCQYG5u7uDggE1aVlY2Ly+vqKhIQkJixowZsbGxBgYGIiIikZGRxsbGsbGx3d3dnp6eOL0kVtKIdgy4kJmZGRUV5ebmxuFwXrx4gVOKcFd7Zmams7OzAHcoDC8kJCQqKgo3cXJySkxMPHbsmI+PD0+/q5CQkJCQ0JgxY/Lz87GJtrW1+fr6uri4ZGdnt7a2rly5Mjo62tDQUF1dHVshaAWinUdPTw/2eVdX1/T0dKwMVlhYmKec1s7ODvV2oj4tWEUcDgf1x2ppaQkLCyclJcXGxq5YsUJfXx/dv1xdXbmNnTCHPHeNxsbGwsJCV1dXwGfScyqVOmjQoGfPnlVUVAwZMgQAoKSk5OPj4+TkNHnyZOilmzdv3l9//XXhwgV0q68Qpebk5GRkZCxduvTRo0dmZmby8vK4rdByYTsSrh6wRUYPJrh64Kdp9fX19fPzg50Be7dJQUHhW6ri1+CXveZDBYPm5uZycnK2trY4fyYMlpSUZGFhYWRkJMA9yB3597VZYh2MfTQH8hNgwowBPs7AwYMH4yx/2Dj7ojn9ImEgzhIJ/iuKbGpq2rx585YtWwDGwfjnn3/yKxTcNjQ0dNeuXbq6utOnT0elmrg2NTU11dfXx342fvz4cRMTExkZGRKJFBkZGRUVtXbtWi8vr9raWmzSVVVVjx49WrRoUUpKSkdHx/Pnz8vLy/X09GRlZe3t7e3t7R8/foyrVZykEYe5ufnDhw8BACQSCT3Q8OTFixfe3t6urq7wZilPdyi/bcePH5+RkYHGg/O7pqWlnT17NiMjw9PTE7uVoqJiWlpaR0eHoaFheHi4mJgYmUzW0dHh2W/RzqOtrY32+YCAAJwMlp+c1sDAAHo7cRpJAADWHwvXsFisCRMmTJs2jUajYdNCN8HmkN+uERUVJcCeAwAICgpauHChh4cHKnxfsmTJ5MmTTU1N/fz84GmutLQ07nThK0SpgwcP9vX1lZCQaGtrS09Px22FBuPuSGg9sFgsno2CXckv9bq6Ouyl9nesil+DX3bkQwWDKDh/JgCgoKDg2bNn8I6oAPdgf9sssQ7GPpoD+Qkw0WA8nYG6urpYy19MTAw2zj5qTlE+KwzkZ4k0MTFRUFCAwkZsXfErFNxq+PDhe/fuDQ4OVlRURKWa3G2KY/bs2VOmTJGSkhIWFlZVVTUzMxswYICjo2NMTAw26VevXsFp8I4dO6aqqqqqqoqNBD1fxtYqTtKIA0rur1y5EhkZyc+8DJk3b962bduuXr1aVVXFzx3Kb9vGxka0xbn9roMGDVq+fHl8fDy31GrdunX+/v4NDQ2VlZVNTU2ioqLQD87db9HOQ6PRsH0eJ4Ptu5wWBeuPhWtsbW2nTZsmISERFxeHTQvdBJtDfrsG946PIyYmJjAwkMFg3L9/H5rYREREzpw58+LFCy8vr5cvX8JgVCoVTooC+QpRKu42AG4rNBh3R0Lr4dOnTzwbBbuSX+rYehASEsKeH6B3d76uKn4NftmRDxUMAgDgQwVufya8OHjw4AH4Xz0jzj3Y3zb
Lr3Mw4oDmQDQMT2cg4G/567vmFE3ls8LA6dOn19fXFxQUoGsaGxu5S8GzrrCFgss6OjrcF0/cbYp7gDRixIjAwMDt27fD2oBAfyk2adTh2d3dXV9fz6+qsbWKkzRys3z58ufPn/f09EhKSvIMADDVbmNjo6GhIcATi4LmjUaj+fv7b9iwAfDyuyIIoq6uzj1dA2TmzJnx8fFFRUU7d+5cvXo1+tRHQFvg+jy6HlamYDktrlEg/PyxCgoKZmZmPNPC5pDfroHd8ZH/VZImJSVFR0evW7du/fr1Xl5e9vb2t27dAgCcOHECADBhwoQ9e/bAbgAAEBcXxzbcV+ykJBIJFhz6aLhrAMKvIykoKMAnlDwb5YvqYdCgQXCeRTRjAICvropfg1925IOCQQCAtLS0kJDQnTt3du3alZOTA/2Z27ZtS0lJqamp2bNnz549e/z8/FatWuXh4bF+/fr09HToUM/NzYVh2tvb4XMOERERnM1y7969ampqAAA3NzdVVVWsPHDHjh00Gi0uLg7KBgEAqDxQS0sL+2QbOhgrKioiIyNTUlJw5sBLly6dOHECmgPRTXgKMHNzc9+9e1dXV1dTUwOdgdu3b+/q6kKdgQAAaPkLCQlRV1dHRzis5hRNoqGhIT8/Pykpqbe3F10JhYGJiYldXV3YqKysrLKzsxMSErA3WFRUVF68eLFr164rV648efIEPnLIy8tLTk6GZczIyMDWFb9CwVvEqFe3vr4eSjUBALg2NTY2jo+Pj42NRfNw6dKlhoYGJycnOAa8ffs2PDxcUVHR1tYWm7SLi0tOTo6Tk9O1a9ckJCRycnKSk5PJZHJtbW16enpqamptbW1DQwO2Vul0OippjIqKgh2jrq4uOzs7LS0NQRAxMTFnZ2d4y93X1xe+Z4gFelnDw8NhpSkpKUGjo5+fH9Ydit3k9evXVCr1+vXrt27d2rt3799//z1hwgTU73r37t3Tp09v3ry5sbGxpKQkLCwM3vncvn37wYMHsfEICQk5OjpaW1svWLBAWVkZvqyErRA0JNp5lJSUcH0elcF6enriGiIpKQnaVgEA0NvZ0dEBI0R1miwW6+HDh1B5Wl5ebmxs/Pjx4xs3buzZs0dDQwObFroTYXPIb9eYNGlSRUUFAKCxsTExMTEuLu7mzZvnzp2bNGmSmJjYnj170PMnbW3t06dPl5SUVFdXHzhwICQkJCYmZt26dQAAGo2Gc2Rjd9Ls7GxUlJqfn79gwYKCgoL9+/dHR0cXFxfDmxkAACkpKQ0NjQMHDhQXF+fn5xcVFWG3amtrg12FRqNhbZ/YeoCzG3IfTLArFy9ezDN1TU1NdLBUVlZet26dm5vbkydPzp49O3HiRDKZ/NVV8YvwvXVoPxFQMIggCIPBQGcuFuBU/KwtEMd3lAcKcDB+qTkQFyc33275+9KoKBRKb28vv1+/fcJubJviEmKz2aiw9O+//3706BE2MC7pvuQEW6s4SSM3V69ehQt9F7F+X6MjpLu7OyQkBLcSzQ+2z3+2BtAA3DJYhP/OxW+3wnX7np4euIMIyAx2Jb9d4/Hjx9xTyQsAxkkmk9Go7t+/X1NTIzi3OPiJUhkMxmcbFNeRvrQe+KWelZX1/v179N+enh5400UAfayKX4Bf2eHyUwkGCXngPwuCIB4eHkOHDt21a9cPmHIlPj7++PHjW7duRW+A/4M0NDR890+yfrAM9ktJTk62srL6um0/ffpEpVJxn7H+SykoKNDS0sJNL9x3fqWqwPErj3wEBCh0Oh3ec9bR0eH5svv3hcPhdHR0oJNf/3oQMliCfzXEyEdAQEBA8Hvxy77hQkBAQEBAwBNi5PsfGAzGsWPHEhIS/umM9BXcN3mCef/+/R9//MHv14qKipUrVwIAYmNjsZ8P/zxs2LDhs7cocnJysC+jQshk8qNHj/qyOeRn7gadnZ27du36+PEjbuWBAwf6kuHz58+jn0gCAJ4+fXrjxg3uYFlZWbt27fr23P4
C8KzwlJSUe/funT9/XvC2DAbj8uXLnp6ePEWJ0HWAkpqaev78+cuXL6PflfOkvLw8NTUVLnM4nDt37ly7dq3vHUAAgo8Pvxi/y8gH1Z2f9TqKiYl9hVgSx/eyR34W6Cfse3gB3j8AwJAhQz58+AAAmDBhwg9zHPNUqvILc+zYsc++nMLTj3rs2LHx48efOnXqs5vDtvsu3aCf4Gmq7LsvFFWSwlpVVFQsLCzkDmZiYhIVFfWdsvz9aWpqwn7J0E+bQHhW+LFjx1atWuXu7i54W2jETU5Ohp4ELM3NzZs3b87JyYH/IgiycuXKbdu2rV+/Hn6VwQ+safPrhLH8QI8PP+wI9g/yy3o7sZ69tLQ0qO5MTEyEXkc1NTX0VzExMdTZOHfuXGFhYVQsaWVl9erVKwUFhczMzEGDBhkaGr548WLp0qW6urpYr2BycjJWtinAHuns7IxzP/K0HS5YsIBEIqHx0+n01NTUuro6R0dH1HCP9ROmp6enpaUZGRmNHTv23r17Ojo60Nowe/bslpaWyMhIYWHh+fPnQ5cEg8G4e/fulClTaDRadHT0H3/8wWazQ0ND4evXAIAPHz50dXVNnjz5xYsXKioqiYmJsCo4HE5CQkJWVpaenp6ZmRn8CKyjo+PZs2cGBgZv3rxZuHDhgAED7ty5s3379sjISCaT6eDggIsEW/yKigrYLu3t7dLS0sOHD3/48OGqVasUFRVRo2NlZSUMM2LEiKCgoFWrVomJiWErLSkpCVv53H7Ujx8/pqSkREVFzZo1i8FgCBZjom2H7QY4vyhWEZKXl9fU1EQikSZPnozaFy0tLfvSbdBhODU19dOnTxwOp7KycvXq1bdv37aysrK1tQUAYEsKuCSuqGlWX18fNYZAcnNzo6KiVq1a9fTpUx0dnVmzZvn7+zs5OUElaUtLC6xVYWFhFot15coVOTm5FStWoJvDr6pDQ0OLioo2bNhQXFyM1ZBCtamysnJmZuaWLVuUlZXRHmhnZ4caXKdOndrU1IR1e6JVZGJigla+m5sb2tyysrLPnj0zNzd/9erV3LlzW1pacnJyNmzYICMjg03Czc3N3NxcS0urt7cX1ueKFSvevHmDmmOTk5MpFIqBgQF8L5HNZsNNJCQkXr58ifbPiRMn4gy9uAbiqQaNi4vLz88PCQmZNWvWZ1WrEhISPN+oCgoKOnHihI+Pz927dwEAJBKJxWJdv37dw8Njx44d/A5r3KZNnCkGpaurC83MkiVLsL03MTERVk5xcTF2v0M/70P3AllZWfTAxS9L/1J+2Ws+rGcPVXeiXkfsr1hnI7QeoGJJISGhioqKJ0+erF69+vDhw21tbXPnzj106BDW2xkQEICTbQqwR4L/dT/ysx3Kyclh4w8ICKBSqcOHD0c/BwYYP2FxcXFiYuLixYvd3Nza29sNDQ3v3btXUFAwc+ZMNpu9a9cuZ2dn6KaCG4qJiRUVFRUXF48YMQJ+Xv3nn38aGBjMnTsXfp6Vk5Pz5s0b6IYIDw/38PDYvHkzACA4OJhMJo8bN+7s2bPoACwmJnb16tWOjo758+e7uLioqqo+efKEwWBoamoGBARwR4ItPtouCIK8fv1aXV09Nja2paUFa3REwwgLC585c4bJZOIqDVf5gMuPamJioqKiMnXqVHV19c+KMbFtx88viu1mR44cgecQWPtiH7sNGomYmNiZM2cmTJhQWlp6584dT09POHkWrqQ4UyXWNMvd/w0MDG7fvi0nJwcAKC8vFxISYrFY4uLiUEmK1ioAoKqqatWqVfCzLWwMbW1tZmZmPT09wcHBOA2ppKSkl5eXkZGRurr67t27sT3w06dPqMG1uroa6/bEVpGwsDBa+djmlpWVffXqVV5enpub26JFi0aMGCEuLh4YGIhNgkwmGxsbW1lZKSoqYusTTbe2tjYkJGTGjBnQuQwAEBYWhpsYGRlh+yfO0ItrIH5qUDs7OwkJCUdHR2Fh4S9VrULYbDaLxXJzc3v
79i1q+AsMDDx79uycOXP4WYEAL9MmP7A1jO291dXVaOXg9jt0W3QvwB64fjF+2ZEP69kT/CvW2QiPFFixJBQoy8vLDxgwQFdXd8iQISUlJThvJ062yZ0cao8E/+t+5Gc7rKmpwcZvY2Ozd+/ehIQE7MRAqJcvMjISQZCCgoL79+9LS0tPnjxZREREXV1dSEiooKBAQkJCRERk8eLF2Hl2cOeJoaGhhoaGEhISKioqJBIJnXVPTk5u2LBhSkpKsCoaGxuLi4uNjY1HjhyJfiEkKSkpLy8P9ZIKCgrV1dUwTjQJXCTY4vPLD7fREcYDE8VVGnfl8/OjlpeXf5EYk59fFBvGwMBg2rRpJiYmOPtiX7oNtmiqqqoaGhp6enpaWlqSkpJQqoArKc5UiTXNcmdeXFx8zpw5ERERIiIiwcHBZWVlRkZG4uLiOCUpAEBPT09GRsbAwKCyshK7XlFRUVtb29jYuKKiAqchVVdXV1JS0tfXX7lyZVxcHLYHysvLowZXnNsTW0VSUlJo5WObGypzhw8fPmjQICkpKU1NTVh1uE4Oc4itT2trazRdLS2trKys7du3W1tbc9cMtn/iDL24BuKnBv3qHoUSEhLS2Njo5+enp6d39epVuNLc3DwnJ0dLS8ve3p7fhtymTX5gaxjbe7GVw+96EQV74PrF+GVHPpxnDzUHwgXsr310NkKgSZKfwxBFgD0S637kZzvExS8sLBwfH//+/Xt0xh+A8fLp6OhQKBQ7OztLS8v29vbGxsZJkyadPXu2o6NDUlLy3bt3HA4HAIB9rwEaBREEgbc3u7u74fk+LDu/GnB1dYWmJX4z35JIJDU1NRg5P1chTigKKwpnOMQZHdHKhP8KVkQCXn5UaG1QUVHpixgTBuDOPE994po1a3bv3r148WLBGs8+dhtcnnElxRkaBZhmIe7u7leuXJGVldXX1798+TIcBtBy8SsmmjpuDU8NKZ1O19HRwfVA7tig25NfFfETeEJg1XEnwWaz+dVnQ0NDeHi4rq4ubq5KbGfj2T+59zt+1lz4b196FG5D+PwsPz//yJEj69evv3379vXr1+l0elxcXHJysrS09I0bNyorK9va2rij4mna7CNo78VWjgCzKFzPU3v7a/DLjnxYzx4AAKo7Ua8j9lcHBwfU2aikpIQVS6alpWVmZhYWFkJXZGJiYkZGRmNjI4vFwnoFs7KysLJNwfZIrPuRn+0Q50gMCQl59+7dpEmTsOeSqJ9w+vTpoaGhzs7O8GnNrl27Vq5c6ejo6ObmpqamNnHiRCsrq3379ikqKqLev+nTp1+4cOH69eskEqm0tPTQoUNr1qy5cOECnU5PS0tLSEjIy8vr6OjIzc3Ny8urrKysqakpKioKDAzMyMgICAi4efMmi8XC1vbTp08fP37s4eEhKSnp4OCwZcuWmJgYqDzFRYItPqpUHTduXFxc3Pnz52k0WlZWFtboSCaTYZjy8vLa2tqkpCRcpeEqH/zXj4rmrbq6uqys7PXr14qKip8VY8K2e/PmjQC/KDb8/v375eXlFy9ejNV4vnnzpi/dBo0kLS2trq6OTCZDVSx0hObm5uJKijNVurq6oqZZNpuN+kLRaIcNG6asrOzk5OTu7j5o0CAAQFtbG1SSojWfnp6el5fX0NCQl5cH32+CpKenQwFsUlJSTk4Oi8XCakgBAC0tLcHBwbdv3z59+jS2B6qpqWENrli3J7aKAgIC0MrHNndmZmZZWVleXl5ubm5NTU15eXlGRkZpaemECRPQJHR0dEaNGuXn59fc3IzWp5ycHJpuTU3NuXPnxowZY2dnh5YIblJcXIztn7W1tVhDL4lEwjYQPzVoQkJCdXV1VlbWZ1Wr0IhbUVFx7dq1y5cvOzs719fX37p1q6ysDJ50iouLIwiyb9++wYMH79y58+HDh6dOnVq5cqWioiLO9crTtFlcXMwtjIUBsHpbbO/FVg5uv0OPD+gRDKe9/aX4jia0nw2sZw9Vd6J
eR5yF7yvskQI2EWCPRLhMj/xsh2j8bDabTqdz2/+wfkIBmeH5EzzLQ+Ok0Wjw2QO/SBAEuXbtWmNjY2lp6du3b2NjY9H1EydOrKmpwRaZwWCw2WzUlYoDW3y0XZhMJofDQfPDs+2wCPCvon5UnnxWjMlPMcqzGrEt+1mNp4B4BIAtKc5U+VnTLE85J7qGXwPxA9WQIghiaGjY09ODjYG7XNxuT35VJFiJyTMJtJm4t2Kz2Ww2m3s9uong/tkXNSgu2m9XrXI4HDqd3tPT09raiib9FbZefmAPJtjKwe13KLCucAeuXwnC4fLv5lv8hF/KtGnTRo8eraysrKmpuXDhQjhrXW9v75gxY27cuAGfiPzjYP2oBN8LnIa0vr7e3Ny8tLQUPhfnx0/u9iT4nSFGPoIvoKGhQVpaGnu8a21tbW5uJpFIw4YN+wczRtCv4DSkUGQsLS2NvgzFE8LtSfDTQox8BAQEBAS/F7/sGy4EBAQEBAQ8IUa+/+endRV+qXYSwm3hwzkbIdXV1a6urjg5U//RR28khFslymKxAgMDN23aVFVVBRd+8PvWPC2O3xe01AQEBP0EMfIBAMC7d+/odPpP6yrsu3YSC2rhQ42FqLMRi46OTnFxMe4DxP6jj95ICLdKNCQkhEqlnjp16sOHD3AB+3V/3+mLL5QnPC2O3xe01AQEBP3Eb+HtbGxsjImJkZeXl5eXHzt2LE7A2NjYuGrVqlOnTi1YsABgXIXS0tKoKnDy5MlozKh7UFZWNiQkZN26dQiCvHr1avXq1TzthV5eXnFxcVBLqKWl1dnZmZSUVFxcbGJiYmVl1drayi1yzM3NLSoqYjAYLi4uxcXFqHYSvlrCT/P48uVLbgsfaixUU1P78OGDgYGBhIQETrqIGhywHki4LVqKOXPmZGVlLV269NGjR2ZmZvLy8jExMbq6ujk5ORs3bpSUlERlgNCU2NbW5uvr6+Likp2d3draunLlyujo6KFDhxYWFvbFG8lTJWpqahoaGqqgoDBo0KCwsDAFBQVzc3NTU1M02xoaGjxdhU1NTdjcZmRkwNShvqu2thbtHurq6i0tLRQKxcnJCWcEBVzaTKxUU4B7E4KtH2w9Y0WRHA4HW2oajcbtayUgIPh2ftlrPqyZU1RU1MvLa9CgQYMHD+YWMKqrq6upqc2dO1dISAjrKsSqAuvq6mBgrHvQxMTk+fPnNBqNRCLJycnxsxcmJiaiWkIAwF9//TVq1Cg6nQ6vL7lFjpWVlefOnVu8eDGLxfL09ES1kwMGDIAB+GkeEV4WPtRYqKKiAp2NOOkitsZwHkgYISzFkCFDfH19JSQk2tra0tPTtbS0/v77b319/ba2thcvXmBlgHBbRUXFtLS0jo4OQ0PD8PBwMTExMpk8cODAPnojeapE1dXVDQ0NzczMJk2aBBdMTU2x2ebnKsTlFps6AADbPW7cuGFra/vgwYPW1lacERRnccRJNfk1CgRbP9gM40SR2FIDAHj6WgkICL6dX3bkw5o5NTQ0VFRURo0apaury0/ACMG6CnmqAnF6xs2bN9+9exeKYPjZC3EWStR+OW7cuPLycm6R49u3b6GUa+bMmaGhodyZ5Kd5FGzhQ52NOOkiGoDbAwn9n7AUuMglJCTk5OR0dHSg8JCnKXHdunX+/v4NDQ2VlZVNTU2ioqKSkpJ99EbyU4lyg802P1chLre4GLDdY8+ePZGRkd3d3Q0NDTgjKM7iiJNq8msUmAS2frAZxokisaUGAPD0tRIQEHw7v+zIh/N2YuEpYITWD+wanjZCnHvQ2dk5JCSEw+GIiYnxsxfitIT79+9PSEhQVlZ2cHDgKR7E2hrhbAxQOiC4vAiCfNbCh1sJpYvov4I9kAIiB3xMiTNnzoyPjy8qKtq5c+fq1asnTZoE+uyN5KcSRasCXRCQbZ4NjU2de6WLi8u0adOUlJS4M4azOH5WH4rNNrZ+sBlWUlLCdgBcqXn6WgkICL6dX3bkw5o54+L
iamtrCwoKwP8q7NDAo0ePPn78eGxsLNZVOHnyZKwqEIbEugejo6MlJCSmTZsGL3T42QuxWsLy8nJfX9+cnBx/f/+QkBCcnxMmMWfOHC0traCgoOvXr3t7e6PaSfQwyk/zyM/CB42F6enp0NkI/le6WFVVVV1dnZKSgvVAwmtcGo2GlkJKSkpDQ+PAgQPFxcX5+flFRUUw8pSUlPz8/KqqKm5TopCQkKOjo7W19YIFC5SVlbW0tPrujeSpEq2vr09PT09MTEQXurq6sNnGihOxDV1cXIzNrYSEBEwdrU/YPdhsNplMPnXqFJVKDQoKwhlBcRZHnFSTX6PAJLCyRGyGtbW1sR0AW+qMjAysrxVncSQgIPgWfuUv2Xt7e8XFxel0Om5mSO41AAAmkykqKsodCXdgGC38CwBgsVjYu4s8I0dzIiwsfPPmzYULF1IolNevX2/cuJFnDgEAPT09XzE5CIvFEhYWZrPZuBueDAYDe+ljZGSUmZkpISGBe1kUygx51gOEyWSSSCTuu6lwOggGg4ErCJvNhhfH3NXLZDJFREQEvK1Kp9PFxMQQBBEwXZngbPOsWMGps1gsOKkvgiDcxYRqUxERETRLfWwmXP3gMozNJ7bUUKgoLCwsIiICL1I/WxUEBAR94Vce+X5CyGTy3LlzFyxYICwsbGpqOmPGjB+fhz5KFwkICAh+VYiR70eDIEhNTY2Ghga8ZPzx9FG6SEBAQPCrQox8BAQEBAS/F7/sGy4EBAQEBAQ8IUY+AgICAoLfC2LkIyAgICD4vSBGPgICAgKC3wti5CMgICAg+L0gRj4CAgICgt8LYuQjICAgIPi9IEY+AgICAoLfC2LkIyAgICD4vSBGPgICAgKC3wti5CMgICAg+L0gRj4CAgICgt8LYuQjICAgIPi9IEY+AgICAoLfC2LkIyAgICD4vSBGPgICAgKC3wti5CMgICAg+L0gRj4CAgICgt8LYuQjICAgIPi9EPmnM/CzQCKR/uksEPybQBDkn84CAQHBV0Jc8xEQEBAQ/F6QiFNXAgICAoLfCuKaj4Dg56W3t/f9+/fPnz8vKirKzc0FAHz48KG6uvorompoaIALVVXtz58XvX9f09DQXV7ehgvG4SCJibVkcvc35vyz5OU1ZWeT+xi4ubknOLiwtLQV/osgSGxsVVJSLfbE/ePHZmz4ly+LKyraAAA0Gq29vf0rcshgMFJTU2G1AwCYTGZqampqamp9ff1XxIbCZrNfvHjxLTHwJD8//+PHj+i/DAYjLi4ONnpYWBidTscG5nA4iYmJZPJn6p/BYERFRXV2dn733P7jECMfAcFPyocPH2bMmNHR0TF69OjExMRZs2YBACorK+EBi81mU6nUvsTD4XCuXr0qLS0NAPD0jPD2TjEyUpGTE3dzexEaWoILTKHQNm8Or67u+Pb8d3b2Cvjp4sXUsLDSvsRTXt62d290TEzluHG3YmOrAACenq9VVaXExUV27XoLAEAQ5M2bcjOzazB8RUXbvn0xU6cO8fX9EBlZLikpGR8fX1tb+6X5FxMT6+npGTlyZFBQEABAVFSUTqffuHFDQ0PjS6PC0tvbm5mZ+S0xcNPT03PixIl3796ha5qbm7dt21ZRUQEAyM3NxXUVCoWyefPmz55CNTQ0rFmzpq0Nf3r0C0C84fJl0Ol0CQmJfzoXBJ+BzWYDAISFhf/pjHw9VCp18eLF9+7dmzBhAgBgzZo18MJl0aJFMMC+ffuWL19uYmLy2aju3LljY2MjJyfn6/vh48fm6OiVcH1wsPP9+zm4wKqqUtract+e/4iIsqqq9g0bRvP8VU5OfPhwte5uRl+iqqhou3FjLgBATU06JKRIS0u2qKhl+HA1AMDmzeG1tZ0DB8pNnz5UXl4chr96Nd3aeqCUlKirq9nq1SEzZgydO3furl27zp49+6WlGDVqlL29/Zo1a4yMjIyNjUeNGpWbmysk9E0XDFJSUocPH/6WGHjGaWxsjF2jpaW
lo6MDl/fs2YMLr6qqqq2t/dlodXR01NTUvlcmfyqIa77/sGLFCm9v77Fjxx47dmzbtm3R0dHcYdLT0+fMmfPVSTCZTC8vr2nTpj19+vT06dN+fn7cD1kZjD4dC74o5HentLR0xIgRJ06cOHHixJYtW+Aw810Cfzt0On3r1q13796dPn26gLs0LBbr2LFj48ePv3Pnzpo1axISEvo1V19BSkpKV1cXHPYgW7Zs6ezs3Lx588WLFxsbG4ODgyMiIurr69+8eXPr1i1HR8dXr17xjMrPz8/U1BQA8OhR3vz5Ruh6KSnR1avN6XTW3r0x/v65Hh6h8fH/uQiIjCzz8Ah1dg4EAHz61HnoUNzz50Xr14ey2ciePdGuri8OH46bMOFuW9v/30Oj0Vg+PimnTyc6Oj6h0Vg3b2akp9dh72dGRVXcuJFx4kTC7dtZX1QV06YNQTNsbq754UP9kCGKcI2urkJaWh1cRl/Pbm2l1dZ2AAC0tGQLCprhT+Li4lFRUV+ULmTmzJleXl7z5s3r6OgQEhKCw15FRcWePXsePny4atWqT58+cW9169atqVOnHjlyxNzc3NfXNyMjY9iwYbdu3XJycoqPj4fNQafT9+7d6+/v7+HhER8fjw3z2QibmpqsrKw+fvyYm5urq6sLu3paWtqKFStGjBiBve1ZVVU1derUjIwMAMClS5fCw8OPHj0Kf4qMjPTw8HB2dgYAFBcXX7t2bePGjQcPHgQAxMfHX7p06datW5WVlV9RaT8/xDXff9i9e/fw4cPDw8Pt7e1NTU0LCwu7urokJSVFREQAAN3d3TIyMqNGjeJwOBwOh8lkCgsLw5/6jqio6Lhx4ygUCuxqa9eupVAoO3bsQBCko6NDQUGBTCZfuHDhyJEjIiIi3KnDSDo7O+Xk5LAhv3dNfB59fX11dfXZs2ePHDly/Pjxubm5I0aMOHXqlJKS0oYNGxoaGp48eTJkyBAymezu7s4d2NjY+K+//jI2NlZXV29sbFy5cuX+/fvh3jt79mwymVxWVpaZmTlx4sT6+vodO3Z8Ud4SEhIYDIa7u/uECRMoFIqcnFxKSkpubq68vHx8fPyFCxfQfNrZ2X348GH16tUDBw48c+aMjY3Nhw8fLl26tGLFivz8/IULF8bGxpaWlmpra6urq9vY2Hh7e4uLi8vKytbV1Z05c6af6haloqJCUVERu0ZMTExMTGzAgAFMJlNdXX3QoEGzZs1SUlI6efLkmjVr9PT0/v7777lz5+LiaWhoEBUV/W+cbYqKEv8bp/DZs0mqqlIrVphaWGhOnXq/rm47AMDGZtDkybojRvjW13f19DBtbAbZ2up4ekYICQF9faXeXtaBA3Z1dV3v3lU5ORnCqC5cSGGxOLq6ivn5TW/elFtYDFBQkDAz+/8bgzQaa/Zs/Y6O3s2bw9esMf/SCmGxODU1HX/8Me7ixVQpqf+USEpKlELpwYVcsMB4797oZctM4+Orhw79Tx0OHTo0PT196tSpX5ouAMDLyyszM3P58uWPHj2Cazw9PY8ePWpmZtbR0bFr167Hjx/jNrG0tLx169b+/ftXrFhhYmLS3t7e1tY2f/58d3d3AAAcLC9fvqyqqrpixQoLC4upU6fW1dVhwwiO0N3dXUFBgcVijRw5Evz3Gxs9Pb39+/efO3du//79wcHBcMPBgwfD0bqpqenevXuxsbGDBw+GP9nY2EyePHnEiBH19fV79uxxcnIyMzPbsmXLpk2bNm3alJOTIyQk5OPj8xU19vNDXPP9h+HDh6PLJBLp1atXJSUlbm5uAICLFy9WVlbevHkT/lpeXn727Nmurq6vSAX71aCHh8eDBw9aW1v37dsXGBj4/PnzsrKyjIyM2traU6dOcafe1tbm7+/v6+t7+PBhNOQ3lfkbIJFIiYmJV69e1dTUNDU1FRERkZeXHzhwIABg/fr1bm5ujo6OUlJSsNJwgcXFxUkk0ujRo+3t7SdNmiQqKioiIjJ69Ojx48d
fv35dT09v/fr1ra2tDg4OPE9+BWNlZZWRkbF27dqBAwfq6up2dHR4enquW7fO2dnZ2dkZm08SidTY2BgSEuLv77927VoAgKGhYWdn59SpU5csWaKoqKipqSkvL+/m5nby5ElRUdEdO3YoKSnp6+tv3Ljxe1cnD3R1daurq1ksFm497hZuWVnZ0KFDly1bdu7cOXhej6OtrU1MTOy/cSqUlbXiAmRlkSUkRAAARkYqvb1sCoUGAJCWFgMAKChItLXRhwxRrK3teP++GgDA4QBhYSFxcREAgJycOPZJ3ocP9W5u5suWjSgv93R0NODOibX1wIiIspISCovF+bK6AAAA4OeX7eVlIyIipKOj0NX1n3S7uhhDhyrhQs6Zo3/njmNSUi2F0jNhwn/u+ElKSjY1NX1FupC7d+/W1NQcOnQI/puVlQWfetjY2OTl5XGHFxYWlpWVBQAMHjxYSUmps7NTRERESUkJ/RUbiZGRUW9vL4VCwYb5bITcYeC2U6dOxR0ZYHJqamouLi7Gxsbo4z346FdBQaGtra2+vn7FihXr1q3r7e2FfQaOl5KSkl9aV/8KiJGPN+7u7kwms7q6mkajVVVVPX/+HF6oNTY2BgUF7d27F3c+/hVISkqSSCQFBYVVq1YBAFJSUgYOHKiurq6rq8sz9QcPHlCpVBMTEyUlJTTkdyjq12JgYDB69OjGxsaioiIAAIlEguN6dXW1goICAGDkyJHoXUTuwPHx8d7e3nAXhacaDx482LRpE/pwAgAwZMiQL82VtLR0YmKiurr6mDFjOjs7s7Oz4UkxAMDW1habTwCAgoKCmZmZoaEhvE9IIpHIZPLLly+9vb2lpaVJJFJ+fv727dv37dsnJyeHtviPqfaJEyfq6ureuHEDXVNRUcFkMhEEgSf4JBKJyWSqqqpGRkbCdxDS09O5bybr6emhZ2nr14++ezebSmXCfxEEKSmhmJioJiTUAADodLaysqSKihSaBFy4eTOzqYk6ebIum83B3p/H3atXU5N+9CgPAECns7KzySQSYDL/JzMbN4aNHKlubKyK25DN/s+/vb1834h5/bps6tQhAwbI1tV1jR49oLKyHa6vq+u0sNDkDj9qlKa9/bDo6MrDhyfBNW1tbfr6+uC/z4D7CJvNhuGlpaVfvHjh5+cH15uYmMC+3dLSYmFhISAGOp0uJyenrKyMXQlrAI2ETqcrKyurqKj0JUtohMLCwrDGenp6sFUKb4TikkMQ5NOnT+vWrYuIiNi8eTPyX9Bfu7q60tPTAQBVVVUsFis/Px92GwRBOJyvOVP5ySFGvv8B7UBHjhzR0dFRUlLicDh79+5VVlaGtyDU1NSys7Ozsr7sQQVPAgMDlyxZUlpa+uDBg9GjR8PuBTPAM3VxcXF5eXl7e/sFCxaAn8AhoqKiYmlpOX78+GfPnmHXy8rKwoNFTU0NOnRxB7a1td22bdugQYPgv1OmTKmsrIQnod/CixcvREREjh49amVl9ebNm4EDB6Jv83MjLi6uo6Ozbdu2u3fvMplMAICGhoaDg8POnTvhOfXw4cM5HM4/cktZRETk9evXERERO3fuvHfv3rVr12pqarq6upKTk5OTkzs7O62srI4dO0ahUJYuXWpubu7u7k6n07lf6hETExsyZAh8qX3FCtNdu6ydnJ5cvZp+507W9esZmpqynp5j6XTWs2cFN29m3Lgxt6Ghu6SEEh1dWVnZXlHRlpRUq6gocf9+jrd3irKy1I0bGSkpn/Lzm+rruzIyGtLS6tB+uG2b1bVrH2bOfHDkSLypqfqYMVqPH+e/fl2G5kRFRWr//tiYmMqKirY3b8qTk2s/fKhvb6evXfvyyZP87u5uRUXF5ORk7qoICip0cwuZNeuhkdEVV9cXOjryc+cOe/Qoz88ve82aUQoKEgCAqKgKCqUnNLSEw0FYLE5kZPnBg7GXL89G7+5+/PgRPqQ3MDCoq6vrSxP09vZevnw5JCSkqqoKAKCrq/vkyRN
Ywz4+PuHh4W/evImPjz9+/DjPzcvKyp4+fXrhwoWrV69mZma2trbCjxmio6MpFEpCQoKnpyedTn/27NnNmzdv3LiBDfPZCAEACxYs2Llz57lz59TV1RMTE4cNG5aQkBAREZGenv73339XV1cXFRXFxsaWlJSUlJS8efOmo6Nj3759NBrN3d2dTCaXlJRER0dXVlZWVFQkJSUdO3bMwcHBxcUlJibGxMRk165d9vb2R48epVKpP+FT8O8AQvBfysrKdHR0Tp06xWazp0+ffvr06SlTpgQHBx84cCAjI+P8+fNZWVkmJiYfP34cNmxYcnLyl8bPYDC2bdtmbW395MkTHx+f06dPs9nsmJiYyZMnX7hwwc7OrrGx0dLSMjExkWfqZDLZ1NR0586dkZGRdDodhuyPevgspaWlurq6f//99/Xr1+fNm1dZWclkMjdu3Hj06FEEQeLj448dO/b27dtdu3a1tbVxB6bT6fPmzbtw4UJgYODGjRsZDMaiRYvu3r1bUVEBx7+2trYxY8YkJSV9Rd7Onz+/b9++0NBQV1fXpqYmBEFOnjx57Nixd+/excbGovlkMpmHDh0aPnz4w4cPt2zZcvr0aQRBUlNTR48e/fr163PnzgUHB/v4+KxZs6a7u3vGjBlxcXEsFsvDw+P48ePftzI/C5VKbW5u5vkTjUaDC93d3QJiqK2tha9TodTXdzGZbOyazs5eDofDL4aeHiaCIHQ6C1728YPD4VCpDEz2mDzjwYaBwWDSCQkJJSUlAuLH0t5Ob2uj8fyppaWntfV/fqqvr/f29v5vHnr6mARP6HQ6utzR0cEvWH5+/tSpU7u6uthsNr8wkM7OTgE1LzhCmBn0TgCbze7s7OQXA3xHQUCA3t7e3t5e9N/u7m42m81gMPiF/1dDOFx4w+Fw0DfjSSQSjUaTkpLqp7R6e3vFxcXhXwaDISYmxi91DofDYrHgYxsYsp+y9O1QqdRvv4D7CmA1dnR0yMvLoytZLBabzRYXF//x+flJKCwsBAAYGRl9NuQ/SElJybBhw757tDQaLTQ0dOHChT/SzZuWlrZly5bU1NSfNsLfHGLkIyAgIPjOhIWFtbS0jBw50szM7OeM8DeHGPkICAgICH4viDdcCAgICAh+L4iRj4CAgIDg94JwuPwHYmZagi/ixzwmSEtLo9PpAwYMUFdXhx8y02i0jIyMtra20aNHa2pqAgC433oYOXIk/ES6qqoKFXZ8I52dnbGxsY6Ojt8YT1hY2JQpU/oiv83KysrPz583bx4seF8oLi7u7e2FbjB+sFgs9JN/LS0tLS2tb9/3KyoqmpubsWtMTU1jYmK4S8pgMJKTk4cNGwbbDgDQ3t6ekpIyc+ZMXJz81hN8F4hrvv/wz75iS/Cv4wf0yRUrVpSWlioqKrq7u0MDQGZmpouLi7S09Lhx406ePHnmzBkGg+Hl5dXa2nrno5/OawABAABJREFUzp39+/d3dnZeuXKltLQUANDQ0DB79uzv9RlyZWXlkiVL4PK3TFvDPW8A4DXvxJs3bwoKCpSVlfs4JVNnZyeDwTh16lRERITgkCIiIlVVVba2thwOJyIiYty4cbC6vgU1NbUNGzbcu3cPfvoZFBRUUlLCs6TYKRQg2dnZGzZs4I6T3/q+z9GB8ktOM/St/NMHEAICAt5oamoWFBQgCNLQ0BAREdHb26uvrx8XFwd/ZTAY+vr6ERERWVlZCIKcPHly+fLlCII0NzdXVFQgCOLt7W1paRkSEvK98qOiooIgSE1NzYYNG75XnBAvL6/8/Hzsms2bN79+/bqPm4eHh1+9ehVBkLNnz548efKz4fPz82VkZODyunXrtm/f/oX55cHChQvPnj0Llzs7O1tbW/mFnDdvXkJCAvpvd3f34MGDuYPxW89dV4Lp7e2dP39+38P/JhB3OwkIflLmz58/bdq0u3fvTps2bebMmSkpKdXV1TY2NvBXUVFRGxub58+fX79+HbuVioqKiooKk8l
kMBg7duzw9vZ2cHDABrh7925gYOCwYcNUVVVHjx5dU1Pz6tUrd3f3uXPnXrp0aejQoZmZmevWrXN0dLx16xabzXZ0dMzJyZGT+8/URYGBgZmZmW/evBkxYkRYWBj0aLu4uKDx37t3r7u7++XLl1evXo2JiTl37tzr16937Nhx+vRpYWFhd3f3U6dOWVhYoGmtXbs2ODhYWVlZUVFxwIABAIC8vLy8vDwpKSlFRUVTU9MjR44YGhomJiYuXbpUWlp6yZIlf/75Z1hY2PPnzwEAbDb75s2bCgoKUNn18ePHAwcOxMTEBAcHq6mpYTMzdOhQ7kr+8OHDypUro6KiKioqKBSKmpra7NmzV61aNWbMmNra2rKyspcvX0pLS1+/fp3BYCQmJvr7+x8+fLi7u7u6unrv3r1oc4D/qs5qamoiIiJmz569aNEiWFJ+ebhx44aMjAz2+o/nemze7O3t0bp6/PixiYlJcHDwhg0bzM3N0frct2/fmzdv0GYdMGBAdHT006dPFyxY8I/YiH5OiLudPwXop+tYqSB2GmXo1urvKX76D3iehf7LXRBcABgGlrpfweaEWw/NvRL5gV8B+fj4bNiwwcHBwcPDg8Vi1dbWqqioYCeHU1dX5yfiCgoKmjdv3vz580tKSrKzs7E/WVpa1tbWent7b9++/eTJk5KSknCeB+jynzBhwvz589XU1OBUAPCZGbbUlpaWAwcOnD59+vv379PT05cvX459rpaSkvL27VsFBQUREZErV66sXbvW1NT04MGD3t7eQ4cOxc0bANNC552Awx4AYMSIEbq6ura2tpaWluiEBp6eni4uLqNGjYITGsBhDwAgLCxsYWFhYWEBP3QbPHjw4cOHzczMoqKicJnBVgKDwThz5szKlSvnz5/v6elJo9Fmz57t4ODw4MEDTU1NGRmZCRMm3L9/f9iwYdevX79w4UJ3d7eWllZ+fn5sbKyurq6YmFhERAR22AMAJCYmXr58+cqVK11dXWhJ+eXh48ePERERS5cu9fDwwEbCvR6bN2xdycrKTps2beTIkS9fvsTWJ51OxzarhYWFpKQk1LXz7Wq/H0Rd/DhYLNapU6fCw8PXrFmTmJjo5uZmY2PT09Pz119/DR06VE1N7cmTJ1u3brWxsVmyZAmdTl+1atXChQvfvn0bGho6duxYMpmcnJx89OjR2bNne3p6Kisr5+Xlbdy4UUtLC94AcXBw+PDhw+7duw0MeJjy/0Hi4uKoVGpNTc2YMWMGDx787NkzXV3d+vp6OBkFLoCFhUVAQIC/v7+kpCQ6KUx/UFBQEBkZCefXXrJkycGDBydOnPj+/fsDBw7AAB0dHadPnx4/fnxjY6Obm1tLS8v69esPHz7cl8lgvx0owdm7d+/8+fOnTp06fPhwGxsbMpkMJTUwTHFxMbQwcxMVFQUHxREjRvj4+KCqZQCAiIgIFCij8zwsW7YM/gRd/jdu3MBNc8qTuXPnvnjxYsyYMQEBAejKDx8+zJo1CxvnX3/9NXfuXHSyG9y8AZ9NKysrC05S2JcJDcB/5x9QVFRsa2vjzgyKmJjYrl270H+tra2Dg4NVVFTgiQ46McKECROys7MbGhouXLigpaUF47l+/TrOQA2xtbXdunUrAAA+lIUl5ZeHhIQEaLXFTYbAvR6XNxQrK6vHjx+3tLSwWCxsfXI4HFyzEnBDXPP9OEREROzs7NTU1FavXu3i4gLneNuzZ4+pqemWLVucnZ2vXLnCYrECAgKmTJny8OHDhQsX1tTUQCnt0qVLt2/fbm9vb2BgICsru3DhwuXLl7u5uc2YMaOnp2fcuHG6urobN240NzfH3fv6GXj06JG+vr6NjU1SUlJkZKSIiMj06dOxbyRiAyAIEhkZefjw4YCAgH49Sz116pSjo+OSJUsuXrz47t07MTGxSZMmcTic+Ph4GMDHx2f27NmzZ88ODg6mUqkqKipwmPwxlJWVwcsaIyOjhQsX1tXVmZmZWVtbBwYGwgB
dXV0ZGRnr16+H/7LZbPRllrS0tOnTp+/YsWPHjh3Xrl17+vQpmUzmTgI3z0N1dTXW5c9vKgA4RwQAoKKi4tGjR3/++ecff/yB/gpP4GB46LZ9/fr1X3/9hYaBF/e4eQPQOFHQewB9mdAA3RzdCi7gMoOGhwZLbAwbN24cOXKksbEx7pq+trZ27Nixampq8CSMTqfjLqBRoE4TLhsaGn42DyoqKomJiYBrMgTu9bi8wcKy2Ww48ZaGhgauPlVUVHDTd6A3Nv69N42+O8TI90PhnhMuKChoxowZ8NcBAwZMnTpVV1e3pKRk5MiRVVVVb968sbS0RAcAOJ8R+hK2oaGhvr5+bGwsiUTq6enJz8+Pior69vfOvzvwPlVAQMD69evnzp0LJ6GGUz9zB6DRaGPGjFmzZs2ePXv6NVfooxQREZGEhAQ4O5KOjs6HDx9ggPT0dLhSSUmpuLgYAIC909jfDB06dO/evd7e3v7+/vn5+Zs3bwYABAYGxsTE3L59+9WrV1u3br1x4wY8yNbW1sbGxn748CErK4tMJm/duhWdtIHFYsnIyHh6enZ3d8M1b9++LS0t/fjxo7q6Onaeh+7ubtTlTyKRcFMBJCUltbW1vXv3ztjYOC8vz9fXNyEh4ebNm6qqqosWLUKzPW/evO7ubgsLi82bNw8cOHDv3r0jRoxYsWJFZGTkxYsXy8rKuOcNIJFIcN6JgoICGElVVVV2dnZkZCScXvGzExqMGTPm8ePHr169Sk1NTUlJIZPJaWlpaWlpDg4OaGbQ2a9YLNbTp097enqwdxRUVFT2798fExNTUVEBB56nT5++fPmSRqMtWbJk27Zt165dmzlz5pEjR/T19RMTE5OTk7EnEzk5OZmZmW/fvkUnxkNLiq0QHR0ddAoFe3t7GRmZlStX+vj4sFis3NxcuKGDgwNuPS5vsK6KiopERES2bNny6dOn169fl5SUoPWpoaGBm77DyMho79697e3tfZ+n4tenf16cIeBNQkLCrFmzqqqqjh07tm7dOgRBFBQUqqqquEMGBQXt27fPx8fH1dUV95OZmdmnT5/g8qJFi0JCQgIDA+3t7adPn37v3r3+LsJXkJ6efuXKlVGjRkVFRbW0tJw5c2bBggVHjhzhGQCuYbFYFhYWWC/+d6e4uHjTpk13794dOXLkqVOnHj58iCCIn5/f+fPnYYBZs2bV1dUhCOLq6pqZmYkgyObNm7/otbpvhEqlMpnM2tpanMufSqXCaSi+C+g8D9wuf9xUAChwjggo8qdSqdxxdnV1CU6UOy103gmefHZCAwGbfzYzEHhpC4uzcOHCuLg4bPY4HA7PkvYRfnno6OjgcDjcNYxbj80bgql/2EBUKpW7PrHTd7BYLDjlwjfOU/ErQVzz/Whwc8LNmDHj7du36K80Gg0umJiYKCoqTp06NTExkcFg4H6FNDc3l5aW2tnZIQiira396NGjU6dOoSePPw/e3t7wVsz9+/cDAgJGjx4dEBCAvfODDQDXCAsLjx49WlRUtP9yNWzYsMuXL/f29q5Zs2bUqFH19fUAgLq6ulGjRsEA6Ep4stx/OeGHlJSUiIiItrY27lNrKSkpVVXV75UKOqUGnLMX+9k4fKDIfc8Zfp0tJCQkKirKcw4TGRkZwYlypyX423ZZWVnB35sL2PyzmYHA52qwOFQqlUqlYrNHIpG+ZbYWfnmQk5MjkUjcNYxbj80bwNQ/bCApKSnu+sTOlCIsLAx3pV91gvWvgHjD5cfBYrGio6PLysoePXqUkpJy7NgxUVHRCxcu/PHHH1QqVVdXl06nT5s27dChQxYWFhQKZf369fAdh3Xr1jk4OLDZbE1NTVVV1dra2oCAADhH7v3796WlpePj4/Py8jgczr179xYvXvzkyZOfSui+aNEif39/LS2thQsXjhgx4ubNm9LS0rNnzwYArFu37sKFC9gAKSkpYWFhY8aMmT9/fr/eXaTT6eHh4QwGY8uWLQCAN2/eZGdnk0g
kOzs7X1/fkSNHbt682dfXl81m29vbS0lJUSiUjx8/fvjwwdjYmDD+/MJ8+vTJxcUFDn7/yExbBD8AYq6GnwIqlSosLAxP5RgMBo1Gw04vBwCgUCg8Xyf7t8BgMEgkEjzxRBCkp6cHHlNYLBY8scUGoFAoCgoK3NOLf18oFIqsrCx2jsOuri541sxms2HqbDa7t7e3/6ZmJCAg+EcgRj4CAgICgt8L4jkfAQEBAcHvBTHyERAQEBD8XhAj36/A9/Lx/+OgwjaeIrF/BGzdEh8CExD8GhAj3394+fKlvr5+Y2MjACA5OXnhwoUlJSWTJ0/29PR88uTJmTNnHj16FBgYOHTo0Nu3b2/cuDE0NPQrUqmpqRk3btyJEyc+fvxobGz87t07uNLe3r6wsNDT03PWrFnoRF9FRUUDBw48c+bMjh07bt68WVpaOmLEiBMnTpw4cWLLli3oUbiyshL7VfhPyKZNm1xcXFxcXNzc3K5fv75w4cIlS5aMGzcOfcbMYrEWLVo0d+7c0NDQnp6ev//+OyYm5vDhw/2aK2xO0tPT7e3tlyxZYmlpCdVTAICCgoKNGzeOHz8eTnzj7++fnJz8999/9/T09GvGCAgI+p1/8mPCnwkWizV9+nQbGxv4yefly5cRBFmzZs2LFy8QBOnp6amvr29ubjY0NEQQJDc319zc/OsSWrBgwfv37xEE2bVr15AhQ6qrq9Hk/P39t27dig1sYGDQ3t5OpVIVFBR6enqmTJmSnZ2NIIi1tTX8thpBkOvXrw8fPvyn/UaVw+HA7+4zMjIePHhQW1uLIAiTydy9ezca5uHDh76+vh0dHQiC3Llz5/bt2wiCrF27Nicnp/8yhs0JXEYQZNu2bWgAOJVMdXX1nDlzEASZPn06g8G4dOlSbm5u/+WKgIDgB0Bc8/0/S5cuHTNmzLZt28B/v94lkUjp6emvXr3at2+fpqYmiURisViVlZX37993dnb+ulTgN6cAAAMDg/Pnzy9YsIBOp6PJ4T4UI5FIHA4nLi5OT09PUlKSRCIlJiZevXpVU1MTCvJpNJqEhMT8+fPRb8B/NkgkkpaWFgDg5cuX9vb22traAICYmJiJEyeiYbDCNlQqpq6uXllZ2X8Zw+YELnd1daFz8QAAxo8fDwBgMBgWFhYAgFWrVk2dOlVMTGzEiBH9lysCAoIfADHy/Q9nzpwpKiq6e/cuusbAwGDs2LHorFq9vb3Xrl2jUCi7d+/+9uQcHR3t7e3Xr1+P8P+25PHjx93d3bGxsWh+Ro8e3djYCG/KPX78WEhISEtLy8fHR0AkPwPt7e3oR4pRUVFTpkxBf7Kysjp//vy5c+du375tb2/f09Nz9erV169fm5ub93eusDl59erVnDlzcAHCw8P/+usvAACJRHJ2dr548WJDQ0N/54qAgKBfIUa+/wCN5sLCwk+fPj1z5gw0nQMA5OTk1NTUNm7cCO8uSktLnzx5kkKh3Llz56vTggMAtMsfOHCgo6Pj9evXuDAlJSXQ2rds2bJFixah9iMVFRVLS8vx48c/e/YMANDe3j537lxnZ2dDQ0PuSH4ecnJy0FncYFVzm8mgsE1SUvL8+fMTJ07U1dUdNGhQv+YKl5PU1NQxY8ZgA0RFRS1ZskRMTKy7u/v58+cbN248dOhQWFhYv+aKgICgvyHsZf8hMDDwzZs3Li4uysrKT548efPmTVNTU15enri4eG9vb1VVVUNDw7hx4+rr64uLi+/fv29tbS0tLf1F9zxpNNqaNWsGDBgwfPjwvLy8R48ezZkzZ+DAgf7+/l5eXgwG4/379wUFBXDOrXfv3h09erS+vj4yMnLx4sUAgLKysvLy8hcvXqSmphYXF3t7e/v6+lKpVDiOmpiYHDhwwNLS8udUvTx//hxKwgAA7969s7W1hcvQXoYVtiEIkpiYGBUVhZ1Srp/A5qS7uxtVVUF7WXl5ua+vr7q6OpPJfPbs2ciRI2NjY+vr652cnPo7YwQEBP0K4XD
hDWrV+r7wMwH2U3I/D9iC0+l0MTEx6OSEBccK23p6ephMJk7e1k9gc8Jms1ksFlQAo/YyAaUgICD490KMfAQEBAQEvxe/8nUGAcG/HQqFAheEhIQUFRU/G769vR390FNRURE72QWbzS4rK+uP6ZZKSkqGDh3a34ZxAoLvCPGGCwHBT0pFRYXKf3F3d+/LJikpKVpaWioqKjdv3jx+/Li1tXVgYCD86fr164aGhpmZmd83kwUFBQYGBhcuXPi+0RIQ9CvE3c6fBSaTKSoqyu8JEz8YDIaoqCiHw/nJz7hhN0O/VuQuJofDwc3GB5UC8MFb/4HNCb/Kp9PpgudN7Sdu3brV3d0NK83Ozq6Pcy4aGhoWFxeTyWR1dfXr1697eHhcvHhxy5YtVVVV/v7+O3fu/L7Tk/b29p45c2bJkiXolz8EBD8/xN3OHweDwThy5EhWVlZISIiwsLCPj09ra+vBgwdjYmJCQ0PHjh1LJpOTk5OPHz8+f/78pUuXAgDq6+t9fHwqKioWLVq0bNmyHTt2YIeHjo6OSZMmvX//Pjk5eciQIUOGDPnnCieIuLg4KpVaU1MzZswYSUnJyMhIDQ0NAMCSJUtggPfv36elpb1+/TowMFBeXr6rq+vixYt2dnbW1tb9lysKhfLs2TNdXd36+no3N7fLly8PHz48JSXFy8sLDbN///7s7OxRo0bt2bPHyclJQUGhra1t4cKFfbwC+0bev3+voaFhYWGxaNGivs+Fiw25YsUKT09PLy8vd3f3gIAABoPR3NysqqoaEBDQ2tra3t6+e/duKSmpd+/e5efn9/b2Ll++PCoqqqioaPbs2b29vdHR0bq6uqtXr87Ozk5MTOzu7tbX158/f/6rV6+qq6u7urpmzJhRVFTU29v76dOnoUOHtre33717V0RERF9ff+bMmdnZ2UFBQdra2gMHDnz9+rWLi0u/NigBwRfwT8ljfk+ioqKmT58OxV0FBQUxMTHV1dWmpqbw0z0EQfz8/BAE4baUzZgxIz09HRdbd3e3vb19d3c3giC9vb3wL51Oh7/ClyThcmdnJ7rc1dUFF9hsNty2v1m3bl1JSUleXt7FixdXrlxZXl6OIMi4ceM4HA4M0NDQgCDI4cOHMzMzWSzWggULoPCsX3n48OHNmzc5HM769evr6uqWLVsGs4rmqqqqatOmTVAvRyaT4TXo+fPna2pq+jtvCILQ6XT0PdLx48c3NTX1cUNDQ0MAAJlMhv/q6OgAAHJzc8+ePQsASEhI2Lt379KlSxEE2b59e3t7e0BAgJycXG9v76RJk1atWhUXFwcAgGIEBQWFiRMntre3S0lJlZSU1NbWHjhw4O3btwMGDEAQJCQk5MWLFx8+fAAAHD16lE6nGxsbHz58mEajSUpK3r9/H0EQbW3tQYMGvXjxwtbWVl1dncVi9UtlERB8IcRzvh/NpUuX4uPjnz17JiQkJCQk9ObNG0tLS/SThlWrVgEAuC1lOLFZT0/Prl27cnNzS0pKOBzOrl27nj17Vl9f7+fnd/HixZ6enufPn6emps6ePZtMJp86daqkpMTNzQ0AcPHixcrKyps3b6ampkZHR2/cuPHt27f9XWRPT08XF5eAgID169ejcjIREZHOzk4YQENDIzExMT8/38DAICsrq6Ghwd/f393dvV9nbJg7d66/v7+Hh8fBgwcHDBggKyvr5OS0cuVKtJ4RBNHW1p4wYcLLly/V1dXhB+81NTUDBw7sv1yhiImJFRUVRUdHL1q0KDEx8cqVK98Sm4yMDHrDlkQiPXnyxN3dfePGjXJycidPnjQyMhITEztz5szu3buxt5fhJkJCQgiCzJw5MzEx0cvLi0Qi1dfXT5o0SVNTc8aMGWi04eHhBQUFw4YNk5CQGDhw4Pnz52EMampqjo6OY8aMaWxsJJPJ31IKAoLvBTHy/WjExcWDgoL27t2bm5sLAKBSqTyP7zhLGaSmpiYsLCwhIeHp06ejRo2ysrLS19cXEhK
CL+yxWKz79+/b2dlJSEj4+fn19vbOmzevtbXV3d2dyWRWV1fTaLSqqqrnz587Ozv7+vpyOBxoYe7vIsNP+MPCwt6/f79169YXL174+fl1dXVhP9obPnz4kCFDbt26lZ2d7eLi4uXlJSUlhTrb+gMGgzF37lwKhQIF2UOGDDEzMzt06BAaYPDgwV5eXmlpaT4+PnDNp0+foIP0B0AikbS1tSdPnhwQELBx40ZUKvRFMBiMhoaGAQMGDB48GF25ffv2BQsW3LlzZ+zYsVQqta6ujkqlAgAsLCyMjIy4I5GVlb13715vb6+Li8uFCxcmT568b9++tLQ0KyurjIwMNBicYwSeH4iJiTU1NeGK8xX5JyDoJ4iR74cCdVmampr379/fvHkzAGDq1KmJiYno8EOj0eACzlIGERUVlZGRkZKSAgDACZVIJBKCIPCwIiMj8+DBg+3bt+fk5HR3d0+bNm3jxo0KCgpHjhzR0dFRUlLicDh79+5VVlaGY+GIESOWLVv2A9yY3t7e69ati4iIuH///rBhwy5fvtzb27tmzRpsGHl5+Y0bNzY2Ng4ePLi8vBwAMGzYsPr6+v7LVUBAwOjRowMCApKTk4uKiqhU6sGDBw0MDMrKyrDB1NXVhw0bBpeDgoJ+mMAlOzvbw8MDXh/r6+s7ODj0cUMEQdC/jx49YrFY165dg/0EBrhz505AQEBCQgKVSv3w4cOYMWM+fvyYl5cHACCTyfAWa29vLwCAzWZzOJyPHz/KyMiUlpY6Ozu/fv364cOHmzdvLi4uNjAwePv2LRqttbW1kJBQd3c3AKCjo2PChAloNggIfjaIN1x+HL29va9evRo8ePCOHTvGjRt3/PhxAICJicnevXvXrVvn4OAAB0UNDQ2cpaysrKy0tPTNmze7d+/W1NQEAOjo6MyaNYvJZLa2tmZmZmZlZUlKSqqqqra3ty9btkxZWXnSpEmzZs2aNGnSpk2bCgsLHzx40N3d/ebNm+zsbEdHRwaDMWLEiLlz50Ln54ABA/q14IsWLfL399fS0lq4cCGdTg8PD2cwGFBmBu1l165d09bW7u7u/uOPP1RUVIKCgpKTk8vKyo4ePdp/uZo1a9bNmzelpaVnz55tYGDQ0dGRnJysqak5dOhQaC8rLCxkMpmysrKenp5wk7Kysh/2BiOTyQwODr5///6KFSumTp2KFXwL4PHjx/C84dixY+rq6h8+fEhKSho7diyLxXrz5g0AICYmJjc3d+/evaampiNHjrS0tDx//nxxcfHEiRPnzJnj7u5ua2s7ZsyYy5cvUygUFovV3t5eUlKCmmxXr17d2Ni4Y8eO+fPnKykpLViwAFpM4+Pjvby8zp49GxwcLCoqqqamdu7cuezs7IaGhvb29vz8/MTERABAbGzs8uXL+7HWCAj6BvFVwz8JVlpGoVC+SLnJ4XDgJw3oi/gIgsAbp/COE/ouPofDgV83CwsLk0gkGo0Grxp7e3vFxMR+zG0oBoNBIpFERUUpFIqsrKyYmBhcj9rLenp6FBQU0PBfWhtfB4IgPT096Isk3d3d0AwOP29AEIRCoaioqKDhf7y9rD9S7OzslJGRqa2t1dbWhp0HQZBPnz4NGDAA7Uv19fUDBgyAqdPpdHgxhyCIsrJyZ2ennJxcXV2doqIi7EhYaDRaa2vrD7snTEDwdRAjHwEBAQHB7wXxnI+AgICA4PeCeM73/cG+Fo+9lwj/RVfi/u3LJt8Y5luiFbAJvzBfscm/qIDEzRICgn8vxDXfPwzqFyb4R+hL/Xd1df2AnBAQEPwwiJHvPwQGBg4dOvT27dsbN24MDQ0FAPj7+w8dOvTx48cPHz5ctWpVU1PT5MmT//zzz89+AIdqArDL8F8qlfrHH39cvHjxyZMnTk5Ob9++nTNnTlFRkYBNsCsLCwu1tbXPnDmzffv2Gzdu8AyDIAh8JZ07Hn7R9iWMgE34hYELdDp95syZLi4uM2bMuHnzJgDg48ePp0+fLiwsxG4
SEhJy69YtaB65fPlybGzsiRMnvjS3X1RACoXi6+sbFRV1584dGo12/fr1tWvXent7Y8MUFBScOnWqrq4O3cTe3h6NnICA4N8KQoAgCII0NzcbGhoiCJKbm2tubo4gSG1trZmZGfy1rKwMQZA1a9Y8f/78W1Lx9PSE300jCFJXV/f27VsPD4/CwkLUKIYgSEdHB1yg0WhMJrOnpwdBEFQzZmBg0N7eTqVSFRQUenp6sLqylpYWBEEYDMb69euhzOxnAOf9gnOa4yxW0dHRf/75J1zmKRLrD7D2sqamJhaL1dvbu3jxYjQAd1YDAgLs7e37L0sEBAQ/BuKa7z+QSCQWi1VZWXn//n1nZ2e4pqOjIyws7OHDh6mpqYBLIfYVBAUFzZgxAy4PGDBg6tSpAICwsLCtW7cGBwe3tbX5+/v7+voePny4pKTE2tr6wYMH06dPf/r06e7du0NCQmAeOBxOXFycnp5eRkbG5MmTDx48WFtbu2PHjpKSkiVLltTV1SUlJX38+PFba+Q7gfN+PXz4UElJaefOndBuBTl//ryKisqyZcuioqJ4isT6A6y9TFVVlUql7tmzZ/HixWgAXFYjIyNhexEQEPzbIUa+/6e3t/fatWsUCmX37t1wjbS09Lhx4ywsLOBnXt8OT1eZvb29p6dnfHz8gwcPqFSqiYmJkpLSsGHD5OXlXV1dR4wYMXz48HXr1sXHx8Pwjx8/7u7ujo2NHT58uJKS0smTJyMiIoyMjKysrPT09KKjo+Xl5X+AmeWLQL1f2dnZW7du9fb2fvr0Kfr8rLa2dteuXRcvXjx06BDCSyTWH2DtZQAASUnJlStXnjhxAm0gbFZzc3OZTCb8hhLVjRIQEPxLIUa+/4AgiLS09MmTJykUyp07d+BKERERZWVlQ0PDiRMn4qRW+fn5X5HKjBkzsIZo1FUmIiLCZrPFxcXl5eXt7e0XLFiAhoEfFwsLC8PniwiCLFu2bNGiRTIyMiQSCf21pKQEAKCmpiYlJYX8fE+hUO8XKicbOHBga2sr/FVOTq6trU1ZWZlOpwsQiX1fsPYyAICoqOjIkSNHjx6NPsfFZvXTp08ZGRmXL1+uqamBF98EBAT/XoivGv5DdHR0fX19cXHx/fv3ra2tobqisbHxyZMnTCYTzoGQl5cnJCTU09NTWlrKYDC+wq114cKFP/74g0ql6urq0ul0W1vb8vLy7OxsJpNZVla2e/fuOXPmZGZmTps2raurq6GhoaGhoaKiIisrS0JCoqamJicnp76+PjIyEt6Uy8nJqaysbG5udnFxWb9+fUxMTGNj4/r1658+ffro0SM4w99PAur92rhx48mTJ5WUlAwNDXV0dKC97ODBg7du3Ro8ePDGjRtxIrH+yxLWXlZWVubv729tbT1r1iwpKSloL8Nmdfbs2bNnzwYAZGdnr1ixov9yRUBA8AMgPkv6B6BSqcLCwjyn+eZwOCwWC5V7fRE9PT3QJsVms0kkEm6K838WrIWLw+F0dHQoKioCjL+tt7eXxWJxi8T6FQRjL6NQKAoKCvAaGp2cHZtVAgKCXwZi5CMgICAg+L34iS4LCAgICAgIfgDEyEdAQEBA8HtBvOFCQPCT0tra+urVK7g8efLkgQMHoj9RqdSwsDDs14f8YDKZT5484XA42JVz5sxRUVGB0+bp6OhMmzbt/v37wsLC06ZNgxNAfguRkZE1NTWWlpYjR44EAGRkZOBehNbX1zc1Nb179+7q1au/+xxMZWVl79+/d3Nz+77REvxq/INf0f9U/NPtQPAv4wf0SfihIQBASEiooaEBrmSz2Q8fPtTQ0NDW1u5jPPCzDXt7ezqdXllZ6eDgkJKSAmfpe/DggaSkpKenp7Gx8eHDh11dXQVHdfjw4c7OTgEBYmJiAACBgYGDBw+GazgczqxZswAAqampLS0tN2/eXLduXWJiIolESk5O7kv+P5soluPHj8vKykJtEAEBP4i7nQQEPyldXV1QfcBgMDQ0NOB
KISGhpUuXmpiY9D0eeBmnoKAgLi5OoVDOnTsnKSlZVFTU3d0tJSWVnZ1dWVkpISGxYcOG06dPAwCam5vpdDq6eUtLC5PJBAD4+/sfPHiwp6cHF39nZyf6dX9GRgYAwMbG5t27d3ANiURSV1cHACgrK7e1tTk7O8+ZM2f06NENDQ2WlpYMBqO3txe+Q4u1h9fX18PrVDRRDofT29vLZDLZbDZcgGvgIId+G/rHH3+UlZWJior29vZCgS2FQsHmlkwm973qCH5ViJHvP/zTpyAE/zL6u0NyOJxDhw7JysrOnj0bflCPBQrhvpSqqqpr167p6emZmpq+fv0aABAREdHU1FRaWtrc3BwfH6+iouLi4nLnzp1FixYlJCRQqdSZM2cGBwebmZnFxcUFBwcjCHL37l3suLh9+/bjx48fPHhw165dPT09cXFxAAB/f38dHR1c6s3NzcePH+/p6XFwcPD29tbQ0IiNjb1z546EhIS7u7ujo6OOjk5HR0djY6OlpWVycvL48eObmprQRGtqaoYOHTp79uyqqqqhQ4dOnz69ra1t1KhRBgYGu3btGjJkyOnTp1kslrOzs7q6OpvNdnV1lZCQ2Ldvn5WVFfSMP3361MbG5s2bN7Kysi4uLl9RgQS/DMTIR0DwM0KlUr28vJYtWxYXF+fg4PDtERYXF585cwa94pk+fToAwNHR0cbGZvDgwerq6vPnz4+MjHz69OnChQv19fVPnTrl4+OTm5u7bt26FStWMJlMGxsbAMCGDRvQT1HfvXvn7e29bNkyFxeXs2fP5ubmomG4M3D79m043AIAYDAAgJWVFQBgxowZ58+fr6uri4mJ8fb27unpmThxIofD8ff3RyMcPHgwfNI5dOhQXV1dAICysvKwYcMkJCTOnj1ra2v78OFDERGRsWPHwpjHjRsHANiyZYuHh0dYWFhra+vFixfV1NRWrlzJZrO/6KKZ4NeDGPkICH5GZGVl//zzz/v37+fl5dXW1tbX139jhAYGBleuXIEDXmNjI88w0IEXGRk5fvz4rVu3ZmZmwnuJXl5ePG3deXl5AABpaWmoHRCs9PPy8rp06RLPn0RERMTFxQEADAajpKSksbHx3bt3f/75Z18U4dCEIC4uznP6MDRmJpO5YMGC8vLywsLC3t5edIAk+D0hRj4Cgp+R4uJiX1/f3t5efX39OXPmaGpqHj58+MWLF/BX+Lirj1Fh782uXbu2oaHhypUruJ/gAtTFqaurL1iwQE9PT19fn0KhZGdnA4xjls1md3d3w+Xhw4fDn+DDP/RCCnc3GPnv5IgODg6qqqqCczt06ND29nYbG5sFCxbIy8tjExUTE4Ol/robzrNmzbK2ti4pKamqqoJnAAS/LcTIR0DwM1JbW7t79259ff3Dhw/v378fAHDjxo3AwEAEQYKCgtLT0xsaGvz9/bmn/sDBZDKvXbsGAEhNTb179+7Zs2dHjx6tpaUVHR0NAAgODi4qKiopKSkvL09ISJg1a9a8efPWrFmzbNkyOPWVubm5g4PDzp07i4uLx4wZIyYmtmXLFjTRSZMmbdq06cGDB0+ePNmxY4eFhUVCQgIAwM/PD83Ahw8f4MulN2/ebG5uhlK92NhYAEBSUhJ8LpiSkgLDpKambt++XU9Pz9raev369SIiIthEFy5cmJGR4eXl1dDQ0N7enpWVVVhYWF9fn5+fX1BQ0NjYWF5enpiYCABITEyEc5tgY967d29gYOC5c+d27NiB3ncl+D0h7GUEBD8pbDabwWBISkrCf3t6eiQkJH6AjrWpqUlJSQneRQQANDQ0qKurw3SpVKqUlBRu3sT29nYSiYRen307CIKQyWT0y0JsohQKRV5enslkotXSdy5cuJCSkqKiotLR0REdHV1XV/e9Mkzwr4MY+QgICH4LbGxs3N3dra2tCwoK4uLivL29/+kcEfxjECMfAQHBbwGCINXV1WQyedCgQQMGDPins0PwT0KMfAQEBAQEvxfEGy4EBAQ
EBL8Xv6yx+vTp0yQSiUajsVgsRUXF1tbWI0eOwJ/q6ur27Nlz69YtOAEsh8Px8/NjMBgeHh6C42Sz2WfPnk1NTV2xYoWTk9MX5ae0tPTJkycqKioMBqOjo+PAgQNfVy6eUCgUb2/vr5gjHgAQGBgInZBaWloLFiwQEDI+Pv7x48fGxsadnZ2GhoaCA38758+ft7e3HzZsGPz36dOnHR0d69at++yGWVlZjx49OnPmTL9mry8gCOLr66ukpEQYQwgIfip+2ZFPQkLC09Pz1KlTXV1dhw8fvnDhAvqTlpZWdnY2eptXSEhIS0urL285BwYG2tvbT548eebMmePGjeu71T47O9vd3f3NmzdKSkpUKvW7i+SVlZW3b9/+FRvm5OQ8efIEDn63bt2Cg1lXV9e7d+/ExMQUFBQsLCzi4+PZbLa1tbWtra2zs/OOHTsGDx48ffr01tbWtWvXJiUldXV1wdjMzMxEREQ+fPgAANDU1DQ1NWWxWPDteYihoSG31Iof7u7uUOT/+vXrmTNnKioqpqSkCN7k3bt348aNMzExiYqK+ora+GpgDrnXk0gkGRmZ6urqH5kZAgKCz/LL3u3E+ZM2bNiQnJwcEREBj57CwsJJSUkHDhwoLS0F/9VAAABqa2tv37599+5dno8/p02bZmJiMmbMGEtLSwAAlUq9fv16XzLj5eXl6emppKQEAJCWlv77778BALm5uQEBAQ8ePGCxWFVVVb6+vikpKYcOHWpoaIB5AABUVVXdvXv35cuXf//9NxTvoqXo6uq6e/duQkLC7du3y8vLw8PD2Wx2UFBQXFzc8ePH4TdMHA4nPj7+woULYWFhPN/hVlRUjI2NTUpK0tTUdHV1hTXg7u4+fvx4Q0PDK1euiIiIfPr0KSYmBg5CEhISJBJJREREV1cXflPMZDLv3r1rZ2enq6ubm5urrKx87tw5dXX19PT0DRs2UCgUWMNBQUEDBw58+/Ytdx5yc3PPnz9PoVCuXr0aFhbG4XDu3bvX2dkZGxtbWlqakJCwdevW+Ph4YWFhFot15coVf39/npXc2Ni4atWqFy9ewNffQ0NDz549S6VSAQDp6elXrlyB0whwk5GRERYW5u3tXVNTg4ZksVivXr2Kjo4+dOjQy5cvAQAsFuvJkycPHz7Mz89ns9nh4eExMTFHjx6Nj4+HOQQAhIeHnz9/HhY5Pz//5cuXmZmZfekhBAQEP5JfduTDKX2ZTGZYWNjMmTP/+usvuIbFYk2YMGHatGnoZ7lMJvPChQvz5s0LCwsLCAjgjhMOXXQ6feDAgZqamiQSCRUYCiYrK0tfXx/919jYuLKy8ty5c4sXL2axWJ6enlpaWsePH5eTk9PT09uwYcPy5ctDQ0OrqqokJSW9vLyMjIzU1dV3794NZ2WDpRAWFr5y5Up7e/uoUaNgzoWFhXNzc8PDwz08PDZv3gwACA4OJpPJ48aNO3v2LPRL4Rg0aNClS5dmz569c+dOKNQ/duyYm5ubkpKSjo7OyZMnAcZNBXn48KGrq2tBQcHSpUsBADIyMrKyshISEhQKZdq0aTC8oqKiiYkJmUxWVVXdtGkTnCXA2NgYDq44DAwMbt++LScnBwAoLy8XEhJisVji4uLPnz8vLy83NzeXk5OztbUFAFRVVa1ater+/fs1NTXc8airq6upqc2dO1dISKitrc3MzKynpyc4OLi4uDgxMXHx4sVubm48h/+ysjIfH5/x48e3traiIevr6yMjI6OiotauXevl5VVbW7tlyxZTU9PFixcvW7asqqrq9evX0dHR06ZNs7CwgDn08/OTk5OzsbFxcHCor6/38fFxcHBA51ggICD4efhlRz4c0tLSzs7O7969Q429tra206ZNk5CQ+PTpE1xTWFjY3Nycn5+/ZcsWAVq/W7duwclcpKSkVq1a1ZfU5eXlcabEt2/fDh48GAAwc+bM0NBQUVFRGRkZY2NjPT09VVVVcXFxXV3dkpISdXV1JSUlfX39lSt
XxsXFYUshJSWlqKg4ZswYc3NzdM5SOTm5YcOGKSkpQYdhY2NjcXGxsbHxyJEj+X1ovHTp0pycnKSkJC8vLwBAWVmZmpoa/Innm9/Lli3z8/ObOHHizp074ZrKyspnz57duXMHW0XHjh27ceOGkJAQ9qtn9Noai7i4+Jw5cyIiIkRERIKDg8vKyoyMjMTFxbk1V3p6ejIyMgYGBpWVlfwrGwAAFBUVtbW1jY2NKyoqIiMjEQQpKCi4f/8+z3lQtbW19fT0LC0t4+Pj0ZBycnKqqqpmZmYDBgxwdHSMiYkJCwvT09MTFRW1traOi4tTV1c3NTXF9pNXr14xGAwajXbt2rWXL19Csxd2OlkCAoKfhF985ONwOPC+ZVFR0YULFyZNmiQsLIwNoKCggB7ftbW1a2pqbG1t7ezsUEshjsjISCcnJwUFhS+a5cvZ2Rl7EdnY2KipqZmWlgYAIJFIUDyPg0T6nw9O6HS6jo4Ov1Lw+zTF1dWVRqOlpKTwe/nl2rVr7e3tOjo6vr6+8IGchYUFfFDHEzShsWPHvn//Hi7r6uouWrTo0qVLtbW1cM2aNWskJCSg77EvuLu7X7lyRVZWVl9f//Lly9bW1ti0UFVjX6Jis9m4kDo6OhQKxc7OztLSsr29XcC2/EL29PTo6uqqqqpmZWUBAEgkEjxrwSYKABg0aBCHw4E3flksVm5uLvhawyQAoKSkJDU1NS0tjUKhpKampqamVlVVAQAKCwtTU1P5Kadx5Ofnf/z48StS7wtsNhv1iP4YEATJzs6GWu1vgcFgxMXFNTQ0fOmGny1yXl4ebKzs7Oy2travzyIX5eXlL1++hDfVvyNhYWHYOad+H37lkY9MJsfFxaWkpFRXV9Pp9JSUlGvXrsF3LoyNjR8/fnzjxo09e/aIioq+f/++sLAQPr6ytbXdu3cvet2D5fHjx56eni4uLlZWVtHR0c3NzZMmTepLTvbt26emprZ58+YnT57cu3evsLBwzpw5WlpaQUFB169f9/b2Li8vb2hoKC0tTU9PLygoqK+vLywshIfOlpaW4ODg27dvnz59GluKgICA6upq+OTs3bt3dXV1VVVVubm5eXl5lZWVNTU1RUVFgYGBGRkZAQEBN2/e5Cl4lJKS8vT0fP78+aVLl+A13549e5KTk8+dOxcZGRkZGclisdLT03Nzc6lU6vv37+vr6/38/C5evHjr1q2LFy8CAFJTU0tKSl6+fOnn53f37l0KhVJRUZGWlnbr1q2//voLZi8xMbGwsFDAgWDYsGHKyspOTk7u7u6DBg0CALS1teXk5CQnJ0tLSwsJCd25cyc9PT0vL6+hoSEvL4/f2Dx69Ojjx4/HxsbW1dXV1NQkJSXl5ORMnjw5NDTU2dn5ypUrPN+vSUpKysvLa2xsnD59Oi7k27dvw8PDFRUVbW1tfXx87t27FxISoq6ubmVllZ2dnZCQQKfT0Ry6urp6eHisX78+PT19xYoVBQUF+/fvj46OLi4u5jmNgGBkZGTmz59fWFioqKj44sWLVatWaWtrw/WnTp1SVFT8bAw9PT0nTpxAZ4j97vT29v7gp5idnZ0HDx6Ez7C/hebm5m3btlVUVMA4+77hZ4uspaW1aNGi8PDw2traVatWbdu27RuzCvnw4cPr16/nzJkTFxcHJ+wVDJvNhk+4Pwvctb85g/8+fqMv2Xt7e8XFxeFfAACNRhMXF+e2INLp9D4+vUPj7GNgNpvd2tqKvYnX09MjJSUleCsjI6PMzEz4agl3KQRz/fp1Jyenzs7OqqoqERGRiRMn4gLAwra1tUlISGBFiFQqVUJCAndl2a+w2WyYHJPJ5H5GKyIignNF8oN7c0jfmxUNeejQoWHDhjk6OqJthCAInU7nNkaiOWSz2RwOB81AT0+PuLj4V1fjoUOHSktLHzx40NHRoampmZ+fP2TIkOjoaHFxcXR+O8Ec+z/2zjquiuXx+0NIiUhKWJSBrSgqghigooCgIFiIigoSFmJ
iIAaYKCWgoqKglIJIC9JS0iLd3Rw4cGp/f8xz99nvnsMR496r3PP+g9eeZXZmdnZ3Znfi87l6VVBQ0MLC4scy8Bty8uTJOXPmjHCUgQl6eno2NjZLlizZvn17UFDQL8kbRFVV1dLS0tDQsKKiYtq0adD4/tWrVy9evDA2NiaTyWVlZWfPnr1+/Xpzc/O8efOam5uPHz+el5f39u1bISGhqqoqRUXF3t5eW1tbNE4rK6utW7fSP7/DcebMmV27drE8CJkwmr/5cMCmAm0weHl5GYr/jrzZw8Y2Ejg4OHBjV99s9hobGzs7O8lkMlrv486COYGBgXfv3n3z5k1LSwu0AMUBT1ZISAhXm48dO/afbPYAAGhy9O3WmDFjRtjsMTwcMvLLCkMiCNLY2FhXV4ctGTY2NoZCyWgOOTg4sBng4+P7mWLctWtXcHBwT09PV1eXoKAgnO6bkpKyYsWKr1+/enh4HD58+OLFi9hDHjx44OnpaWdnh/oQZWRk7N69e+7cubDb88GDB+/fv4e930+fPnV1dV2/fn1FRUV2dvb06dO9vb11dXWPHj0K5159/vzZ1taWSCTeu3fPyclp8+bN2I/XxMTEefPmAQDCw8OXLVt2//59TU1NR0dHLy8vTU3NmJiYpqamdevWnTt3ztjYWFlZub29/cmTJ5s2bTp27Ni1a9eio6Nv3Ljx8OHDw4cPk0gk5ok2NDRcvXrV19cX192HZltPTw93SExMTGRkpJ2dXUVFhbu7+969ewcGBo4ePYpdSltQUBAXF/fq1SsKhYItGQiNRsPlCj3l+vr6y5cvh4SEHDp0iOG16+3tffHixcaNG+Ezrqio2NfXp6+vv337dkNDQ9i9JCAgcOjQoY6OjpcvX/Lw8Dg6OioqKgIAzMzMVFVV0agqKioyMzPj4+NfvHghJyeXmZn55csXeXl5X19fXV1dFxcXAAD2fmhpaQkODo6IiMjPz1++fHlRUVF+fr6MjExvb+/r169Xr17t4uKycuXKlJSU6upqdXX17Oxs+gi7urpOnjzp5OS0cuVK7BD+6AFh8RtTV1dXUlJSV1f3wzE0Njb29PT8wiz9RyASiSUlJSUlJUQi8V/MxvLlyz08PJycnMLCwiZNmtTb2+vk5IQgiJ6e3rNnzx4+fMjFxdXR0QEDZ2Zmbt68GW5LSEgUFxc7ODjY29sjCHLr1i09Pb2Wlhb4PVFUVJSWlrZz505fX9+NGzceO3YMQRBRUVEYVVNTk5ycHJVKDQ4Orq+vv379+pUrV3x9fWVlZd+8eYPNnpCQEIIgnZ2d0tLSCILk5OQsWbIEQZDo6Oj9+/fDfEZERCAIsmfPHgcHh8LCwrlz5yIIMjQ0JC8vT6FQEAQxMDDw8PBgnui2bds+f/6MIIiFhYWPjw82D2i2cYeYm5v7+vp2dXU1NDSkp6fr6uoiCOLj43P06FEEQXR1dZOTk2FBIQiCLRls5Lhcoaf89evX2NhYEok0adIkOJkARUVFZefOnXZ2dsrKyvBiIQhSXl6+aNGi9PT0Fy9eJCQkIAji6+tra2tbX1+/ZMmSvLw8GCwyMtLU1JT+NtDX14dHrV+/PisrC0GQyZMn9/T0NDY2zps3j/5+UFdXLywsRBBkw4YNcO2ytLR0d3c3nCyNIMjLly+tra0RBNHQ0GAYoYuLy7Vr1xAEmTdvXnV19fB36J/Kf+ib709k0qRJM2bMgAM8P4akpCRcMMDiu+Dh4ZkxY8aMGTO+qw/gl7Nr165Hjx719PRs2rSJi4vLzMxMW1sbANDY2Lh79+6DBw8ODQ3BxTYAgM+fP6O5hS/74K+lOOrq6nV1dRMmTDAyMpo1a1ZNTU1WVpampubOnTvDw8Pv3LkDAODk5ISBJSQkli5dGhkZ2dLSMnHixKysrL179+7cubOiomLz5s3Y7MEvWnZ2djhrV0BAAPZGCAgIwDk4HBwc48aNAwCoqqo2NzdzcnKKiIgAAGpqavr6+uDhKioqBQUFzBN
NSkqSlZUFANB/c6PZxh1ia2t769YtCwuLkdgnYUsGux+XK/SUZWVl6+rq4CQv+oE3bW1te3v7+Ph4JycntCuVk5NTUFBQSEgI7WoqKyvT1ta+fPky/I4cCWjnBwcHBzc3t4CAABynZHg/0AOPAgCgB6J9ErgIFyxY8OHDBwKBIC8vz3DSw58Oq+VjweL3xdDQMDc3V1VVlY2NzcTEpKioaObMmQCAvr6+zMxMAEB1dTU6zXj27NnQGBYAAFc0ovG0trYuX768vr7+4MGDERERlpaWYmJi/v7+CIIAAOjnjFhZWdnb28PGZsKECS9fvgQADA4O4ubrInSzBOj3QOrq6rArQCZPnjwwMFBeXg4AaG9vh718TBIVERFJTU2F8Q83xQN3SH9/f2ZmpoiIiIeHBwcHx9DQEACgv78fl0M4LxdbMrgA2FyhJ+jl5dXa2rpmzRr6ucRo9jg5OXl4eNCZk3x8fDNmzNDU1JSXl+/s7EQQZMaMGUFBQba2tugVHK700P3we4V+P+5+YGNjI5PJAAD0xAcGBhgeOFyE0JE4MTHx+fPnP2CF+Pszalu+xMREc3PzBw8eXL16dSQj2B0dHU5OTra2tm5ubjdu3GC4Vvrvg0ajPX78GHpn/yQ+Pj7YfvmOjo7z58//TIQIgri5ufn7+wMAzM3Nh3s4wfBnUVlZaWxs/DN5+M8iIiKyb98+dXV1AMCePXu2bdsG91+9elVHR8fIyOjDhw/oYnllZeWdO3c6OjoGBwcbGBjIy8tPnz49OTk5IiIiMzPz0qVLPT0958+fJxKJpqamenp6BAJBUVHR0tJy6tSpOTk5nZ2d6JT9ZcuWTZkyBaZ77NgxDw+PDRs2XLlyBft1EhcX19HRkZycnJCQ0N7eXlZW9vHjx8rKyoqKiqSkpOrq6tbWVgDAq1evQkNDiUTi9u3bY2JiysrKioqKeHh4Hj9+7OjoGBMTQ6PR4O3BJNFr167Z2NicO3euoKAgNzcXVusAAGy2cYc8fvw4NjZWQUFh3bp1CxYsaGpqsrGxgQb0X758KSkpiY+Pp9FoCgoK586dq6+vR0sGN6iMzRV6ykJCQs+ePbt7966IiIiXlxcaODEx8cuXLwEBAXfv3t27d6+enh6UbE1ISKirqwsKCvL19TU2Nh47dmxKSkpubq6QkND169f19PRqa2vJZHJ8fHxhYSFuxVRra2tpaWl8fHx9fX1FRUV8fHx5eXlra2tqampCQkJbW1tFRQXufli+fPnVq1eLi4u3bt1qY2MDZZVSUlJSU1MbGxtra2sTEhJKSko+f/5cWloaHR1NH2FcXJyXl5e3t/eJEycKCwt/zd38W/GP9av+80hISJSVlZHJ5NWrV3t6eiIIUl5eHhkZmZ+fjyAIhUKJj49///59d3c3DO/h4bFz504EQeLi4mD3/eDgYHR0dORfkEik9PT0yMjIhIQEOLTw5csX9L+xsbE/k9vIyEg4AhEfH/8zY0tBQUGnT5/G7kHHgX6Yp0+f3rhxYyRRoWeBQ0FBgX4nHAEaIS0tLTk5OSMPP2oYGhoabhv7E4VIJGL3U6nU3t5euE2j0Wg0GvoTQZC+vr7h0iWTyeg2jUaDX0vfi76+/sePH7EpYqFSqbgMMEkUqgSQSCQmyWEPoVKpBAIBjZBGo5FIJGz8EAqFQiKR6EtmuFyhwK+owcFB+Nn3r4O7H9A6ZHBwEBnmFJjw7NmzmpqasrKy9PT0e/fu/cJ8/iaM2m8+wEhkUk5OzsLCYtq0aVQqdefOnbKyskuXLrW2toad++jKAXl5eQRBaDQaNzd3ampqRUWFqqpqf39/d3c3lUr18fGZOnXq1q1bKysrY2Njp02bdvPmzalTp6Iru8H/SkFitUCJRGJCQsKLFy86OzsfPnxYXl5eUFBw79498Je+CSo+SaPRUlJSQkNDv379ikbb3t7+8uXLx48fd3R0dHV1PXz4MCMj48qVK+gScgAABwfH4ODgrVu3Hj16hCAIlPQ
E/ys7CQD49OlTUFBQQECAk5NTe3u7o6MjnDU3MDDg7+9/+/ZtqEONFZ/s6uoKCgqCs/siIyPhYkEAQHFxcWJiYkhICKBTaaFSqW/fvg0ICIAv6TQaLS4uzs/Pr6GhARXkxO4Ef023gxqbaNFRKJS9e/cGBQW1trZiA/wXgI4iDLexP1F4eHiw+9nZ2eFIGwCAjY2NjY0N/QkAYKhpB8FeSjY2tm/OQ2ZIf39/f38/NkUs7OzsuAwwSXTMmDE8PDzDzd2lP4SdnX3s2LFohGxsbGPGjKFXEYLTcelLZrhcocA+QIYro/4VcPcDOuILB/YYngITXFxcXr9+XVJSAieI/sJ8/ib8Ftfs7wMnMgkAgCKToaGhEhISU6ZMERYW1tfXv337NvxvWVmZk5PThg0bHBwc4NgvPz//+PHj+fj4pKSkxMTExo4dy8PDIy0tzc3N3dvba2ZmJisrO2bMGDExMWynIioFKSAggNUCff36dX9//5w5c+DUwa9fv86dOxedgA4w4pN1dXVv375dv3492qpRqdQdO3Zs27Zt9erVq1evFhAQ8PPza2trW758OU6ipby83MDAIDg42NvbG0p6AgBwspNcXFw3b95UVVUtKyt7/PixtbX1qVOnAACXL19etWoVOzu7ra0tTnySg4Pj5s2bZDI5KCiov79/8+bNL168AAB4enquXLnS19e3s7MTV/62trYzZszQ1tYmEAgAgKSkpN7e3tmzZzs5OaGCnNidAIArV66sXbuWSCRiZVSDgoJmzZq1fPnyCRMmoAF+3W3C4m+hvr7eyMgINn7/dl5YfB8fPnyYP38+BwfHtm3boED/KGOUt3z0IpOQmpoaKNAMABATE0NH9aZNm2ZraxsbG3v69Gl0lldGRsazZ8+io6Phz9raWk1NTRMTE2jKg8aJ3UalIGtra7FaoCoqKufOnUtOTp44cSIanuHr2MSJEz9//nz8+HEo5QUAqKioGBoagp+wZDK5pqZm3LhxM2fOVFBQwI1KzpkzZ+rUqbt3705JSUF1I7Gykx8+fIC6lBISEvLy8hMnTuTl5YXdPp8+ffr69euiRYsOHTqEE58UEBCA0+RevnypoqIyZswYOCfwzJkzUVFRBAKBXg7q3bt3M2fO5OHhERUVBQCoqakJCwsXFRU1NjaiYXA7Z8yYAT0xhpNRRQMwuN4sficmTZpkbGysr6/PUC6Vxe/M2LFjNTQ0NDU1RzIz9k9kNLd8CCORSXRPcXEx3C4qKoIvNWh4KSkpCQkJ1A1OSUnJ2NjY1tYWfn5NmTLl8uXLT58+ZagHhgOnBcrBwZGYmJiUlOTv7w8lPxAEQYfrUahUalNT0/v372VkZFDjPVFR0fz8fNhty8fHh7bcw8HGxjZr1iz0pJjITqLAwHPmzFFTU+Pi4oLmDwAzAQz+5eXlhSKfFRUVJBLJyMhIQ0NDWFgYoZv8QiAQYKsM/+Xt7V1VVbVo0SL4E54Lbuf+/ftPnTq1bds2ehlVGB4N8M3CZ8GCBQuGjFpnWlRkUkxMLDo6GopMVlZWNjQ0ZGZmLl++PDEx8cmTJ+PHjy8pKbG3t+/s7IyNjS0oKPD09CwtLZ0/f76ent7Q0FBubu748ePHjRuXmpqqpqbW0NBQVlY2bdo0bW1tY2NjDw+P/v7+qqqq1NRUuNAKgkpBiouLQy3QlStXHj9+3NfXV0ZGZvXq1QsXLhQVFb1+/XpjYyMbG1tpaSnUDu3t7YXik6tXr37x4sXy5cvhKiIAgLCwsL29/e3btydMmGBtbU0mk6FQJ41Gq62t7erqglqOEydO/Pr1a1hYWHFx8alTpz58+ABFLKHsZHNzs7i4+Jo1a+C4WnNzc35+fn19/cqVK5uamvLz8/fs2aOiorJx40bYs/ro0SM7OztoZwHX1Kemptra2m7duvXDhw8bNmyYPHlyc3Ozo6Njf39/YGAgAACeBVxEePny5f3792tpaQ0ODmZnZ1MolFevXhGJRDiBDcpdYnd
WVFRcunTJ2tp627ZtwsLC2KJbtGiRj4/PjBkzHBwcYAAAwI4dO06dOjV//vx//P5iwYLFH8x/SLeTHjKZTCaTf2zo/rtApSBpNBqZTObg4IA9nFBkkkKh4Do8YRgAAIlEwq2kJpFI6MJhJjBUBEWGkZ0cLrdoVPTikzQajUqlwukGFAqFnZ0dfhfSZ2xwcJCLiwtBEBgDVByFSaByl9idNBqNSCSi/WPYzJBIJC4uLmyA79JNHWXQaLS0tDQ5ObkfswBsbW0tLi4euRTkd0EkEj98+KCurv5rr05WVpaYmBgUE+/q6srMzOzt7d2yZcvPzzEhEolxcXFaWlronubm5srKSnSs4Y+jqqoqPT192bJlDK1gWIzm3s5vMmbMmH+g2QOYeVbs7Ozc3Nxo8wBbDvrWYsyYMezs7Ozs7PQCIlxcXCOZpsXwvIaTnRwut2hU9OKT7Ozs6Cw7Tk5OdnZ2tDmnjw3+F/6EVSFMApW7xO6EU/IYZgZOXcMG+G82e1CJv6Ojw9LSEqc5MhKgSMfbt28dHR1/LAPf9Deor683MTH55fY3VVVVcK0bgUC4ePHiunXrCATCN80aR0J1dfX27duxe9ra2srKyuD2SPwcvsvz4bv4gZjb2trc3NyWLFkCBzhY0POfbvlYsPgTOX/+fHV1tZiY2A/I2tXV1UE7qkWLFv1Y6iQSae/evczDTJs2Dc5p+rUYGBjAuU5RUVFQo8vExEROTu7nY1ZQUMC98M2dOxc6QkRERMA5zEwYSZn8GOj1+i4SExM5OTnl5eW3bNnyd+RqFDBqx/lYsPijyc7O3r59u62tbXh4uIWFRWVlZUdHx4QJE7S0tIKDg0VEROCwblRU1JMnT7q6uuBY6cOHD0kkUkpKyvPnz+3t7QkEQk1Nzblz51BXo8DAwJycnOjoaBERkc7Oztu3b4eGhl66dGn16tW3b9+ePXt2cHCwubk5Dw+Ptrb2pUuXAgMD1dXVLS0t0Yyh/gZ6enrnz59ft27do0ePPD09CQRCeHg4XFUGhUsAAF5eXl1dXXv27MFOyHrw4IGcnFxOTs7+/fv37NmzZMmSurq68vLy0NDQjo6O+Pj4/Px8MTGxy5cvJyQkVFVV1dfXi4uLGxkZnT17dvr06Xv37g0PD+/r6/Pz83v+/PmaNWtsbGywIQ8ePIimRaPRTp8+jWYyMDAwMDBw+vTpYmJiysrK2EMQBPH29g4PD1dVVT127Jirq2tGRsaTJ0+8vLwEBQWXL1/Oy8s7XN4UFRVhmcjLy+/cubOkpMTHx8fX1/fdu3fnz59Hr0JFRQWBQAgNDXVzc5OTk0PLAbsgij7D8HqtXLnyypUrM2fOTElJ2bFjx7Rp03BFh75qtLS0xMXF1dXVvXv3rq2tDT1fIyMjLy+vOXPmREdHX716NTk5mUgk8vHxnT59Ojg4eObMmejN4+/vb29v39TUJCsrGxMTExoaOhI/yD+Mf2rJPAsWLL4P1IUgNDS0rq6usLBw1apVCIKgSvxaWlpxcXEIgsyZM6ehoQFnVuDi4mJlZYWLMzk5WV9fH0GQrKwsVVVVBEECAgIOHDiAIMjDhw8pFIqLi8ulS5cQOvF+LNDfgEgkPn78GPnLSeDVq1cHDx6kUqnQ7mDmzJlhYWExMTG4Y3GuCDgzB6znQFNT09q1a2FCL168QBDk6tWrt27dQhAEzeT169dv3bo1NDSEC4mCyyTWLAJ3iJCQEJFIbGtrmzZtGoIgKSkpWlpaCII4ODi4uLgg/+uHQJ83WCboRnV19YIFC2BW4VXAmWMM5w6ByzB6vW7evHn37l0EQYqKiiQlJWk0Gq7osJH4+PjY2NggCIKeL4IgmzZtgn4Xrq6uRkZGTU1NCIKcPn0aSpXibp5Hjx5BB4+DBw8GBwcjow5WbycLFr8pqAuBsrJyREREaWkp/UIaOOQpKCjY1dWFMytAjRGGAx4rJCTU1dUFAFi+fLmfn197eztMBSfeTw8PD4+IiEhYWBi
FQqFQKNra2n19fUuWLEEHX2/cuAHlm/v6+vT09PT09FxdXXGuCDgzB6znQEdHBxxL5uHhgUoU9OPNcJy4rKwMF3K4TKJlQn8IBwcHDw8PWhr0aTHP23CgKeLMMYZzh8BlGN2PGnEoKCjADOCKjnnq2BhQc4ycnJzg4OAbN24AOqcLeleHUQar5WPB4nfn8OHD8+fPR1dnokr88O0V3WBiqoAy3LFUKtXQ0NDQ0BB+sqDhEUZzv+HCyi9fvnh4eGhra/Pw8CAIUllZ+fLlS1tb26NHj8JgXl5eR44cqa6uHjduXEhISEhIiIWFxXCuCNDMAes5AABISkpqb28HAKBGDQhmXSn2xHEhUXCZRPfTH4ItDewGWmLM8wbLBD2Q3hRiwoQJWHOM4coBl2E0ddSIY3BwUEREBDuMivPBAHQODBA0BmiOQSaT9+/f7+XlNXbsWOhgxfDmYXgDjAJYLR8LFr8jWBcCUVFROzu7Dx8+VFZWpqSkQCX+jx8/lpaWxsXFVVVVVVZWpqamYs0Kpk2blpKSkpaWhvsamDVrVkFBgbu7Ozy8vr4+Njb269ev3d3dnJycVlZW9fX1kZGRycnJOPF+bCTQ32BoaKiwsPDSpUtQzDYxMdHLy0tMTMzAwAAuZq2urrawsFi/fj3WBQnrFwG/2LBmDljPgdmzZ58+fRqurCWTyZ2dnWlpaWlpae3t7RkZGVlZWc3NzVlZWZ8+feLn58eGxGaVm5sbm8moqChoFiEmJoY9JCsrq6OjIyUlJTY2tru7u7CwMCoq6uvXr9XV1UuWLPHz84uMjGSSN7RMuru7tbS0du7cGRUV1d3d/eXLF/Qq6OrqYs0x6MuBYYanT58Or5e1tfXg4GBAQICXl5enpycMjC06NIbe3t4PHz5kZGRUV1ej5hgAgHv37r1//z46OjoxMfHatWs3b97k5uZubW318PCIiIjA3jxz5sxJT08vLCxsbGzMzs7OyMgYfe3ff3o9HwsWfwpEIpGXlxddpolbc4mCIAictsAkquGOhYtNubm5GS4GxUKlUmk02pgxYwYHB7m5uREEgUstqVTqNxfIwgqHQCDAnjoDAwMrK6uFCxeiatFQEh0VXx4YGBjhSp7hQmIziTvxEUaOlhiTvKFlAgAYGhqCC0/pu0wJBAIU6caVA5MMY69XX18fPz8/bCnpi24koCoT9Izk5hk1sOZ2smDxBwAXYqK10nBO8SMxVRjuWLjYFAyzGBQLBwcHrNNhVGxsbOhaTOZeCuCvkTm0sqY3c8AZUIy8Ih4uJC6TPxA5eiCTvKFlAv5aZkrf7AGMOQauHJhkGJttbHjmPhjDMVyzB37CkeNPhNXbyYIFi38HlpnDD8Mqup+E1dvJggULFiz+W7C++ViwYMGCxX8L1jgfCxZ/MFQqNSkpae7cucyX7iEIkpeXx8fHN336dLintra2vr5eWVkZKwP9M0AbEOw0RZS0tDQymayiooKTlk5JSenu7l63bh0cHRwcHMzLy4O5XbZsGRqssrKyra0Ne+C8efPgwGdxcfGsWbNGnkOcCPXXr1+HhobmzZs33CEZGRmDg4NSUlLi4uLMR9S6urpKS0vhtoiIiKys7D/p1U5/fYeDTCZHRUXJysrSl1t5eXlHR8fQ0JCCgoKYmFh3d3d6evrSpUtHoYAL65uPBYs/mpaWlsOHDw+3kBmlt7f34sWL6OoCGo3m4+Pj5eUFMDLQP0NWVlZ+fj7DZu/JkyfFxcVeXl6o0yTk1q1bYWFhDg4OmpqacI+vr6+Li4uLi0t3dzc25IQJE8zNzZ8+fQpnUQYFBZWWlhIIBEdHRwsLi5FnEidCTSKRHB0dIyIihgu/e/fusrIyISEhU1PTkpIS5pGPHz8+JSVl8+bNCIKEh4fPmTMHriX4Z8Bd3+FAEOT69eudnZ379u2DC2ZQhoaGTp486eLi4ufnJyIiEh8f/+DBA1VVVWtr6y9fvvyNWf+XYH3zsWDxByMlJYU6ODJh/Pj
x2K8BdnZ2RUVF6HJgYGDwk3kgkUj+/v63bt2CPykUyuvXr1FlEw0NjUmTJqmrq2PlNAcGBpSVlZWVlfv7+yUlJbu7u8eNG/fhw4czZ87MnTsXFz8/P7+cnJycnJyioiIAYNasWRQKhZ+ff9OmTZGRkSPP59y5c2HkERER1dXV5ubms2fPZuIvHRcXd/bsWQUFBX9/f7i4m0Kh3LlzR0BAYPz48cHBwW5ubn19fUePHtXX1zc2Np4/f76QkNCyZcuWLVuWlZX1/PnzGzdufPnyxczMbP/+/Tw8PAkJCW5ubm/evPHx8dHV1S0rKzMxMZk2bZqNjY2srGxcXJy6unpxcfHp06dHck2x4K4vFuzl6O7utrGx4ePjQxDky5cvurq6aLDnz59v3Lhx27Zt0IQ9KSlJRERk7NixCgoKzc3NCgoK35Wf3x/WNx8LFr8pT58+dXV1Xb9+fUVFRXh4+LJly+7fv6+pqeno6Ojl5aWpqRkTEwNDvnjxQktLS0NDg0AgEInEe/fuOTk5bd68mUQiNTQ0XL161dfXNzExEQBAJpMdHBwCAgL8/PwAAL29vZaWlvfv3+/s7NTS0rpy5Yq5ufnu3bthtHZ2dp6enmvWrLl06VJfX9+DBw/ev3/v4OBAn09shyGVSg0ICEB/Tpo0iUajubi4QJUsCB8fH+x15OHhkZWVFRQUbGpq4uHhWbt2rYaGxsDAAH1pUKlUKpVaVVX1/Plz2P/G8BMTAGBiYnLy5MmmpiYlJSVos2BmZtbc3Ozi4mJsbEylUr28vDIzM2FjVlRUdOHCBRUVldbWVlw8W7Zs0dDQiImJkZCQ2LBhAwDg8uXL48ePNzMz2759+/79+wkEgqysbG9vL87GLz8/PysrS0dHBwCgoKBQUVFhbGy8bdu2I0eOAAAWL17c0tJiYmKiqKh44cIFAICBgcHhw4cbGxt1dHTOnDmDa4xv374dGRl58ODBz58/018m3PVlWG7o5RASEuLj4+vr64uJicGqkAMAEAR5+fLl5MmTob+0kZERutpdTU2NYcx/NKyWjwWL35H09PSYmBhBQUFOTk5XV1dlZeWWlhZra+tr164FBQUdOHDg+PHjr169goE1NTXfvXvHw8Pz+PFjZ2dnAoEwceLEwsLCiIiI48ePb9q0adeuXUuWLAEAeHt78/PzGxgYwPd9AQEBKSkpMpksLCwsLCw8f/58d3d3KGhSVFSUkZFx8ODBuXPnysjIEInEp0+fqqqq0hvfZGVlwWHC/v7+hoaG+vp6IpHY0NDQ0NAAdTvj4+PLy8tNTU3pTzMsLOzq1asAgEmTJj1+/Pjr169cXFx3796lD5mSkuLi4uLq6trX18e86MzNzYuLiyUlJdXV1ZuamgAA8+bNk5CQWLRoUVdXFwcHh6KioqKi4oIFCwAA0tLS9vb2CxYsiI2NxcVz7949c3NzHR0dMzMz2Bo9e/YM/UTesGHDlClTAABsbGxoG9zZ2eno6Hj//v1JkyZBzVUY4NOnT9HR0VDnjI2NDUGQnp6esLCw1atXAwCw2mNSUlK4Mddx48ZpaGjMnz8/NDSU/jLhri8WhpeDSqUGBwdDmRhs4AMHDsTHx7969crU1HRwcHD69Onm5ubu7u6NjY3/5IDlP8YoPCUWLEYBOIFjdnZ2OMolICCASgm3tLTAwLCSVVdXr6urw0kPJyUlycrKgr/WwicmJkJDO9SjGF1wjYoU8/PzEwiEKVOm1NXVNTQ08PLyzps3bziFZQBAV1cXXOKdmZnp4OBw48aNr1+/Ojg4ODg4EIlEAMDatWtDQkK4ublxxzY2Nra3t2/atAndIyQkdPr06fLycvoCWbly5ZEjR27durV582bmRbd06dLGxsbGxkYODg5/f//s7OzFixeDYZaW42S7Ufr7+zk5Oc+dO5eTkxMWFubh4QEA6O7uxn5o0kcoLCx86tQpb29vTU1NbDeyoKCgiIgIKhbT2dlpZWU1YcIEbA/wcDBUEgd/XSb
c9cXC8HJwcHDs2bMnMTExPDycPi1NTU1FRcWWlpbXr19LSEhkZ2enpaXBr8BRBqvlY8HidwQncIz913BrcFtbW5cvX46THhYREUEVmWk0mqioaEpKCvoTMFI3hj/HjRt38uTJT58+HT16dOHChcMpLAMAZs6c2dnZCQBYtWqVu7u7i4vLggUL3N3d3d3dYbsCmTRpkpiYGPhL3Lmrqys5ORl+CKKzIgEA7e3t2ImdECqViiY6c+bMb5be3r17TUxMduzYIS8v//DhQyUlJcBIhJpenxqlvLw8JCQEAKCgoKCvr9/Q0AAAUFNTi4+PR8PgNEJheULGjRuHtaSfMWOGoqLijh07SkpKEAQRERHx9vZOTk7GxjacODhDJXE0PO76YgMwuRw8PDyzZ89Gk8Aexc3NPWnSpMTExEmTJsEXkezsbEZl/GfDmuHCgsXviK6urru7u6KiorKy8tmzZxMSEtrb28vKypKSkiorKysqKpKSkqqrq1tbW+fNm/fq1aulS5eOHz9+y5Ytc+fO3bBhQ1xcnKKi4pUrV65du2ZjY5OUlFRQUEAmk62srKDIck9PT21tbWlpaVpaGicnZ11d3ZcvX9LS0qZPn15dXZ2ens7NzX3jxo0ZM2a8efPG0NBQWlrayclp9+7dOIVlAMDWrVtDQ0Oxn25Ydu7cqaysLCwsbGpqysfH9+zZs9TU1Lt372ppaXV0dFy8eHFoaMjNze3du3efP3/W19fv6uo6cOAANoa8vLycnJz+/n5DQ8PJkyfDnUQiMTQ0tLq6Oj8/n35Zwu7du7OyshQUFPbt2we/IKlUKlaE+vz583Jycp8+fSKTyc3NzRkZGRISEjQaDe3Zk5OT2759e3V1taioaGFh4bNnzwAArq6utra2nZ2dEhISQ0NDGzdurKmpqaqqioyMNDIyCgwMbGhocHZ27unpSUlJ8fHxAQAUFRW1t7e/ePGCi4srJCTExsYmOzu7tra2rq7Oz88POtCuWLGitLS0pqYmJibGxMQEeyLwW9/KykpMTCwyMnLHjh24y4S7vmQymYmAXEtLy4EDB4yMjHp7e+GwK7wcd+7cWbt27c6dOydMmHDp0iUODo59+/bduHGDi4srKirquybQ/imwNFxYsPh9QQWOmTM0NESj0dD+Lpz0MJlMplKpHBwcsE5EEIRAIPDw8DDX2ExNTUUQZNKkSZ2dnaGhoXAuBkOFZQDA3bt3Dx06xFD1kUgkdnV1SUlJwZ+oLjZ9yMbGRhEREYb/+gHgKSMIQqFQ6M90ONluLFCQurm5eeLEidjGvq+vj5ubG6fh+ffxTSVx3PVlDvw6R8cgsZH39vZKSEhgo21qapKUlBxJtH8crJaPBQsWDDhx4kRfX5+2tnZLS4u4uLi2tjaTwDQa7d27d9ra2sPNt2TB4reC1fKxYMGCAVQqNTs7u6GhYfHixWgfIwsWowNWy8eCBQsWLP5bsOZ2smDBggWL/xaslo8FCxYsWPy3YK1qYMHiT4JKpYaFhWEVF1tbW4uLi1etWkUfuKCgACqBcXNzT5069ZeI7v+7pgT/AC0tLWJiYkxOikQiff78GbtHWFh42rRpvzAPRCIxLi5OS0vru46ivze+SXh4+Nq1a+E014qKiqKiIkFBwZUrV35Xun8io+qWxVJQUPDp06dPnz7l5ubi1BmGhoaSkpJCQkJKSkry8/PhzvLy8k+fPiUmJqJ+KJ2dnREREZGRkfX19YWFhdXV1WiEvb29aGw1NTVwf05ODi6h76K7u3s4+d3q6uqQkJCkpKSmpqaKiorW1taEhITvjZ9CoXz6i/r6+n9gfBdBkNzcXOwi5R+IITo6+m+VkMjKyqIXJcFCpVKhqn1vb+/bt2+Zx1ZQUADVIP8+hoaGcnJy4Da8D9++fevo6Mgw8MSJEw0MDN6/f19XV7dnz55jx479fAb+XVOCfwAhIaFnz57hVqlj4eLiqqioWLlyJRsbGwcHR09Pj6ur66/NQ3V19fbt27/
3KOy9wRy0BsvPz4eu7llZWZGRkZs2bfr48SNuRfzoBBmldHR0TJ48+cKFC6Ghodra2kePHoX7MzMz1dTUwsLCamtrvb29paSkEAQZHBzU1dXdtWuXmZkZVIvw8fHR0tL69OlTRUXFyZMndXV1BwYGVq5cuXPnzqSkJBcXl40bN8bHxyMI0t/fv3z58j179iQlJe3cuXPfvn0/luH4+HhpaWn6/VZWVtAoJDc3d/369ffu3fP09NywYcMPJOHv78/FxZWamurp6amkpFRaWso8fE9Pzw+kgtLd3a2jo+Pj4/PDMZw7d662tvbBgwf9/f0/kxMmvH79Oj09nUmA/v5+Ozs7BEFyc3N5eXmZhOzp6TE1NXVwcPjFWRyG2tpac3NzBEGysrKY3A8qKipQC6a8vJyNja2/v39oaMjOzs7c3DwgIODRo0dPnjzp6OjYs2fP1atXb9++ff/+fQRBkpKSbGxsrl69ampq6u7u7ujoiI0zNjZ25syZcHvXrl2nTp36G8/z36ChoeHWrVtMAhQWFvLz88NtCoUCXyV/LaKior88TsjQ0NCWLVtwOy0tLWGF9h9h1LZ8CKNnnkAgyMjIJCYmomHg/e3l5eXp6dnd3Q135ubmiouLoz/RYCYmJpcuXYJ7iouLRURE8vLyEATZtWsXrO+gz1l1dfUP5JZAINC3fG5ubmvWrEF/9vf3Q6XaH2v5sI/rwYMHjx8/ziTw+/fv3dzcfiAVLDY2Nj/T8s2ZM2dwcPAn8/AL+WZldO/evV/V8uXm5k6ePLm+vv7evXurV6/u7OzMzc29ePHix48f586diyDInTt3li5dGhUVlZWVpaSkdOvWrZUrV3748AEXD3wKenp6Ll++vGnTJrjTy8sL3slUKhW+AO3fvz8yMhJBkLlz55aUlGRmZlKp1MjISFNTUwRB4Kp2FLTly8vLmzlzZkpKCoIgdXV1ly5dCg4OPnjwIIIg0dHRERER58+fLy8vHxgYuHv3rqOjo46OztDQEP3JEonEs2fPPnv27NChQx8/fuzo6Ni0aZO9vb2ZmdmuXbuwIbGpUKnUI0eOGBgYkMnknJyckydPYhPq6ek5ceLEoUOHNmzY8PHjx5MnT8bExED5EgRBzp8///Dhw9WrV1+8eLG3tzcqKsrLy0tHRyc0NBQmpK6uzuTNr7CwcOzYsRQKpb+///z58wiCVFRUnD592tfX19jYuK6u7vHjxxs3bjx69OjVq1fRo0pKStzd3c3NzS9cuIAgSExMzMOHD69du+bt7Y0gSHx8/OPHj+3t7R8+fIggiIiIiJeXl66u7u3bt7FJNzY2enl5PX361M/PD0EQuHHo0KFPnz6h9wa2HAYGBk6fPr1nz57Lly+rqKh0dnZmZWWNHz/e39+/vLx87dq1WVlZ5eXlS5cuvXDhgq+vr6ysbEZGBi7m4crhz2X0j/P19va+ePFi48aNfHx8cXFxfX19qqqq6H+trKzAXw4dJ06cePz4sb6+fmBgoKqqKvSpwgbDoqCgoKGh8fjx43v37qE7s7KyJCQkcO7Y9fX1jx49mjdvXmRk5MOHD1+/fu3u7r5169bXr19fv359xYoVnp6e/Pz8lZWV9Jl/+fKlkZER+pOPj2/fvn0FBQWdnZ23b98ODQ29dOnS6tWrnz59SiAQQkNDYVulq6trYWGRmZmJIIiGhkZUVJSCgsLp06exMWdlZW3fvt3GxoZAINTU1Jw7d25gYCAnJ0dISCgvL+/u3bteXl6CgoLLly+HkvY4YmNjKysrOzo6JkyYoKenZ2xsDGWCCQTC8+fPGxoafHx8pk6dmpiYOGfOHOyBmZmZO3futLa2Dg0NlZKS8vHxiY6Orq2tDQsLMzU1XbJkyf79+5ctW/bu3bsLFy60tLT4+/urqamVlpaiebt///7z588DAwOnT58+bty4r1+/SktLAwCSkpLOnTv38ePHioqKoKCgpqam8PBwLi4uLi4ubBkCAJ4+fTp58uSQkJCrV6+
ePXt2+vTpxsbGhw4dYhhPYmKipaUl2itOf0GfPHkCMyMmJgZ1EVtaWvT19Y2MjCwsLDw9PSUkJKBhzXcxf/782bNnNzY2Hjx48Nq1a2PHjq2vr9+7d+/UqVPr6+sBAEpKSqmpqevWrcvOzubm5j5x4sTUqVP9/Pyg/D+WsLCwoqKiuLg4dASIjY2tvr4+PT09KCjo5s2b4C8py+zsbDKZLC0tjRNSWb58OS5OaEpQVlaGmhIMDAyoqKisXLnS2toaQZCQkJAVK1acOHFiYGDA2dmZQqHIyMhA+wh6yWkXFxcxMbHdu3crKiqqq6s3NDRARwI7OzsFBYWOjg70mcKmwsbGdvr0aWj1Xl1dfeTIEWxC8fHxMjIyX79+jYiIGBwcrKioUFdXf/jwYU5OjqioaEZGxpUrV4qKimRkZMaMGXPjxo39+/fLy8tfunQJrtmXlJQsKiqiP3EUMpns4uIyNDT04cOHK1euWFtbOzg4LFiwoKen5+TJk+fPn7979y5OFfrMmTN6enoLFiywsrI6cuQIkUjcuHFjT0+PpaXl7t27HRwcYmNjBwcHg4ODAQA0Gm3Xrl26urrKyspYU9+kpKTMzEx3d/eSkpKcnJzPnz/fvXt36dKl1dXVq1evhvcGthyio6OnTZs2NDR04cKFhoaGhIQEPT09Xl5eQ0NDAAAczpSTk5s8efKaNWvU1NSeP3/Ozs6Oi3m4QvhzGbXjfJCwsLBbt25FRUVBi6nKykrcID+UIMI5dAwXDMf06dMbGxvh9qdPn9TU1Hx9fXNycnByU/BZ1dLSev/+PYIgixcvhve6ubn569evi4qKIiIiduzYYWZmRp/EcDmBNZ2VlZWfnx/OzkZeXp6Dg2Pjxo2PHz8OCQnR1dWFtTM8nEQi3bx509jYeMuWLSdOnJCRkeHi4oqIiFBSUrKwsDh58uShQ4fa29t9fHywTi70wIdWR0fH19f3u5xTlixZ0tXVtXfv3sjIyMTExPj4+Bs3bvDy8sJKR0JCgo+Pb/bs2Z8+fdq0adPYsWP19fWlpKSweXv06JGSklJdXd3du3ft7e0lJCRmzZrl6OgoJibGxsbm5OTU3NxcX18PK4hdu3bRizq6ublJSUmZm5ujHj2CgoLDxbNy5UpYmwx3QdHMnD17FgYQFxeHbmoAgK6uLubqJ0wwNTV98uRJVFSUqqpqWFhYTU0N9K+htwgYznAAoq2tbW9vHx8f7+TkFBQUBHfy8PAICgpiX+8+fvy4devWuLg4hvphXl5eenp6enp6HR0dgJEpgaysbF1dXVJSEgCARqPZ2treunXLwsJi/PjxOPsI+sg/f/4MJ1koKCgMDQ11dHTgHAnQkLhUJCQkli5dGhkZ2dLSMnHiRFxCnJycsMnk4eEREREJCwujUCgUCgVnQ1FeXi4nJ7dz587bt2+j6sy8vLz0jn1YuLi4jhw5Ymtr++jRI+wpqKioFBQUoEljaWxs3L1798GDB4eGhoSFhZWVlSMiIkpLSykUSllZGRQJ4+HhgS6yHBwcPDw89NdUW1u7r69vyZIl3NzcaWlpUG9MRkYGvvHAewNXDmhhCggIYOcoAEb3ElThoY95lDHKWz7cMy8jI1NTUzOcCzPq0CEjI8PQJwVHUVERWqsuXbrUzs4uPj5+aGgIFwz3rOLuwuTk5OFMRgAAw+UEW9Ph7GzAXyq3AIBx48Zxc3Pz8PBAsT4AABcX18mTJ589e3bu3Dk2Njb0+aypqenr64OPAXx0mZ879qEF3+OcAgDg5OQcO3YsOzv7ihUrcnNzcZUOfZVBnzdsmOHse7AVBC4DV69e1dDQCAsLA5gnn4kNEK52wF1QhnWckZHRx48f6+rq+Pn5f1jQS0dH58OHD+Xl5dBrZtKkSXA/MmLDAYAxEODk5OTh4UENBERFRWfOnHn+/PnCwkJ4rLq6+t27d/ft24eK92NjO3DgQEhISEhIiIiICENTAi8vr9bW1jVr1sCR8v7+/szMTBEREQ8
PD5x9BIVCwfkDzJ49Ozk5GQYQERERFRVF/4U7I1wqAAArKyt7e3t4v+ESQo/68uWLh4eHtrY2Dw8PgiA4GwoxMbGoqCjYwMBuXgBAV1cXnK6JyyqE3jsCPYX29nboHU9PX19fZmYmAKC6urq5ufnw4cPz58+fNWsWgiATJkxISkpqb28HAKDeC4DRNa2srHz58qWtre3Ro0clJSWDg4NhDqGhBww8XDmgUWEvMZoQdg99zKOM0dzy0T/zq1atkpGR8fT0RMNUVlZiJ3FBh469e/dmZGTk5eWh+0tKSsD/upCkpKTk5uaam5uDvx4DdXV1a2vrzZs3w7lSKPTPKgRu07vGYDl06NCTJ0/QCBEEQaelgL/uUSZ2NuB/Kw4ajTbcrK3JkycPDAzAVhY+umjFSl9PAQCwDy19ckycU7A0NjaqqqrSVzrY2BAEoc/bcBFiM4OtIHDBJCUl8/LywsLCsrOzGbYWDE8KZbgLimXMmDEWFhbbtm373onpuEi2bt26bt26mTNnSktLb9y4EQAQFxfX0dGRnJw8a9asgoICd3f3jx8/lpaW1tfXx8bGfv36FX3LAQAkJiZ++fIlICDg7t27e/fu1dPTMzIyIpFIaWlp2dnZISEhDx8+vHPnTkdHR2FhYWJi4ubNm+fNm2dubk4gEMhkcnx8fGFhIRy9Runo6EBNCezt7QMDA6EpAZwSeffuXThA9fjx49jYWAUFhXXr1h07dszDw2PDhg1XrlyZN2/ehQsXcN2/1tbWg4ODAQEB6Ig7dCSoqqqCjgRoSFwqAIBly5ZNmTJFXV0dAIBNaNq0aSkpKWlpac3Nzdzc3NCLlUql+vj41NfX37hxw8fHx9bWNjw8XFxcfMeOHQsXLoRdPvBFh0QizZgxo7q6GtddDwAYGhp6+fJlf38/6gwMALh379779++jo6MTExOvXbsWExNTVlaGm/V69epVHR0dIyOjDx8+SEhIiIqK2tnZffjwobKysrS09PTp0/Pnz4dOGrDvJCUlJTY2FpYGGklycrKXl5eYmJiBgYGOjo6oqOjcuXN37do1efJk9N7AlsOcOXPS09MLCwsbGxuzs7PhGJ6CggK0HiwtLY2Ojm5tbS0tLY2Pj6+vr6+oqIiPj8fF/EP37+/NzwwS/s58/PhRSEho69atd+7cMTY2trKyolAoCIJUVVVpaWmdOHHCx8fH3d09Pj6+v79/2bJlDx48ePXqFTqWGx8fr6amdu3aNR8fHzc3t9LS0uLiYgUFhZUrVz548MDR0dHKyqqmpgZBkOLi4mnTpq1ataqmpoZGo2lra2/YsKG2thbNiZ+f3+zZs+/cuTNnzhw3N7eXL19OmTKlpqbmxIkTy5Yta2trW7t27e7du69cuSIlJQWnzGBxdXXV0NBwdXV99OiRu7t7b2/vrVu3ZGRk6urqTp06NWfOnKamplWrVi1cuNDCwqKhoaGsrExUVDQgIKCoqIiHhyc8PDwjI4OTk/Pz58/nzp0DALx48QLGPDAwsHv37vXr1zc1NSEIEhAQYGpqGh0dfe7cOQqFEh0draSkFBERcebMmY0bN+Jydfjw4XXr1rm5uUlKSkZGRi5duvTixYuVlZUiIiL+/v6hoaGzZ88+e/bsunXrrK2tSSQS9lgxMTF3d/fnz5+7uLggCHLq1KmpU6fu378/MTGxvr5+/vz5p0+fJhKJ6enpY8aMefLkCZVKxeXtwYMH0HO8s7NzzZo1R48era+vX7x48enTp2tra6dPn+7q6urh4eHp6RkXF/f06VNc5g0NDVNSUuzs7BobG3V0dLZs2VJTUzNcPNCqOykpKSUlhYODIz4+HndB0cz09PQYGhrq6up2dXXB4tXT0/uZexhBEHjTIggy3MSQn4z/1zIwMIAgyODgIJVKpVKpsPmE/6LRaOgEXSqVSn9REATp7e2l0WjflQrcg6aCSwgLkUiEjuREIjElJSU5Obm6ujonJweds0YgENDAERERcXFx2ORGyDe
nQw8NDWEvJYwczXB/fz/2XBhCpVJJJBL2HBkmOlw5QCgUCu6pZMhPzu7+nfmP6nYODAwMDAygnSr0Dh2Qjo4OHh4erLvmj0EkEnl5eYeGhsaMGcNwhWxvb++4ceOoVCrq2oyjqalJTExsuP+CEdvZMIdGow0MDKDxQCcXGo0GZ6zhAsOT+gHnFElJSfiWgNrE9Pf3f7OQcXkbyblQqVQymUyfPRqN1tfXhx3i+l6+eUEBAHAKib6+/g+nMlqpq6sTFRVl2BP+z8DchqKmpqaysnJUDm6xQPmPtnwsRs6vradoNJqgoGBTU9PPv0/8tgwMDBw8eBAA4OPjw+RlhcW/BcuGggWr5WPxj5KdnV1YWCguLr5hw4Z/Oy9/I+Hh4UuXLsXO1GDBgsXvA6vlY8GCBQsW/y1G89xOFixGAUwEXUdIeHg4upjhb4VAIODWbv9NfP36FastMBwkEunjx49NTU3/QJYgCEZptqqqys/Pr6qq6tcm0dPTExQUhJvF3dXVBdP9T0hu/gpYLR8AAJDJZFTNuaKi4tOnT+jU8Kqqqk+fPmVkZFRWVsIA2AcpPz8f7kRXtjFRygYANDU1kUgk5pmpra2FSwJGTnNz88gP+Wb19E0R5x/mu6rgn6/xfxgqlZqQkACXbP/roItnfhhUlfjvpqysbO/evX9rEr29vSQSydHRMSIi4puB29rajh07xlAd6YdTZx4Ais40NzfX1ta6ubktWbIE5+qAg0qlfu+lOXr06Pr169EV9wAAAoFw8eLFdevWEQiEETa03zyRUQ+r5QMAgDFjxhAIhGXLlrGzs0+cOPHChQsbNmyAa9InTZp08eJFUVHRCRMmmJubP3z4EDt4IyIioqKiUlhYWFhYuHr16rCwMObq+NeuXfP392eSExqN5uPjAxcqjZy2traysjK4/c17+pvVU1VVFW4J1wj5ZtLfVQX/fI3/w7S0tBw+fJhJIfyTFQe9Ds73cubMGfq19n8HCxcupNcE+YVERES8ePGCi4tr9uzZIwk/ceJEqHrzC1NnHubt27cTJkywtLTMzMzk5OSUl5ffsmULk/Dnz5//Lm2wjo6OkpISfn5+S0tLdGdUVBQUkDMxMZGTk/tmJCM5kVEPq+X7f0CZrnnz5vHw8CgrK/Py8h4+fBgAMGbMGFVVVVlZWX5+fjk5udmzZ2On6U+cOFFQUHDBggVbtmw5fPjwpUuXhIWFp06dOmvWLG1t7bt37zo7O0ODNABAX19fc3MzVueTHnZ2diYrtYdj7ty5e/bsASO7p79ZPRkYGCxduvR78zCSpL+rCv75Gv+HkZKSmjhx4nD/raurw4mg/k14enq+fPny7t278Gd0dPSNGzcePnx4+PBhCoUSHh6+bNmy+/fva2pqOjo6enl5aWpqxsTEAABu374dGRkJFdSqq6vV1dWzs7O/fPkiLy/v6+urq6vr4uLCMEUTE5OTJ082NTUpKSnBq2lmZtbS0vL161cPD4/Dhw9fvHgRG76ysvLMmTMvXrzYs2cP1HijUqkODg5KSkonTpwAADx9+vTDhw9Q9hYbSXNz86ZNm65cubJ06dKcnBw5ObnU1FQajWZmZlZdXR0dHe3t7b1582aoswOhUqleXl6ZmZlQlKSoqOjChQsqKipQY+zp06eurq7r16+vqKigPy+ophYbG7t9+/a+vr4nT55s2rTp2LFj165de/z48cOHD3V1da2srL58+YI7UzT/uNRRsOnGxcVBpdmsrKy4uLjCwsJ3794RicR79+45OTlt3ryZRCLV1dXdu3fv2bNnBw8ebGlpCQ4OjoiIaGxsxBYUlgcPHnh6etrZ2bm6uiII4ufn19ra+urVK2ytEh4eXlRU5Ofnt3Hjxlu3blGp1DNnzpiYmNjb26uqqnZ1dWHzQCQS4YlkZGSYmpp6eXk1NjauX78+MjJyJHfIqGHUtnyDg4Pnzp17/vy5mZkZVLIY+UXl5OQMCAiIiYlxd3eHP0eSYlZW1pQpU9C
fWKVsuMfPz8/FxWVwcPDjx4/0h5PJZAcHh4CAAD8/P7gH9/w/ePDg/fv3Dg4OTU1N69atO3funLGxsbKycltbm4uLi7GxMfbhHK6qguCqJ2xCvb29lpaW9+/f7+zs1NLSunLlirm5+e7duwEAXV1dJ0+edHJyWrly5ePHj3ERoo+TjY2NmZmZpqZmYmIitrphUgXjzhRX4+PANgBDQ0O4hxwXODY21tPT8/r161BcEUtMTExkZKSdnV1FRYW7u/vevXsHBgaOHj164cIFGODFixdaWloaGhoEAgEbODAwMCcnJzo6ur6+/vLlyyEhIYcOHaKvbhhmfuTgBF1JJBJOvFRZWbmlpcXa2vratWtBQUEHDhw4fvw4VBUZN26chobG/PnzQ0NDpaWl4YpDBQUFEomko6Pj7u4+XKeCubl5cXGxpKSkuro67NWfN2+euLj4mTNnxo4du2DBghs3bmA1YqytrQ0NDXfu3Ll06dKTJ0/CfJ44cSIpKenZs2f5+fmoRCoAABsJFxcXKtC6aNGiTZs2FRUVsbOzz5kzR0JCAivliqbFwcGB1ZKVlpa2t7dfsGBBbGwsTrqW/rxIJJKCgoK6ujqFQsnJycFKrV6+fPnAgQN79uwhEAgKCgq4M0Xzj0sdgkt37dq1UGl28eLFS5cunTlzppaWlrOzM4FAgFoHERERR44c2bVrl7GxsYqKiqio6JQpUzQ1NaWkpLAFhQJb0IMHD165csXBwaGkpERbW1tYWNjQ0BCtVcaNG6eoqDhnzpzt27ejloHTpk0TFha+cOHCrFmzEhISsHmIjo6GJ6KkpDRt2rSBgQEpKSkpKamhoaGR3CGjhlHb8qHq79bW1kZGRjNnzvyuizphwoSQkJDTp09DaTHmvHz58vDhw/39/Wh7gFPKBgAgCNLa2iouLm5pacnws8/b25ufn9/AwAAK6g8ODmKf/9bW1qdPn6qqqm7ZskVSUpKfn19VVfXZs2fTp0/39PRctGhRV1cX9uEcrqqCYKunzMxMbEKoiDO9DvXLly+FhYVtbW17enrWrl2LjRBNWklJCauCja1uhquCcWfKXMIb1wD4+PjgHnJceKyyNu5fISEhHR0dJ06c4OXlXbRoUXd3Nx8f38KFC/v6+mAATU3Nd+/e8fDwQO1vNLCSktLkyZPXrVuHla5mZ2dnnpPvBSfoSi9eykRodPny5X5+fu3t7aiqKnqZuLm56WWLUaDhRmNjIwcHh7+/f3Z29uLFiwGd1DIaHifTDHPLy8vLzc2toqJSV1eHlUjFRYLVO7W0tPT29v706ZOKigpD/Wh6mEvX4sApVmOTnjNnTmRkJLyB6TOJzT8930wX0IlH5+XlCQoKAgCMjY2xXS8ME0KLFwCwfPnybzoAowqxOHHgbyqGo+DukCNHjujp6f1bgw5/K6O25RtO/Z3JY49DUVHRxcXFwMAANWQYjh07dri5uXl4eKC+CvTq+LGxse3t7bdv3+7q6oqNjaXvk0lMTIR99LCmwz3/EyZMMDIymjVrFpx7wsHBMW7cOACAqqpqc3Mzfe/lcFUVBFs9ZWZm4ioaXEUJ/tKhXrBgwYcPHwgEgry8/IQJE4YrjeEE8uljhtcCd6bMJbzpGwAmOvSATlkbC9ZMgOGJwHJTV1evq6tjGJi5FnlaWho0N2D4if9NcIKuzMVLsWuTqFSqoaGhoaGhhIQEwkj1GN1mqMi6d+9eExOTHTt2yMvLP3z4UElJCdBJLaOBmcg09/b2Lly4ECuROlwkAIDp06cLCQm9evVqwYIFDPWjIcOJdDOXrgV0itXYf504caKlpWXatGlw5AyXSWz+0dRR6NPFZQzQiUePHTsWztv68uVLV1cXGic2IfriBQB0dXUtWLAAdylxF5T+vwzzgCbKwcEBZzNAnTP6CJ2dnUNCQmDX1yhj1LZ8w6m/M1y/2NnZiSDIlClTYM2FTr/cvXu3kZERvGnA/ypWAwCePn0KGMlAM1THj4qKunfv3ok
TJ86dO2dgYHD//n1cHnA1He75r6mpOXjwYEREhKWlJfYU6urqli5dit7x6D3NpJbB0tvbu2LFClxFM9zzs3DhQh0dncTExOfPn9M3S/T1An11w7AKxp2psLAwEwlvJg0Awys7nLI2AABrJjBcFQAAaG1tXb58OTYweqbMtciXL18OzQ3Q7/7vQkdHh5+f39jY+N69exQKpbS09PHjx46OjjExMTQazdjYOCEhob29vays7OPHj5WVlRUVFUlJSdXV1e3t7ZycnFZWVvX19ZGRkcnJyVCVuLy8vLW1NTU1NSEhoa2traKigl45GgCwe/duCQkJBQWFffv2oT17OKllNDBOphkAICcn5+vr6+/vb2pqKiUldeXKlZKSkjVr1syZMwcbCZVK/fLlS2RkJDrX18rKau7cuQAAhvrRkCVLlvj5+YWFhX369Ck9Pb25uTkjIyMjI0NHR4dAICgqKlpaWmKntNTU1JSUlMTHx48ZMwarWB0VFYXKSV++fPn169eXL192dnamP1Ns/mHq2PnGurq62HQ/ffrU2NgYGBjY3d394cOHjIyM6upqnFr3zZs3TU1N161bl5iYKCQktHz58qtXrxYXF2MTQuNXVlbeuXOno6NjcHCwgYGBnJzcmzdvqqqqsrKy0DAkEikjIyMrK6u5uTkrK+vTp0/9/f04ieqjR49i84CeyPr16319fe3t7QcHB7Ozs+nvkB+4b/8YfkLz87emr68Pur86OzvHx8eXlZXx8fF9+PAhLCxs7Nix5eXl2MCrVq06cuSIvb09giA5OTnz5s1DncQpFMq2bdsQBMnLy5ORkVFTU7t///61a9dWrVrl7e39/v178JcMIAzPUCn7yZMnq1evbm9vRxCETCbb2Njw8PAkJCRg81BRUTFr1qzjx4/v379/1apVra2tWCnnwsJCKyurjIyMa9euIQiir69vZWX19u1bW1vboaGhS5cuycvLV1VVoTLTISEhEhIShoaGjx49oi+cFStWPH/+3M/P7+XLl8j/akZ3dHRAEefa2lp6Heq5c+fq6emZmZkVFBTg4oRJv337FlXBrqiomDx58sWLFw0MDHbu3FlSUiIjI3P16lX6a4HNAIlEYi7hjVWvJpFIZmZmWlpaDQ0Nq1atOnz4ME7yGKusnZycjP3X8ePHIyIi3Nzc8vPzyWTyggULoOWhpqYmgUA4fvy4nZ3du3fvHB0dcYG7urpkZWXd3Nyw0tWurq7Mc/Jj9PT00Gg0VMWYSqWidxoTqFQq9LJnolmMDK8cDWWyaTQaVtQYJ7WMyyT2Z19fHyq0TaVSu7u7RxIJ8r/C01j9aCxMRLqZlwxWsRrd2dLS8vz588bGxoKCgmfPnn3+/BmXSVz+Gab+zSuCE48mk8nYnzBOXEK4nDMptBGCywN6IjDmb8pkjz5GuYZLX1/fSAzSKBRKR0eHuLg4w/8ODQ0x9Or85SAIQiAQeHh40OmjqJQzvEwEAgF2choYGFhZWS1cuBD+xAJlpsFfX64MPXVhVLy8vOgL9Ug0o58/f66mpkYikTo6OtLT048cOTJc0tg93NzcCIKQSCTcv3DgMsBcwvu71KuHU9am0WhEIpGbmxsmgSAIhUKBnoUwwNDQEI1Gg1+3uMDomY5Euvq35V9Xjv7XCQsLu337tpWV1eDgIJlM3r59+z/zpLP41xnlLd9oZePGjfDr5J9MdOnSpQYGBjNnzmxqapo/fz4cAWLB4o+mtLS0oKBAXl5+/vz5/3ZeWPxzsFq+P4/6+voPHz7w8fFpamr+k44H/f39qampFApFWVn5Zyx+WLBgweLfhdXysWDBggWL/xZ/2MgECxYsWLBg8ZOwbDNZsPhNCQ8PX7t2LfOZQT8MjUZLS0uTk5PDrlL446BSqWFhYVD84VfR1NQkKSlJv39wcDAvLw8AgCDIsmXLmOxPTU3t7+9ftWoVOlWtvb0dt0hAWloaTqkrLi6eNWvWSDKGux+IRGJcXJyWlhbDwBkZGYO
Dg1JSUuLi4vTz4LB0dXWVlpbCbREREVlZ2T9urtYPMPrPkEqlvnnzhn4/HC3DbvxaCgoKoMQfuvFNmpqaEhMTsXt+lb8M6v8wEh+G3t7et2/f/nyiLH6Sv8ljAfoDdHR0WFpa/k2mHCPk54W/h4aGcnJyfklmAAA0Gs3NzW24sXNfX18XFxcXF5fu7m4m+21sbDg4OGRkZOBqKBhGUFDw6dOn1tbWHBwc7OzspaWlAQEBBALB0dHRwsJihNlD7wdYbtXV1du3b2cYcvfu3WVlZUJCQqampiUlJcyjHT9+fEpKyubNmxEECQ8PnzNnzjfFYkYD/8pain8GuMyov7/fzs4O9y8qlerh4aGuro5u/PKkTU1NHRwc0I2RHHX37l19fX3snmvXrsFVgD8DlUq9fPmyiYkJgiCvX79OT09nHj43N5eXl/cnE2Xx23L69OnCwkIEQbS0tL55M/x9vH//3s3N7d9KnSFeXl7Y9aNRUVFNTU1wm0KhbN++PT8/H3cIbn9PT4+srCzc1tfXf//+PRrSxcVFS0sLPaq+vh5BkIKCglWrVn1XJoeGhrZs2QK3RUVFGYaRlJQsLi5GEKSpqSkiIgLuDAgIuHHjhp+f3/Hjx8PCwioqKrS1teGCztjY2JkzZ8Jgu3btOnXq1Hdl6U9k1H7zoYL6fHx89vb2uP+ys7PDfgl049ciICAAtRjQjZFA707wS/xlsP4PI/FhmD9//j85ZZQFQ5gIfGdlZTk4OJibmxsaGkJzBshItLyx/gAAgKioKDMzM0NDQwAAzlUAjfbZs2f+/v5mZmawAw0rBN/d3W1oaHjq1KlTp04pKyuHh4fb2tpu3boVAODt7a2urn7lypWFCxe6u7tDKZyioqL8/HwZGZmuri6G6uqDg4Oo4jkq3NXU1OTt7Q2zQaFQbt++HRgYaGBg0NPTk5iYOG/ePFzmiUQi7txxRw13pj4+PjA2SHx8PNRBhXng4eFZu3athoYG6pNAv7+7uxv14Jw4cWJxcTH2msKl9J2dnU5OTtAMhH6pcV5e3pQpUxoaGpydndesWdPV1ZWXl3fp0iX0figoKIiLi3v16hWUC/D29tbT08Ophm7ZskVDQyMmJkZCQmLDhg0AgNjY2ODg4FOnThkZGZ05c2ZgYEBWVra3t1dZWRl7YH5+flZWFr2yz+hj1LZ8qKA++mxglfXpw1MoFFNT03379g0NDRUWFmJtaLAPXnl5+Zw5c9zd3fft27d3796XL1/u2bPnxo0bgKktAATrSIB1acH2aLW0tBw9enTmzJmvXr1C73Xwv2YlNBrtypUrhoaG169fX7Jkib+//+3bt9esWdPQ0AD+1zYF5/+A+jAAABISEp48eXLlyhVPT0+chwvDwsFWHLjqLzs7e/r06fAJxNV3ubm5M2fOBAD4+Pioq6uD/3VI+OGL+1+AiceCo6Pj2rVrHRwcsrKyNDQ0YPgRanmLi4uj/gAAABUVFQ8Pj+Li4sbGRpyrAAyfk5Pz+fNnIyOjU6dO9ff344Tgx48fLyEhMWvWLEdHRzExMTY2Nicnp+bm5vr6eiUlJQKBYGdnFxISYmNjIyQkJCgoSKFQ4MMI38bo1dUHBgZQxXMVFRWYh6SkpMzMzF27ds2bN+/BgweysrL6+vrm5uZdXV0rV66E7kg4OwLcueOOYnimTU1N6LBcU1NTQ0NDb29vS0tLQ0NDd3f3pEmTHj9+/PXrVy4uLqyLCG7/5MmT582b9/jx48LCwry8PJxbXnV1NewXZdIDOX/+/NmzZzc2Nh48eLCoqGjs2LH19fV79+5F7wdFRUVeXl5DQ0NOTk4ajbZr1y4vLy8PDw9sJPfu3TM3N9fR0TEzM4OKtc+fP9fX14f/FRUVha6BbGxsaNPb2dnp6Oh4//79SZMm0Sv9jj5G7QwXJSWl1NTUdevWAQDgswGV9VeuXGltbY3QreXg5OQ8d+7cxo0bx4wZU1xcfPDgQfRf8MFzd3cvKSmRl5fn4OD
YuHGjubm5oKCgq6vrli1b5s+ff/r0aWgL0NPTY2lpuX//flz80JFg//790JEgOzsburTY2dlhg40ZM+bevXvZ2dnr1q1rb2+H9zo0K4GjlZKSklDcD1qj8fDw5OTkODk5dXZ2xsfHy8vLx8TEaGpqQtuUadOmQf8HNja28PBw1IeBRCI5ODjExsYODg4GBwejHi4PHz7Mycmh15mEFYeenp6wsHBXV1dgYCCs/hQVFdXV1RsaGrq6urZs2WJqapqfn+/t7W1nZ7d79+7Zs2d3d3f39PQAAFavXg11EUNCQlasWHHixAnsizMLhuAEvjk4OOAAj5KSUkhIiK2tLfbznV7Le8mSJUy0vCHw415QUBA6Hjg7O0+cOHHnzp1ogLS0NDgFRkZGRkZGxtvbW1VVFWCE4IezjODh4YETK6SlpYWFhZkM6UF1dQAAfOiwRgoQbW3tN2/eLFmy5PXr16mpqfAzZc2aNdhSwmX+6dOn2HPHHcXwTLu6ulDBo9u3b0P1y9bWVlFR0VWrVsHPYiEhodOnT+MsurD72djYYmNjw8PDW1tbW1pali9fjg0mKysLlY+Yj72Zmpo+efJk3bp1qqqqYWFhLS0tmzZtApj7AYWDgwNKPmFdsaAc0rlz57Zs2aKurj5nzhxLS8vu7m7s9yW9NJKwsPCpU6cAAHfu3DEwMCgoKOjr6zM2NgYAqKurj3ww8k9h1H7zYYF3DE5Znz6YjIwMtDRrbGyEdgEQbW3tvr4+tB5BH/Vx48Zxc3Pz8PBAGyAmtgCAznsBMHrCwV/+AIsWLQIA9Pf3w5zTm5UMV93gbFNw/g9oUZSVlcHXWx4enh07dtCbKuBITU2Fnwhr1qyRlpam98Hg5OSEOUdNJIar777pkMACBQ5IYH/CDT09PRkZmZKSEqz10si1vJk4HmAV/WFgSUnJ4OBgaJiQlpY2nBA8w4Qgg4ODAgICIiIiqCz4wMAAgiAjV1evrKx8+fKlra3t0aNHJSUlAwIC4FHQFwlhZEeAyxLuKIaB5eXl0Q6PW7duubu7b9iw4fz58+7u7rDZg7S3t8PxEZzNBbqfn5/f0NDwy5cvlpaWWEFErKw57AgZDh0dnQ8fPpSXlzs4ONy4cWPSpEnoucAY0KQRRkLw5eXlISEhAAAFBQV9fX3YFaSmphYfH4+Gwfbxgv+tD8eNGwdn1Y0bNw5Kro++Zg+M4pYPax0Ab4vhlPWxnDp1yt7eHjenGfvg4cJj42FiCwDoHAno3WGwwD4ffn5+eE/Tm5UMlwecbQrO/wFg6rikpKT29nYAQGpqKhMPFwiu4mBe/UHQ+g6miHogYE0PAF31wQKlvLx8OI+Fhw8f+vv7Ozs7nz9/HjVfhD6CqJnDrl27cIL96JWF/gAfP34sLS2Ni4urqqqqrKxMTU3FuQrAwDo6OqKionPnzt21a9fkyZOtra0HBwcDAgK8vLw8PT27urrgyFBDQ0NJSUlcXFxdXV1lZeWnT5/gKbx69crZ2dnNzQ0AsHXrVhsbm9u3b4uLi6ekpKCOAVh7BDjPMC0tDdsEJicne3l5iYmJGRgY2NjYhIaGqqioXLhwQUFBIS4urqOjIzk5GZv5OXPm4M79xIkT2KMYnikXF5esrOxwU6nv3Lmze/fut2/fdnV1HThwAAAwY8aMhoYG+v11dXVOTk5sbGyHDx9GD29ubo6MjMzLy8OaEBGJxNDQ0Orq6vz8fGxaY8aM2bp167p162bOnCktLb1x40bs/QAAUFBQOHfuXHx8fEdHR0pKSmxsbHd395cvX+DhcnJy586du3v37vPnzwsLC6H7koWFBYVCuXHjxvv37wMDA1tbW8vLy6uqqiIjIzs6OgIDA+HIor29fWBgoI+Pz4/csn8Wv3bCzO8DKqgfGxsLAEhKSsIq67u5uTk6OsrKylZUVKAb8MAtW7bgFO49PDw8PT3j4uKePn1aVlYmKioaEBBQVFTEw8MTHh6ekZHBycmZn5+PtQWIiooyNDT
U1dWtra2FG11dXVhHgvr6erSPFE2ovLwc9m84OztnZ2eXlZVBcwMEQWxtbW/cuBEUFOTq6kqhUE6cOKGmptbQ0AAdTxoaGgwMDIyNjbu7u1etWrVw4UILC4uGhgac/8PXr1+hD0NPTw90QtDV1U1ISMCZKiQmJnJwcMTHx6MZq6mpmTZt2ooVK44ePUomk3E+GNnZ2VxcXCEhIQiCFBYWSktL+/v737hxA5pR7N+/f/v27Xfu3Jk6dWplZSXW9KCqqgqdUcZihNBotHv37rW3t5eUlCQkJPj6+mL/O0Izh+EcD3CK/ig4N4be3t5vOlEUFhaqq6v39fWhvlcIgkAHCdQZAOsYwMSOgEqlkkgkbMZw+WGeeYZHMQxcV1eHmrTQ09DQAPMPgV+u9PsrKyuxp/xjoGYXDIuFQqFgbTTo6e/vJ5PJdXV1uMs0NDTU29v7k3kbHYxm9TJ664BvKusjCHL//n2cCwGckUUmk3Fi//QMZwuAMhJLhJ6eHoY9gYODg+zs7MN5L2AhEAiojwFC5/+AMjAwwMXFhZoPfNNUobe3V0BAAP3J0AejqKjo6NGjISEhfHx8aAkPDQ1xcXFB71ac6QEssW+eEQuU1tbWFStWnD9/XkBAoL29ff369VOmTPm3M8WAjIwMKysr+PH3BwG/nBQUFP7tjLD4exnNLd/3cubMma9fv96+fVtGRubfzsufyh9a3/1ZtLS0pKenCwkJKSsrM3Rx+h0IDw9vb2+fP38+rnOeBYvfAVbL9//59OnT+PHjmQ8+s2AOq75jwYLF7w+r5WPBggULFv8tftOuEhYsWPwDdHd3p6enQ5kPJpDJ5KioKFlZWXptZRKJVFtbKy8vDwBoaWmprq6G+0VFRdF13L9Erxl8j2RzTU0NnB3KwcEhKSkJBVN+EiqVmpWVBbfHjRsnIyPzvUPUw2lhjwT6K9XU1FRWVrZy5Up0T1ZWlpiY2NSpU38sCSZpjT5G7aoGFixYfJPc3Fxzc3PmYRAEuX79emdn5759+3Di73AR+osXL+DPq1evPn782NfX18HBISgoCA32S/SawfdINouLi9+8edPe3p5AINy6dWv16tUEAmHkCTGEg4ODg4Nj2bJlcOGsmpoalEYaOc3NzXCV0Q9Af6VevXr14MED7J6qqiqGCyJ/Pq3RB+ubjwWL35G2trZjx47NmjVr+vTpJSUlWlpaY8eONTMz27FjR11dnbKy8rp162xsbGRlZePi4tTV1YuLi0+fPv29Hzf0UrGQsrKy9vZ2KEHS3d1tY2PDx8eHIMiXL1+wlkBSUlKqqqodHR3w59GjR6EExNmzZ6E+FoSTk3P27Nl1dXWLFy8GACxatKi5uZmfn3/Tpk2RkZEjz+2ZM2cAACQSae/evUFBQQoKCsPNQ+bh4VFQUGhvb1+1apWamtrUqVOTk5M3bNjw6tWrFy9eGBsbk8nksrKys2fPXr9+vbm5ed68ec3NzcePH8/Ly3v79q2QkFBVVZWiomJvb6+trS0a7cKFCwEAS5cuFRcXJ5PJzs7OwzW9DFm4cOGVK1dmz54tKCg48qMg9FdqyZIluHbUwMDge6MdYVqjD1bLx4LF74iYmBgAYMWKFWpqagMDAx0dHZMnT66pqdm1a1dvb+/ChQvr6+uh/vjz5891dHQ2b96MSjcAAOrr6x89ejRv3rzIyEg3N7fz5883NTXJysrGxMSEhoYKCQl5enry8/NXVlYyTL2ysrKkpAS2fEJCQgCAvr6+mJgYd3d3XEjsshZU+ai0tBT2f2KBq4N6enoePnwI2zB6vWYAQF5enra2dlpaWmBg4Nu3b4OCgmpra0NCQkxMTExNTR0dHQEAULJ569atCIJ4e3uHh4erqqoeP36cPjYymQyX9q9YsQIAoKio6OHhARUsv379ysnJKSMjMzAwcOjQIWtr65cvXyoqKkI1gKqqKjMzs7S0NIblU19fHxYWBuPBFvXDhw9jYmKoVGpKSoqJiYmUlNTDhw9
JJFJKSkpAQAAXF9eyZctu3brl4ODwS64UlPmNjIy8fPmypqbm2bNnp0+fbm1tHR0dnZOTIyQklJeXd//+/aioqCtXruzYsSMiImLVqlXCwsLBwcHHjx/X0NCIjo6ura0NCwszNTXV1tZmfleMJlgtHwsWvylsbGzFxcXs7OwpKSlQQp2NjY1IJAYHB8PRHaxuJ5SXQ8Gq1LKzs0+bNm1oaOjChQsNDQ0JCQnTp0+PiIgICQlpb2/HCax3dnYSicS2trbu7u6GhgZOTk5xcXEqlRocHFxYWHjp0qXbt28zz3ZeXt7cuXPp90O95p6eHuZi5VjJ5mvXrqGSzVOnTsVJNgMAoFqNrq6usrIyfcuXl5fn5OSUlpamqakJNYzY2Nh6e3s/ffpUUVExceLEGTNmsLGx0Wi0hoaG9PR0U1NTrFcDAAAnvAlxdXUdGBhoaWmBy/5wgsBYZVpnZ2cKhSIjIwOlsTdv3iwnJ3fr1i1sbD98pQCdzC+qymthYVFSUsLBwbFt27ZHjx5t27atpaXF2tpaVVX10KFDGRkZ0tLSr169UlVVxYoJy8rKMklrlMFq+Viw+H3h5+cXEhLCCiM8ePAgMjIS52BMj6ysbGpqKqpSCzWvwV8KzsnJyfD7jH6ORmBg4OfPn2tra7u7u5ubm6WkpOzs7Dg4OPbs2aOnp6ekpPTNli8gIMDIyIhhlkai1wx+WrIZZf78+efOnQMA6OjonD9/Ho6KcXJyCgoKCgkJoUoLZWVl2traV69exTV7EC8vr/fv3wMAvL29YS+lhYWFuLh4Tk7OsmXLampqcEVta2urp6f3/v17T09PemlsXl7e1tZWXLH82JUCw8j80muXGxkZMZT5RcWEYfYePnyIS+vIkSO1tbUSEhL03/p/OqwZLixY/L5MmTJlzpw51tbWhYWFAAAEQWxtbZcsWXL9+nU0DMOFScOp1MJtekFXlIMHD7q7u1tbW2/bts3d3R3rJcLDwzN79mzwLbXV3Nxcek/Kkes1g5+WbIYwVGEGAPDx8c2YMUNTU1NeXr6zsxNBkBkzZgQFBdna2qLTQ7CxHThwAAo3i4iI4OKECu+4osYq09JLY3d1dU2bNg2bzx++Uig4mV8m2uW4U8OJCQsLC+PScnZ2DgkJGX3NHmB987Fg8XvS2tr65csXKChcUFAgICAwZsyY5ubm+Pj4mzdvrlmzRkRExNzcvLS0tKamJiYmxsTEBHu4kJCQq6vrmDFjREREPD09CwoK6uvroYIzkUi8e/euu7u7sbHx9OnTKRRKfn4+w88dSEtLy4EDB4yMjHp7e6EV5YEDB9atW2dkZNTa2hofH0+hUOrq6iZPngwAyM/Pp2/2oF5zQUFBdnY2WhFj9ZpxqTOXbF68eDGUbFZXV4eSzX19fVCyGVUdKy0tTUhI6O/vv3//fn19PYFAuHnzJgAgISGhrq4uKCiISCQ+ffr03bt3KSkp1dXVQkJC169f19PTe/XqlaSkZHx8fGFhYXNzM3RoglCpVG9vbwCAs7OzoKBgUlKSp6fn5MmTsUXt5eVVXl6uoaGhoKCgoqLCw8OzYcOGuLg4RUXFK1euAACys7NRn7yfvFISEhItLS3v3r2rrKz09PTs7OxMS0vj5OQ8ePAg1C7ftm0bjUYzNjZ+9+5de3t7WVlZUlJSZWVlRUVFUlJSdXU1Gxvbjh07Fi5cqK6uvmfPHl1d3YcPH47wrvjTYa1kZ8FidPJNldre3t5x48ZRqdRvSqBBRwjUsBSqvDKcnwI/rYabcjlyqFQq7K8jkUj0WrVUKpVGo9FL0f5bYIsa/kSVaREEIRKJsL+aRqOdO3cO+71Of/j3XqnhZH5pNNrAwACq38sEnJjwyO+KPxpWy8eCBQsW/wQhISFr1qxhOVP+DrBaPhYsWLBg8d9iNH/PQiIjI3l5edXU1NA9jY2NAQEBU6dOxa7J/SaJiYklJSXi4uJwhayysvKvz+t3UlhYmJGRwcPDM27cuOnTp8+
YMQP7XwKB8OTJk3379n3TF2kkZGVlFRUVAQDY2NgmTZo0b948hoa038uTJ09WrlyJylyxYMGCxT/B32X893uQm5vLy8t76dIldE9jY+OSJUugpdmtW7dGEgmZTN66dStcKUwikY4cObJ48eK/I7cNDQ13794dYeDc3NwFCxZQKJS+vj4NDY3IyEhcgJSUFDY2trS0tF+SNyqVumzZMgBAeXl5QkKCpKTklStXfjJOEonEz89/48aNX5JDFixYsBgho3lVQ19f37t374hEInZnQUFBcnLyly9fxMXFv7koCnLr1q2goCA7OztOTk64dBRd30oikVpaWuA29JUGALS2tiIIAgBob29Hgw0NDdFotLa2NjghG+5BMG7U3d3durq6FRUVFAoFAEAmk+EcaxiGRCIBALCLlqKjo9vb2xsaGvj5+e/fv48OZbe3t0Mtj8WLFzc1NSkpKcH9jY2NcKYymk9UdAqCVfxDU0dhZ2eHH3mioqJqamonTpyws7MLDg6G/+3t7YVSiszLAQBApVLb2trgNhsbW0VFxYkTJygUytDQEPz78/qKLFiwYMGc0dzyeXh44KZ6AwDWrVvHxcXFx8cnJSXFUKCBnlevXgEA4EomyN27dwEA/v7+27Zti4yMXLNmTU9Pj5GREQ8Pz/Xr15cvX75q1aqbN28qKytra2sDAHx8fHh4eIyNjW1sbGRkZKqqqhwdHXl4eFJTUy9dusTDwwO7UvPy8mDDnJycvGbNmvDw8K1btxIIhNWrV0tLS2/ZskVUVLSmpgbmQUVFpb6+fvbs2WfPnhUTE1uxYkV/f/+GDRuCg4MXLFiQlJR09+5dCQmJ+Pj4lpYWJSWltLS0FStWtLe3m5iY8PDwnD9/fvny5VDn/suXL6qqqpGRkbKysl1dXdjUhysTqCECW77jx49fu3bt4sWLJ0+epFAoTMrB3d399OnTL168UFBQqKioyMrKEhcXv3z5cklJCT8//8aNG01NTSUkJD5+/DjyqzyKCQ8PR1ehDUd9ff2HDx++N2YikRgeHg5fUEZCU1MTw/1tbW3BwcFlZWW4/UNDQ1FRURkZGbj9ODGw9vb2T/8L+h4Ju9ZHCK6giETiu3fvmITPyMhITEwsLy/v6+tjHjOVSkXzVlxcjHuN/rshkUixsbHoOyVzhoaGcnJy6Pf39PTExcWhb59tbW3oqydKbm4u9ipkZ2czCTxK+Fe/OP9GwsLC8vPz4fXG9nZC8vPzly9fDj+2vomoqCg7OztuZ19f39ixY+/fv48gCC8v7/Xr1+FSp66urqtXrwIA2tranJycAAAdHR1wGbKvry+ZTObh4Tly5AgU6k1OTo6NjQUAJCQkIAjCz89/9OhRBEGWLl168OBBWN1kZWWZmZlxcXH19fWFh4eTyWQ0Dw8ePBg3bhwAQFpaur6+3sHBQVJSEkGQ69evx8TEJCcnAwBiY2NPnTo1e/bs9vZ2JSWlW7du3bt3DwDQ3NwMxTg6OjrU1dW3b99Oo9FMTEzq6upwqaPJwWayu7sbQRAow7F+/fr4+HgAQH5+fnp6OgAgLS2NSTlYWlra29vDJ/nBgwcIgnBycp4/fx5BEEFBQTMzMwRBBAQEoAoUi2vXrrW3tzMJQKVSPTw81NXVcft7enqYx1xaWioqKgovJXOoVKqrqyvDCMvLyw8cOGBhYSEsLPzhwwd0P5lMNjQ0bGlpef369cWLF9H9ERERy5Ytw8ZAJpPNzc2VlJQyMzMzMjKePXv24MEDGo0WFRXFycn5zbyhoAUF81lcXIwu66Zn165dvr6++fn5ampqGRkZ34w8MzMTAPD27dunT58uWbLk5cuXI8/YT1JdXT1lypTq6uqRBLa1tT19+jRup5+f35kzZwgEAvxZUFAwc+ZMWNtgqamp4eTk9Pb2zsrKSkpKMjQ0ZBJ4dDBqZ7jcv3+/q6sL9vt5enru2rULnUYxODjo6uoaHh5OIpEGBwcFBASYR6WgoJCUlNTR0SEiIoL
urKqqQtfBjB07trCwEGoVjhkzBuoDjRkzBq5DQnWEOTk5oXJSbW0t8xRLS0t5eHhSUlICAwOhoZeAgAD8KkLDfP782dLScufOnVeuXLl3796zZ89ycnJIJBKCIFDjEdVxLy0tbWlpSUhIsLW1lZeXT0hIgJmB+SSTyTk5OatWrWJjY3vy5AnD1OmBfaELFiwoKCiAJQDXIcE2frhyuHDhwu3bt2HTS68DAtcPcXFxwa5dFlDWmQns7OzLli0LDAzE7kStDJgcOG3atBFOUHr8+LGKigr6jGA9HOACagDAhAkT3r59u3r1ahgGzimbMGGCgYHB8ePHjx49Kigo2NTU1NjYiIucoYcDGxvbunXrvmvq/8g9HAAAcXFxZ8+eVVBQ8Pf3h7oqAIDAwMCKioqpU6dmZmauXr161qxZR48e1dfXNzY2ZujP8M84aUydOnXChAn0+ykUyuvXr3fs2IHuSUxMHDduHLRwQomKigoICMDeCXPmzMH2XaFMmTJFUFBwwYIFUGcAToMYLvDoYNT2du7du3fXrl2wv27evHmcnJyHDx8uLi5GEOTQoUMSEhLe3t5GRkYMV+PiOHnyJAAgPDwc3RMUFCQjIzN27FjY+0EkEul1K+ih0WhkMrm7u3vWrFmwMUD1ipC/1pbQaLS+vj45Obmurq4tW7ZoaWnBFgWhW3zy5MmT9vZ2ISGhO3fuLFy4kIODY9q0aR0dHfBhxnbLyMnJdXd3q6iobN26lWGFMm3atMTERNjeEIlE+tTpeffunaio6LFjx+CJE4nEgYEB8L99wvRs2rQpLy/P0tLym2XForq6Wl1dHfY7PXv2zN/f38zMLCMjo76+/vLlyyEhIYcOHWJ4YEFBAbQyIJFItra2sbGx27dv7+vra2pq8vb2hlGhgb28vJycnFpaWmJiYiIjI+3s7HBy0j4+Plghj8rKSrQPU0NDA27w8fHB5gHS2dlZV1cHt0VERCoqKhAECQoKwvoWYYEeDp2dnU5OTmjbQP9g9vT0LFmyJCQkJD09fcqUKeXl5b29vfv27auoqIAFhZ44hUJBEMTb21tPT+/OnTu4eLZs2aKhoRETEyMhIQHNV2NjY4ODg0+dOmVkZHTmzJmBgQFZWdne3l7c/G2sPwPqpKGvr3/8+HEREZFp06ZBJw0LC4t9+/YBAAwMDA4fPtzY2Kijo3PmzBk4fo+eMvbSvH79evXq1S4uLitXroQvrImJiQ8ePPD29q6qqqIvMSqVGhAQgP7s6OhoaGigVwm3s7OTlJQ8efIklJ4ZrmBRKBQKmUx+8+YN2rk9kurxD2XUfvNB36yWlpYLFy4sW7asv7/f09Nz0aJFjx49evbsGQwzZ84c2FvIHG1tbR8fnwsXLlRXV0+cOLGystLIyIifn9/DwyMwMFBAQEBJSenQoUO7du0CAGRkZMB+v/T0dFhHfPr0CX5uvn371tfXV1pa2srKSkBAYMqUKXZ2djNmzBg7dmxaWtqqVatUVFRCQkJWrlx58+ZNXV3dFStWrFq16vjx49nZ2Z2dnVFRUevXr0dzNTQ0ZGxsfPLkycbGRmFhYXNz88HBwejoaB0dHUNDw127dsF+yNTU1OPHj797905ZWVldXf38+fNwXk96ejocdPn06dPdu3c3b96srKysq6t74MABbOqXLl2CyWVkZHz+/BkAcPPmTT4+vsbGxszMTHFxcXFxcQsLC19fXxKJdOLEicWLF0MHFoblICgomJaW5uTkJCgoGBsbu3DhQgqFkpGRkZWV1dfX9/nz59zc3J6envz8fKhq8YvuhT8SaWlp+NqRk5Pz+fPnu3fvLl26tLq6GucMQH8gamUwODiooKCgrq7+8OHDnJyclpaWzMxMd3d3VDA6KSlJRkbmwIEDAACswwAaVVNTE6qTwtDDAQBAoVBqa2uPHj2KHqWhoXH69OmMjIzBwcGWlhYZGRk/P79t27YNV42O0MNh/Pjx+vr65eXlenp6kyZN6u7uFhISWr9
+vZyc3Hd5ONy7d09SUlJHR2fPnj0uLi6cnJzPnz9HFcVERUVhC83GxobNMM6fAfyckwaJRMJemsWLF/f09FhaWoqIiLx+/Xrp0qUWFhZ5eXns7OywgwSlv7+/u7t7cHCQSCQ2NDQAACQlJX19fa2trUNDQ7EhiURiWVlZVFQUPz//ihUrBAUFcapp9Lx+/RpqZ586dYp5yNHAv9fR+k/T19f3M4fTaLTKykpcn/vg4GBzc/M3j4V9gH5+fq2trTQaDe6kUChtbW2Dg4Pwyw+XyaGhoba2NiZxwv9WVlbW1dVh9zc2NmIjRDPf2NjIJDYymdzU1IT+/GbqOLq6ukYyaESj0VpaWhAE6e7uJhKJI4//v8mGDRuysrJcXFywCz/IZPKTJ0/i4uImTZpEoVByc3Ppx/kkJCTgxtu3b0NDQ3V1dWNjYwcGBrZv375o0aLy8nIEQWbOnLlixYqoqCgYsqqqasGCBTt27EDHhBAEKSoqWrduHdx++PChmZnZxo0blZWVzczM7O3t4X4vL6+GhgZcBmpra58/fx4RETF//vz29nZ1dfXTp08fPXpUSkrq2rVr2JAuLi5aWlpw+8uXL+h+UVFR+gJpamqaN29eQ0PD7t27LS0tHz16BO8iWFDYE4eHUygUXDzo2RUXF0tJScHBZh0dneDgYFxaq1atggUFv9XgY56dnT1mzBj4KO3atevZs2cFBQXOzs7wEHl5eXt7e2VlZfjRCVm2bFl9fT39uWAvTXV1NRwBfffunYmJSUlJyaJFi2CwxYsXY+uc+Ph4MzMzU1NTaWlpMzMzMzOzgICAnTt3nj59esuWLcuWLfP394ch+/r6xMXF4falS5eOHz8Ot/X19RkO3YmKisIy7OnpQS/ocIFHAaP2m4+ekUjYMYGNjU1GRga3k5ubG775Mgd2/hQWFmLdWzg4OOjHWtBMcnFxMR+Jgf+lzxLDkTk2NrbhRuwgnJycWHHeb6aOY4Qe02xsbHDcgiXgNBLgIyopKeno6GhjY8PBwZGWlpabm9vX12diYoKT9scCx1C/fPni4eHx/v17OCmjsrLy5cuXr169Onr0aFhYGADAy8try5YtERER0tLS0GHg+PHjHh4eJ06cgPHIy8ujsx8PHjwIAIiKiiopKYFmQwCAyMhIdXV1KSmphoYGQUFBHh4eKLY5efLkXbt27dmzx9vbW0BAAH649Pb2fvjwAdfn+V0eDhISEnJycufPn/f09FRSUpo+fTocz4MFBUbg4VBeXl5ZWamnp6egoKCvrw8/m9TU1OLj4/X09GAYnFIoQ38G+BM6acyZM6ewsHDOnDkIgtja2nZ0dFy/fv38+fPYnODAXRp0P9wWFhYuLCzs6+sbN24c8r8WDatWrVq1atXQ0JCRkRG0UOju7oafoXFxcfn5+dCAl0ql8vPzS0pKtre3wwl6qGnwcKAXQkBA4JtTH0YB/6GW719EUlISzrTEicOyYDEcqDWBra3to0eP5s6du2jRohs3btTU1GCdAfr6+iorKysrK7FVG7QyMDAwgF6yVCrVx8dnxYoVqampcnJyBgYGBQUFLS0t1dXVFhYW69ev9/HxCQwMRB0G0Hi4uLhkZWUHBwcZThgJCgqytLSELz2TJk2KiYnZt28f9HDIzc19+fKlsbExnLoCR397enqgQxAaA0MPBwBAbGxsR0fHu3fvNm7ciBtpNjMz+/r1Kw8Pj7a29tq1a8F3ejjIyclt3769urpaVFS0sLAQDnxYWFgcO3bsxo0b8+bNGxgYWLZs2eDgYFVVVWRkpJmZGUN/hp900uDm5sZemg0bNjQ2NtbW1iYkJJSUlHBxcZ08eVJLS0tDQ6O/vz85OZn+BRdFUFAQXoLy8vKmpqZJkyY9e/YsNTXVw8Pj3r17Fy9e1NXVrampgW8zZWVlOTk5CQkJCxcuxDZvb9++7erqevXq1axZs1AzyOECjw5Yup0sWPwB9Pb2orXPN6X9USsDaKqAIAj8jqFSqWQ
yGetzi0Kj0bAOAyj19fVxcXF79uwZSSZhcmQyuaWlBZoW/R1AG4cf9nAYGBjg4uJqbm6eOHEidiQPikuMZOD/l4C9NAxfLPr7+3l5ealU6vdaUsCZdOjM7fb2dub9Pf9NWC0fCxYsmPHlyxcAAPrZxILFKIDV8rFgwYIFi/8Wo3Y9HwsWLFiwYMEQVsvHggULFiz+W7BaPhYsWLBg8d+C1fKxYPEnQaVS37x5g93T2toKtVjpQ/4dPgNdXV1otOXl5djVZiOBSCR2d3f/ZB6YmBgMVxq/hN7e3rdv32I3fhIEQaKjo6HyalVVlZ+fH0O5sn+YHzMA+bMYtS1fVlbWp0+fSkpKqqqq4FPa2dkJAIDb31ULUKnUhIQEnJvd38rXr1/z8/N/+PA/xYTlh/n76oiamhpYGllZWXCZ8+8G1owGVv1v3751dHSkD8nBwcHBwbFs2bKWlpasrCw1NTU/P7+fz8D48eNTUlI2b96MIEh4ePicOXO+y1GIl5c3MTERFfb8MZqamvbv34+1qwTfKo1fQlVVFZRFRDeY802DITs7OwUFhebm5traWjc3tyVLlkCNwJ+J8yeh0Wjh4eHXr1//W1P51xm1LV9fX9/y5ct5eXklJCROnTrl5eUlJCQEACgtLU1KSvouTciWlpbDhw/jnFr/Jnp7e0kkkqOjY0REBPNgw/1r9+7dZWVlQkJCpqamqELjcPxNleN38b1Pcltb2wjriB9AXFz85s2b9vb2BALh1q1bq1ev/t2ccvn4+Ozt7QEAdXV1UC5y0aJFwwVGfQaMjY0PHTrk7OwMACCRSBcuXDh8+HBgYODjx499fHw6OztNTEyuXbt2586dBw8eAADc3NycnJysrKwuXrx47do1rN0dOzv7/PnzhYSEli1bduTIEUVFxefPn3/XKWhra8Oc/DD0JgYjKY2fZ/78+VCJAt1gApolJrx9+3bChAmWlpaZmZmcnJzy8vLD6XpDIiIiXrx48b3Z/i6gAcjfmsTvwKht+VavXq2mphYTE8PLy3vmzJm4uDi4v6mp6fDhw98VlZSU1Pfai/wY8Lbm4uJi7njA/ImKi4tbtGjR3Llz/f390e/UwMBAR0dHf3//EydOvHv3rrKyUkdHBwpYMKwc29radu3ade3atcDAQAcHh9zc3LKysrVr1z569OjSpUvR0dEAABsbGzc3t61bt7q7u1tZWf3YF9IPPMmJiYkjqSN+DB4eHgUFhSlTpqxaterOnTsVFRVQfOfLly9qamrPnj17/fo1vH/evHmjq6vr4+Nz7ty5srKy/v5+ExMTLy+vTZs2+fj4/LwfRV5e3pQpUxoaGpydndesWdPV1ZWXl3fp0qXExERonhAYGJiTkwOvRWdn5+3bt6EKF8PYsD4DXFxcU6ZMERcX19fXNzExWbFihbCwMCcnp6Ki4vHjx728vL5+/aqoqGhra8vPzz99+vSzZ89i/bmw5OfnZ2Vl6ejoAABiY2M9PT2vX7/+6NEjAMDTp08/fPhgZWUFAPj69auHh8fhw4cvXrwIAGBjY+Pm5obOlDh+2MSASWlER0d7e3tv3rwZarahoDmk0WhXrlwxNDS8fv36kiVL/P39b9++vWbNmoaGBlx+6DNMJBLv3bvn5OS0efNmEomEtbzAZgmbqKur6/r16ysqKuLi4lpaWvz9/bOysuLi4goLC9+9e4eLkEKh3L59OzAw0MDAoLOz08vLKzMzEzVXAgAMDg6eO3fu+fPnZmZmiYmJX758kZeX9/X11dXVdXFxwRbscOeYm5sLdeN8fHzU1dUZXujRx6ht+QAAu3fvfvz4MQCgtbV1YGAAmmcODg7y8fFh7z/6A3E3E9z54sULqCdEIBCw9zf2Tu3t7bWxsTEzM9PU1IyIiFi3bt2tW7cAAK6urm/evME9/zioVCr2ti4qKrpw4YKKigoUucY+fgyfKJTfwYQFpbKy8syZMy9evNizZ099fT3uGUNPOSMj48iRIyYmJkePHp0
9e3Z6evpwpRoaGorWEbjL1NzcvGnTpitXrixdujQoKGjZsmX379/X1NR0dHT08vLS1NSMiYkB/1sjM4FMJkdERNBoNCiECE3kjY2Nt23bBlUrFy9e3NLSYmJioqioeOHCBQqFcubMGWNj49LSUhMTk507dzKP/5vMnz9/9uzZjY2NBw8eLCoqGjt2bH19/d69e1euXFlfXw8AUFJSmjx58rp16wAA3NzcJ06csLKyYvjJ7urqeu/ePZzPQH19fXp6+qlTp6ZNmwb3kMnk7OxsMpksLS2N9RkAAEBDPiydnZ2Ojo7379+fNGmSsLAwAIBIJG7cuFFHR8fX1xcA4ObmJiUlZW5uDgA4c+bM2LFjFyxYcOPGDTjoICcnBx1fcaAmBhQKBWtiYG5u/vr1awqFYmFhYWFhYWpqipWZZVIag4ODN27c4OXllZeXR41HIGgO2dnZ58yZw8fHd+bMmR07duTk5Jw4cWL58uXx8fG4/NBn2NnZmUAgTJw4sbCwMCIiIiQkpKOj48SJE7y8vNgsQdLT02NiYgQFBTk5OV1dXdeuXTt27Fh9ff3FixcvXbp05syZWlpauAgfPHggKyurr69vbm7e29urqKioqKi4YMECNE4XFxcxMbHdu3dbW1sbGRnNnDmTRCLp6Oi4u7t7eXmhwZic44IFC3p6egAAq1ev/ifHdP5dRrNu59atWy0sLEpKSsrKyg4fPvzo0SMuLi5VVVV4/2lqasL7j97BC72ZFBUV1dXV4deMpqbmtWvXtLW1Hz9+XFJSglq6ODs7UygUGRmZwsLC+Ph4GRmZr1+/wr5KIpEIbSERBNHV1d2yZYuent6CBQusrKyOHDkC6wsUDg4ORUVF6A8ZFxcnLS1tb2/f2dkZGxu7ZcsWrKeJkpJSamoq9onC8juYsKBYW1s7ODjAR+vkyZN+fn7oM+bs7IyespKSUnp6emNj440bN3x8fOzs7NauXTtcqXZ1dY0dO1ZLS+vWrVu4y8THxzd79mw7O7uuri4bGxtra2tVVdVDhw5lZGRIS0u/evVKQ0PDzc3t6dOnsEYejry8PCcnp7S0NE1NTXQGBxsb26dPn6CS/YwZM9jY2BAE6enpCQsLW7169fjx48ePHz80NAQD0zcVP4CpqemTJ0/WrVunqqoaFhbW0tKyadMmAABUhcYCe96EhIRwQ18QCwsLcXHxnJycZcuW1dTUQC0rHh4eQUFBrHT4x48fAwICUlNTofAVjiNHjtTW1kpISEChZGFhYehlc+fOHSgEqqysHBwcLCoqCt+Brl69qqGhYWlpOWvWrMbGxt27d4O/ZK8BALy8vK2trfSp8PDwiIiIhIWFQWFoDg4OmBkBAYHe3t6KigouLi4o2MZkwAJbGuXl5XJycjt37qR/F8HmkJ2dHcq2CQgIwLZZQECgpaUFlx/6tLKyspydnSdOnAjjnz9/vp6e3vv376FnL31gTU1NhpkZLkIDAwP4hrpmzRqG4T9//qyqqgoAUFBQGBoa6ujogIXGwcGBG0cY7hyHywmgu+ijidH8zTd+/HhtbW07O7upU6fu2bPnzZs3r1+/VlNTQ++/8PBw+mYPAPD582eopIfeTAAA2FCpq6vX1dXZ2treunXLwsJi/PjxWVlZe/fu3blzZ0VFxebNmzk5OdGuoc2bN8OJElAMED7/Bw8eHBoawjV79GCf3m8+fij9/f2cnJznzp3LyckJCwvz8PAAAHR3d2MbOZwwI8TCwuLWrVuurq56enqoLyU/P7+QkBBW5vHBgwfPnj0b+bgOWpIqKirQvX04ODg4YCmpqqo2NzczKVX6yNHLhAbDPuRo1Qkfcljf4Tq+cMyfP//cuXPv3r1raWlBRfcBAIKCgiIiImgBdnZ2WllZTZgwAa3QsaSlpenp6enp6X38+PGbBcUQHR2dDx8+lJeXOzg43LhxY9KkSXA/1F2CX2kA40hAb03AxGdAVFR05syZ58+fhxZaCIKoq6vfvXt33759OMc
DiLOzc0hICKwBcdEODg4CAA4fPjx//vxZs2bBoyQlJfPy8sLCwrKzs/v6+uAXXnV1NRwv7+rqgt+a6LsCBJoYaGtr8/DwMDExgHuw2RiuNMTExKKiouALQWZmJnpquBxi84BNd7j8oEyYMOHly5cAgMHBwdzcXGh5ISIi4uHhgWYJGxi6CAEAoDsm/bXDRSgpKQlNaPv6+goKCujjnD17NuyQHxwcFBERQS1WmItz4f4LU+/v78ftx170UcZobvkAALt373737p2+vr60tPTy5cs5ODjY2dnp7z8cw91MAIDW1tbly5dj72/cnYqNh4ODY+/evcbGxtra2gAA+ucfx3BPL+7xo7/7UcrLy0NCQgAA9CYsaBjovY7yTRMWa2trtHK0tbVdsmQJdt4X8wcMLcn29nYoxo97xujPpa6ubunSpUxKFS0cJpcJFx77c7j6brgCgdU6ZMaMGYqKijt27CgpKUEQRERExNvbOzk5GS1ebFrLly8PCQkJCQlRU1NjUkRMGDNmzNatW9etWzdz5kxpaemNGzcCAOLi4jo6OpKTk2fNmlVQUODu7v7x48fS0tL6+vrY2NivX7/C13kAAJVKRX0GnJycjh8/Dn0GSCRSWlpadnZ2SEjIw4cP79y509HRUVhYmJiYuHnz5nnz5pmbmxMIhI6OjpycnJSUFNyM346OjsDAQDgAaW9vHxgY6OPjAwAQFRW1s7P78OFDZWVlSkrKlStXSkpK1qxZM2fOnKtXr+ro6BgZGX348AH2UhYVFW3atIlAIAgJCWGfQZyJQVJSEkMTAwcHB2higB44XGmMGTNmx44dCxcuNDU1HRwcxH4uozlUUFBISkoqKytrbGxMT0/PzMxsbGzMzs7Oz89HEASXn66uroSEhNTUVLhx7NgxDw+PDRs2XLlyZd68eY8fP46NjVVQUFi3bh2aJTRFXV1dAoGgqKhoaWk5derUT58+NTY2BgYGdnd3f/jwISMjo7q6GhehjY1NaGioiorKhQsXFBQUlixZ4ufnFxkZicZpbW09ODgYEBDg5eXl6elZXl7e2tqampqakJDQ1taGjuZQqdThzpFIJGppae3cuTMqKqq7u7uqqioqKgoagPzYfftn8A3/vj8cEolkbm4Ot58/f56cnIwgyNDQ0KpVqxYuXGhhYUFvqokgSF9f39atW1+/fu3s7BwfH48gyPHjx+3s7N69e+fo6Ah/RkREuLm55efnl5aWysrKrl+//uzZswQCYffu3evXr0ddXtvb2w8ePAi3Q0JCJCQkDA0NHz16xDC30dHRSkpKoaGh+vr6mzdvbmpq0tDQgHM1J0+efPHiRQMDg507d7a1tcnKyrq5uTHMuYKCwp07d549e7ZmzRpoiTk4OGhubn79+vXw8PCAgIC6urqysrKpU6e6uLhQKBQ3NzcAwJkzZxwdHbW0tLy8vBAEaWlpUVRUPH36dFBQ0KVLl+7cuVNSUjJ27NiIiAgSiaSiogJT//r1q4SExJMnT4Yr/6KiIj09vaioqIsXL8LM7N+/f/v27Xfu3Jk6dWplZSU85YiICBcXl3Xr1oWEhNja2tbX1w9Xqj09Pbt371ZVVa2qqsJdpvr6+vnz558+fZpIJL5580ZQULC0tPTRo0eSkpLl5eU3b96cM2dOS0uLoaFhSkqKnZ3d4OAgfYa/fv2qrKw8f/58Z2fnkydP6ujowDuksLCQh4fH19f39evX27dvz87O9vX1lZKSKi8vr6mpmTFjBry13r59y8/PX1BQMFyBfC+ox+nQ0BD9f383d9+BgQHkr9caKpWKNSseGhpCT6GxsfHu3btwOzk5ubS0FBsJkUik0WhUKnW4syMQCFQqlUQi4fYzKQ2s3S4KLofD8c380Gg0eMowTgKBQCaTmWTpmxbZ2AghPT092PzQH9Lb24taXv8Yg4ODNBoN66k7uhn9itWomwkcNkD7/QgEAnOv2r6+Pn5+fjT80NAQjUaDows4SxcEQYhEIkPzF5g
u2j8Gv7fo3VVQhvNCw3maDBcM/DYmLFiwDjsAgKGhIS4uLhqNBl/A4bm4urp2dXVZWVmhI0/MSxUFd5m+CY1G6+vrY1nj/lsQiUTYDQMvWWlp6fTp0//tTLH4zzH6Wz4WfwROTk4dHR1/3xpkFixYsEAZ5eN8LP4IBgYGpkyZMm/ePDhfnwULFiz+VljffCxYsGDB4r/FaF7Px4LF6CA8PHzt2rXoyG53d3d6ejqUKfglEf4YZWVlXV1dSkpKIwnc3NwsLi7OcDg2Nze3ra1NRUUFu0SvrKwMnaQqKysLpRVgYOxCCCg9AwBoa2sDfykwfJOsrCwxMbGpU6fCnzQaLS0tTU5ODrc6HkImkz98+DBhwgRubu5Zs2Yxj7mmpgZO2+bg4JCUlPxntJ8gCILk5eXx8fF9c9yURCLV1tbKy8vT/6u4uBh3jmlpab9kcervBqu3kwWL3xR0JXJ+fn5/fz+6Pzc3l/lK/G+Ci5B56gwZHBx89OjRCFVes7Ky8vPzGTZ7L168yMrKUlJSMjExQdfbkMnkI0eOPH/+3NfX19zcHCtUJiwsrKKiUlhYyMnJSSaTb968CQAoLCxcuXJlcXHxSDIDAKiqqoLtE5VK7e/v7+josLS0rKmpYXiaK1euFBUV7ejo0NPT+2bM/6Lua29v78WLFxku08ICVxXTSwYSCARHR0cLCwvszsjIyOPHj//ijP4ejM6W7w+1IPgnaW5uTk1N/YcTbWtrG050bTgaGxsZ7u/o6Hjz5g1DBYqcnJxPnz6hCbW2tn769Ck9Pf3Lly/YYCQS6dP/UlZWhuYTfkOMEPrC/Em3DZi9vXv3wu0zZ85gF/IvWbLkZ2Kmj5Ceb+qp8vDwQMXXb0Iikfz9/VHJIQqFAldqQi5fvmxsbDx+/PipU6dCsx4AQFdX1+vXr11cXB48eDBt2jTs+U6ZMgXqHCkqKqqoqDg5OQEA5syZw1zqFoeBgQFUIDp//nx1dbWYmBiqEoCjpqamvr5+3rx56urqN27cgG8DFArFycnJw8PDz8/PwMCgra0tPj5eTU0tLy/vX9R9HT9+PMOvPVyBS0lJQc0XHPz8/FAhCKWpqWm4p28UMDpbvt/BguDvAL6i/pKo2tra0Ir+V/FN14WUlBQoeTVCAgMDGcoENzQ0uLi4tLe3Kykp4Rq/3NzcK1euuLi4oNY5jo6OLi4urq6uOL0rLi6uioqKlStXsrGxcXBw9PT0uLq6gu//gAD/W5gjdNv4JgUFBXFxca9evaqoqFBXV4fr7j09PV++fHn37l0YJjo6+saNGw8fPjx8+DCFQgkPDx9OrfT27duRkZEHDx78/PlzdXU1jHA4dWOshCxOhxqrWIue786dO6FOzYMHD96/f+/g4IA9kadPn0KJbTRyKEoCAGhsbGRjY4OLfOTl5dHvlQkTJsAVR/X19ZKSkvQfixQKhUwmv3nzBtUbog/T09OzZMmSkJCQ9PT0KVOmlJeX9/b27tu3r6ury9LS8v79+y0tLcHBwREREbB+j4qKMjMzMzQ0xEYyY8YMQUFBNTW1iooKPT09uDjn8uXL48ePNzMz2759+/79+wkEwurVqysqKubPn48e+Gt1X7GXj/6qNTQ0XL161dfXFxUZxoIt8OHKin4/giBBQUF/hyj8b8KoHedDLQjExcXJZLKzs/NI/LR+c86fP79r167ver0djrlz586dO/fn40Gpq6u7fv06XBc/HN81YPDly5fW1tYZM2bAn2VlZe3t7TAGBEGg6ndKSkplZaW4uDh6lLe397Fjx5YvXz5mzBgAQGlp6eDg4LVr1yZPnkyfxPz587m4uOBIFZVKhQX7vR8QAFOYERER1dXV5ubms2fPZq4z900UFRV5eXlhRQyVKouKiqAmcnt7+6NHj0gkEpSl5eDg2LZt26NHj7Zt29bS0sJQrXTcuHEaGhoVFRWhoaEXL16EESooKEB147Vr127YsAH9zsBKyIaFhW3cuBHKRu/fvz8kJAR
VrAUAEInEFy9euLu7CwgItLa2Pn36ND4+XlpaGnsiWVlZO3bsAAD09/d3d3cPDg4SiUSoLtTU1ISu1+Tj46OXSw4KCmLYx/j69evU1NSkpCQmL1Ljx4/X19cvLy/X09ObNGlSd3e3kJDQ+vXrhYSEpKSkyGSyuLj4lClTNDU1oeqsiorKmjVr5s6d29jYiNWhjY+PP3bs2Ny5c52dnQ8cOAAAePbsGeqQtWHDBqiIhm02frnuK+7y4a7a8ePHz5w5s2DBgvT0dOxR9AUuKSkJL/038fPz27Zt28iXyf5xjM5vPixYCwKc6UFTU5O3t/ezZ8/8/f3B/xqI4CLBvVzTm6dgwRqL9PT04PwKhnsxHxoawvkVuLu77927d2Bg4OjRoxcuXMC+omKNV7Kzs6dPn+7t7U1fR2Df0Ovq6u7du/fs2TOoKePi4mJsbAz+10Gms7NTS0vrypUr5ubmUGIYexSgs5vBgjpI1NfXX758OSQk5NChQwAAXCFDjhw58urVq4GBASbOCQ4ODlpaWujPysrKjIwMuA37psrKyri5uXF1BDc3t5mZmYKCAvxoa2pqam1tnT59+nC+TgiCUKnUgYGBS5cuofMRhnvgTUxMTp482dTUpKSkBDsDzczMmpubYWEycdtgGNvIgUv+k5OTZWVlwV9izTU1NX19ffBfUBaViVrp8uXL/fz82tvbYXuMinhBdWOoB80waWVl5YiIiNLSUnggVrEWAPD58+fAwEDYek2YMMHIyGjWrFm4AbOuri74VZeZmQkFSL9+/erg4ODg4CAuLo5+0/f19cnJyeFS//jxo4qKCn2uduzYcezYMR8fH4YvNCh79uzx9fVtbGyUl5d/+vTp27dvN2/eDBhJfoO/lHIFBQWxqt8EAkFUVPT58+fPnz+3tLSE2rM4IVz62H6J7isW+suHvWpJSUnYGwOFvsBHOO7T0dHx5MmTu3fvXrp0qba2dlS61I7ylg/nz4LzHElKSsrMzNy1a9e8efNwBiLYSODL9cmTJw8dOgRft3HmKbhEscYiXV1d1tbWhoaGO3fuXLp06cmTJ5WVleGL+bVr14KCgg4cOHD8+PFXr15xc3PD2WX37t07efKknZ3dokWLuru7+fj4Fi5c2NfXh76iCgsLY41XFBUVu7q6tmzZAhU7sWA9U44cObJr1y5jY2MVFRUajbZo0SL4hGMdZAAAwsLC8+fPd3d3z8rK6ujowB5FpVLp7WZQUE+WgYEBFRUVLS2t9+/fIwiCLWQAAIIgr169Mjc3NzQ05OPjw3rZ4MjJyYEtXGdnZ0NDQ1tbW3d3d0NDA6zKOzs73759GxgYiNOevn37dkFBwY4dO2CfkpqaWkBAwOfPn1+/fk3/jgIAIJPJLi4uLi4uHz58+ObtZG5uXlxcLCkpqa6uDvvZ5s2bJyEhAQsTfiqhJjLQbWPBggUMjehGAlY5GkEQUVFReAoIgtBotMmTJw8MDJSXlwOMLCoKdrUSlUo1NDQ0NDSUkJBAGGlb0y9tQvVUcTrUWMVaAICysrKysjL88Kqvrz948GBERISlpSU2wpkzZ8JbZdWqVe7u7i4uLgsWLHB3d3d3d588ebKYmBjswC8qKlq7di32rBsbG8XExOjbFSqVCuMXEBAYziQEIiEhIScnd/78eU9Pz48fP/b398PprOjpf1P128/PD34tbd26debMmbBfFCeEixOe/VW6r9jzxV0+XGARERE4zIz8r4o3fYEzdNPFqnhDBAQE7t27t2vXrm3btqHuLqOMUd7y4SwIcKYH2trafX19S5Ys4ebmZmLgQP9yjTNPwSWampoKH8g1a9ZIS0vj/AqYvJjj/AqGOynUeOX27dtw+IeTk5Oh+QP2DT0vL09QUBAAYGxsDMdBYRicgwR6avz8/AQCAXfUSOwmZGVl6+rqkpKSAAA0Gg1byACA3t7ea9euoSJqTJwT+vr6YM8MdMf18/OLjo52cHCA/i/CwsI2NjYvXrwIDg7GHcj
BwXHmzBl0BAgAMHPmzL1798JGAgcXF9eRI0dsbW3hOBZzli5d2tjY2NjYyMHB4e/vn52dvXjxYsD0G2I456CRoKCgAG03SktLo6OjdXR0+Pn5jY2N7927R6FQSktLHz9+7OjoGBMTQ6PRjI2NExIS2tvby8rKPn78WFlZWVFRkZSUVF1d3d7ezsnJaWVlVV9fHxkZCaUyo6Ojh1M3BgCgysg4HWpUkVlNTS05ObmkpMTMzCwgIOD8+fMNDQ3nz58nEommpqbYT6KtW7cydLaD3Lhx4+bNmwkJCTw8PKtWrXr27Bk6vTAwMJC+G+Pt27ddXV2wwwDdWVZWlpOTk5CQQP8wmpmZLVy4kIeHR1tbG7asnZ2daWlpaWlpvb29y5cvv3r1KhS5jouLq6qqqqysxE5WkpKS2r9/v4+Pz82bN6dNmwaNW11dXQMCAry9vd+9excUFEShUGJjY9va2uCVSkhISEtLu3//vq2tLYFAuHz5MgCgqKiovb39xYsXAQEBu3fvHhgY+PjxY21tbV1dnZ+fn7m5OXyniY6Obm5uhgLxKLDGwF4+3FW7du2ajY3NuXPnCgoKcnNzh5Ozh7S2tsbHx3/+/Lmurg4AgBY4kUgMDQ2trq7Oz88fM2bM7NmzZ8+ePWvWLD4+PnTEYVTxd4iB/g7AGdLNzc0IgpSWlrKxsdXW1hYXF2tqaiIIYmRkFBMTU1hYiCCIv7+/lpbWq1evtLS0oOpramoqNioikThu3LiysjIEQezs7B4/flxdXb1s2TIEQcLCwkxMTHBJW1lZnThxAkGQ3t7e/Px8DQ0NKAMdFxdnbGzc3d0tLy+PIEh5efmKFSsQBElPT9fS0kIQxMXFxcHBAUGQ+Pj4/fv3Z2Zmwty6uroeOXIEQRANDY3Pnz83NzdPnjy5s7MTQZCMjAwKhQJfBukpLCwkk8lWVla3bt2aO3duWFgYgiDFxcXw4d+0aROCILNmzcrIyEAQpKqqqqmpycTEJDIyEkEQRUXF6upq3FG4wNi0UlJSNm/ejCCIm5sb1PWWlJQkk8nYQm5ubl6wYEF0dLSysjLULy4sLOzo6FixYkVWVhYu86qqqlih4cjIyHv37uHCfPr0yd7eHsEoO0Pa2tp27tyJ3XPs2DGYEyx5eXljx46lLzd9ff2EhASGRers7KyhoVFcXGxoaHjgwAG4Ey3Mq1evQi3mmzdvXr9+HUGQc+fOPXjwgGFU34RCodDrMvf09NBoNFQTmUqlflMBGQaDCt04KWQmoMrIOB1qrCIzFhqNRqPRent76f91584dJukODQ2h9xKaTwRBenp64OfdTwLvDYaS38i3VL8HBwfhKbe0tOD+1dvbO1ycv5xvXj4SiUQkEunvlpHH/F9jdH7zDefPgvNASUxM9PLyEhMTMzAwwBmIYGPj4eHBvVynpqZizVOg2yoKzljk3r1779+/j46OTkxMvHbt2nAv5nA0KDEx8c2bNxEREZcvX16wYEFTU5ONjU1JSUlpaWl/fz98Re3o6MAar+Tl5XV2dr5584a+HLCeKTdv3jQ1NV23bl1iYqKAgEBUVNTXr1+rq6uxDjI8PDxfvnxJS0urqqqqrq5OT0/HHiUkJERvN4OCerIICQk9e/bs7t27IiIiXl5eycnJaCHHxMS0tLTIyspKSkrq6+tXV1djvWxwmd+2bRtD224AwPv3701NTQMDAzMyMmxsbAAABw4c8Pf3T09PX716tZ+fX2ho6O3btwEAO3fuPHPmTGhoqJqaGm7eytDQ0MuXL/v7+1+9eoXdz+QDAgCwe/duCQkJBQWFffv2wV5NKpWKFib8VAoLC4PrKJqbmzMyMjIyMrB9UCOHg4MDztPBIiAgwMbGho4VsbOzMxdeR4PBb+5vKoCjoOvc4egRPJCdnX3s2LEMLR6h1zFDSfQjR47ExsYiw8hFcXFxofcSmk8AgICAwAinYzAHfpEPJxPPfDk/Nzc3POUJEybg/jVu3Dgm0vO/lm9evjF
jxvDw8NDfLSOP+b/Gf069DGt6wMXFRaVSyWQyej8xMXCg0WgDAwMjqWUgOIMC3E+GMPQroFAo2JoOdWno7+9n2GuPyzPWUwJ+Q9A/PMwdJHBHMQmM5o1IJPLy8g4NDcFHEVfIuBwO55xApVKdnJzOnDnDMFeNjY1CQkLokD68rGxsbNCfFo2QSqU2NTVJSUn9kjoUjZODgwNeGvq6homNBgsWLH4T/nMt3+8My68AR2dnZ0FBwQ87u7JgwYIFQ1gt3+/CwMBAaGgolUpVU1MbTlGCBQsWLFj8PKyWjwULFixY/LcYtRouLFj80VRWVuK0Q+fNm8fLy8tEaH84vsuaAPw9hgNUKjUrKwtujxs3TkZGBrfs+ps0NTVJSkr+QNJVVVW5ubkTJ05cuHDhD8wB+eXU1tbW19crKyv/2xn5TzM653ZCqFTqx48f/fz86FWRfjJahhMpfxXd3d2RkZEjD48KVDLZTyaT3717x1CLkolw83AxDwd9ybS2tiYkJHxXJJCWlpbvmg9Jo9HQ/GNXeo0cEokUGxuLzudEECQ3N7e0tBQAEB4ejl2PzHDPL2fChAnm5uZPnz6Fs5OCgoJKS0uHE9pnzsitCSB/h+HAL5HSpV+7yRwEQY4cORIYGLhy5crBwcHNmzdjbR/+FWg0mo+Pj5eXF/2/vil7+0sO+Q2T+FcYzS3fsWPHFi1aJC8vP0JTghFe46GhISYrc3+ekXvQIAgSHR0N59Yz2Y8gyPXr1zs7O/ft20ffZjMUbh4uZuZgSwYW5tu3b793wg48EC6NYL4mFws7O/v48eOXLVuGlYL8Lpqamvbv34+uOsd6vqCePsPZBv0d8PPzy8nJycnJKSoqLl261M7ObsqUKcMJ7TNn5NYEkOEMB169eqWjoxMYGOjn52dvb0+hUK5cuWJhYfHw4cPLly/39fUxNxxApXSNjY0PHTrk7Oz8XWchKSmJIMh3GYw8ffq0oKDg5MmTIiIiK1eu3LZtm4mJyXcl+sthZ2fHqe1A6urqhhPYG44fOOR7wRqGjDb+hTWE/wjt7e1qamojDz80NLRly5a/LTvfAYFAkJaWHu6/T58+xe0RERFhGBLd39nZCRfA+vj4XLt2jT5kYWEhPz8/3KZQKPX19cxj/ia1tbXm5uYIgmRlZW3YsGHkB2KvQkNDw61bt74rXQBAdXX1dx2CZfHixdjDbWxsfHx8GObtn0FfX9/R0ZFCoVRWVsLXEQRBXFxcLl26xDD8nj17bGxsGhsblyxZ4uvriyDIoUOHysrKLCwsnJ2dm5ubp0+ffvPmzYaGBi0trcuXLx86dGjbtm0Mozp//ryZmRmJRAoPD584cSJcn15WVrZq1SoYAOpvPX/+/PTp0wiCWFlZeXh4dHd3/x975x3XRPL38aEXkSJFUSwUERQLImIDG3aqgqKo4IkFERt2sYKKvdEUVAROQBE8EKUJSm8iICC9hd5LIBCSzPPH/G6fvU2CnOVOubz/8BWX2ZnvzJbZ3Zn5fAoKCnp7e5FWA0ERAmlO1tfXk0gkQ0PDa9euQQhJJNK5c+eCgoKQnGxtba2Hh8eTJ0/8/Px6enpu3bp15coVAwMDtGa8u7t78eLFg2/AFStWXLx4Efsves1FHYaFhcX58+cXLFiAFCHw4MulUCj79+83NTXt7+/PzMw8cuQI/q8dHR12dna7du1auXKlp6fnuHHjMjMz6XS6jY1NZWUlIVsqlerg4PDs2TNzc3NLS0s6nX7kyJGoqCgzM7POzs6bN29qaWlFREQMvkGwXZgr7uXl5ezsvHz58pKSkgcPHkyaNKm8vHzt2rUlJSUQQpTbrl270AcSLM/Ozs59+/ZZWFjs379/8uTJycnJGRkZYmJi/v7+LLULfmmG7Dufn59fU1OTv7//wYMHDQwMAADMMtN43WfMFObDhw8qKioAAC8vL11dXWYR57i4OCRByZxhRkaGo6MjEqVE7jB48FLOSAPT0tLywoUL2tra6FWD4EH
Drl6ELV/0HJGQkBAWFu7q6oqKimLn+wX/jnBzdnb2uHHjampq7ty5s2TJkra2tuzs7HPnzmEtg6lXAwBaW1tv3LiBSR0StL8JqtzYUaDRaKNHjw4PD/+Kjy3szHcwCM47cXFx9+7d8/T0RJ/CCJ4vmKcPS9uge/fuPXjw4PTp0y4uLiyP6TeSmJiILJZYujURYCkrqqSkxM6awN3dPT8/n50HGzIccHV1xQwHuLi4Ojs7U1NTnz59ij6fcnFxMRiMmpqalJSUuXPniomJ4ZWuWFpzEKR0B1B5vXPnDplMHjNmTG5uLrJ8EhYWrqmpGfxncBKJhHdpR54e9fX1EydOHDFixJkzZyZPnsz8NR5fbkRExPHjxzMzM7m5uSsqKvbv34//a2xsrLy8PD8//5s3b7Zv366rq5uXl8fNzS0nJzdu3DhCtp6eniIiIkg0AzBpCA9G9pbQINguhIIIEsQ7duyYNm3a2bNnb926paiomJmZ+fHjRzMzs2PHjnV3d+PzjImJIUgHY4YhLLULfm3+5Z73h5Gbm4ueT5OTk5E2WGlpqbq6OoTw6dOn+/bto1Aoixcv9vX1PXTo0MyZMyGEmAYY+lFRUTFjxgwI4ZYtW/744w8IoYqKSnNzM4RQQkKCOUMIoYmJSVJSUnNzs4KCAnNIhYWF0dHRVCpVTk6OwWA8fPjw4MGDEMKdO3cGBQXl5uYaGRlBCJuampjf+fr7+6urq6urqxctWoR+IE0pCKGUlBTLFsBvp9FoXl5e06dPP3ToEMu24ufnv3379pUrV+bNm8cyBwIrV65MS0vr6emRkZHp6+t79eoVeltCLZOQkGBiYgIhzMjI0NbWhhA+f/58x44dfX19SkpKSE3K1NTU3d09JSUF1drLy+vAgQP4o4BanvDeMDDgz3e+sWPHdnR01NbWTps2jTlZSEgIiURCZ0h/f7+amhpSyZoyZUpFRcX69es/fvwIIbSxsUHvfMuWLUPialhsaEt6ejoSbEN/ys/PJxzTwUfOEhMTE+yt9/Pnz+jHAO98EMIZM2Yg/UwNDY2MjIzU1FQIoZOTE8pHV1cXSbjp6emlpKRACJG/OXM+6J0P/dbX10ci1CUlJbNnzy4oKHj9+nVcXByE0NfX19jYWF1dHd2mEdg7H4QwKSnJyMjIyMjo3bt32DsfhPDDhw98fHy1tbX9/f2PHz9++/atnJwcjUbr6enZuHHjzJkzS0pK1q1bh31+wFBWVm5raxtkAxoaGp46dQr7b3Z2NgCgra3Ny8sLvaoSXusRzOVu2rQpLCzMzc2N+a/u7u7Y4cjPz1dXV//w4cP79++ZgzEzMwsJCYEQvnr1Ckke/vHHHyEhIUZGRtHR0dglM/gGwXYhcO/ePfTGj/Hx40c5OTn0duvs7Ozk5MSuspiAYklJiZqaGvzr9TiUGLLvfBjYiwtBZppZ95kdBBFn8KceErNu9ezZs4ODgyGEaFiFAEHKmbA7wYOGQGNjI/IZKS0tRT8KCgoG3wg8PDwWFhZxcXFhYWEsE/wt4WYAgJWV1ePHjyMiIrS1tUNDQysrK9HUQWbtZrxwM7P298ClCAkJfZ3Fz8DmO3jnndLSUn5+fqTwglqe2fOFuVJoC6ZFDgCYO3duXl7ewFLmfxfMlAAAgL5DfJFt27ZZWlpu2rRJSUnp/v37yHoQXepgENYEGOwMB5B+8apVq5SUlNCddNKkSS9evDh69CimsY7PcO7cucHBwcHBwQsXLiTkiVTjPTw8GhsblyxZgipbVlb29OnTo0ePHjhwQEZGBpmJ9/b2IuMnAAAfH5+4uDizvQBL9u7dGxQUhI0WR0REbN26FSmwM4eKwVyura3thQsX0FnBMioEGhy9ceMGy7FYgtXG58+f3d3d9fX1BQUFIYTYoRl8g2C7MMfv7++PqpacnAwhDA8PP3ny5IEDBwAAsrKyQUFBqAGTk5PZVYdEIqGb2CCb+pdjyPZ82DnNfG2j/0pLS0dERKBPUunp6XQ
6nWAKg8bG2OXJcruxsbG8vHxBQYGvry9zSIRzmrA74cIg7Dt69GjkMzJlyhT0A80XIDDwaSooKMjSc/Ur7rAGBgYxMTElJSXIAAybMTHwHZbZWIeHhwe5cWKtja9CW1vbxIkTv1gvfNH4pmN5XwN/dd4ZMWJEbm4u+paIWp7Z8wWrBeEMmTJlCpr6gULFTwjCisa8Rv8u2dnZmZmZUVFRSFMfQRDaZ4ZZVvRvWRMg2BkOvHv3jkQivXjxwtfXd+vWrcOGDUtMTMzKypKQkLh8+bKxsXFVVRVgYzjATkp3AJXXgwcPuru7r1y50sHBAX1FJ5FIOjo64E+Z1i+2oa6u7smTJw8ePPjmzZs7d+5UVlY6OzszGIyUlJTc3Nza2toPHz4gBXb8XszlzpkzZ9y4ccioAf/XiRMnJiYmJicnY73+kSNHJk+ezHKY4ODBg6GhoXZ2di9fvqyqquru7sZrCCsrK39R9pYQGKaUSygIL0E8duzYU6dOTZ06dcuWLREREXfv3jUwMJCSkpo6dermzZvHjh3LXFm8dDD40zCkvb39i639i/GN74w/La6uruPGjauoqHBycpo4cWJ1dfXTp0/HjRtXWVlpZ2c3Z86c9vb2Y8eOjR8/fvv27ejTjY6OzsmTJ9va2rZv375x48abN2+OHz8+KytLS0vr7NmzZWVlkpKS/v7+yG4tPj6eOcPDhw/r6OiYmJicOHGipaWFEJKfn9+UKVNu3ryppqbm4uKye/duPT29mpqaRYsW7dmzp6+vb+nSpVu2bHFwcBg9enR2djbLehEmjERFRXFxcYWGhtLp9PLychUVFebt9fX1+vr6yDu7qKiIkGFvby/yV0PPiSxzZhnJyZMnUZAmJiZoABxrmba2NgUFBVdX1+vXr8vLy5NIpGPHjqmpqbW0tDx//tzKyioyMvLUqVM0Gq2/v3/GjBl2dna2trarVq0ik8nYUYAQGhoa0mg0fL3YgVxhAQDnzp3LyckRFhaOiYkJDQ0dNmwYGtXHs2fPnuXLl7u6usrKyiYkJJw6dUpHR8fBwUFFRcXb2zskJGTKlCknT55cvnz5vn37Pn/+LC8vjyZKoNg+fPiAbTl69KiTk9OLFy/QOB/hmIaHh0tKSmLfpf8Z0MdkBoPBUrl/YGuCfwXUPsgVgU6nU6lUzJGAwWDg3QmuXbvW2NgIIaRQKMhWZZDU1NT8LR8DQrkQQvwUD+a/Yjx79oz5Cy1+x87OTiwSVAs6nY4OCsEcYzANMsDRHNjBo6Ojg2V1nJ2dHRwc2tvbsb+yNAwZAvzXNVzwus90Op3BYKC1rn19ffz8/Oib5CCzghDevXt38+bNzc3N9fX11dXV5ubmhDR4KWeWGsqdnZ3Dhw+n0+lfN6SM8mfejqxB2TnqfR1IuBkAgLS/CX8dQLiZoP0N/6rKjR2F8PBwfn7+JUuWAPb1+mpQhj09PWgJRHd3t5CQEJ1OR0e/v78f1Y6w8Bl/huBrys3NzU7v29vbe/Pmzd9RL/s/S2pqqpiY2CC/Sfzz5OTkXLlyRUFBwcHB4d+O5ev570gH/9d7vu9IY2Pj/Pnz7e3tRUVFm5ubV6xYwTy/i8MgqaysLCsrW7x48b8dyDfR09PT2trKUWH9L9DV1RUbG7t69epfdxrkf0o6mNPzfU8aGhpSUlIkJCTmzZv3614AHDhw4DC04fR8HDhw4MDhvwXnvYQDh5+RgWWjs7KySkpK5OXlRUVFZWRkxMTEGhsby8vLIYRiYmJokXhfX19aWlpzc7OqqipaRYoUWbm5ucePH4+ZjLe1tSF5Ui4uLjk5ObTO/e/CrMLc0dERFxc3ZcoUKpXKPDhXX19fVlaGpa+oqPj48aOUlJSSklJPT8+ECRNCQ0PRiu/B8HNKbH+Rv6UkXlpampeXJy4ujma3fi/CwsKWLl36H/RS5gy8c+DwM8JONrqlpWX16tUJCQmampqtra2rV6+uqKgAAFy5cgWpvaAbdEZGxooVKzo6Omb
NmpWYmLhq1SpxcfHg4ODffvuNRqPFxMQYGhrev38f9ZQBAQE7d+5kMBg+Pj6ampp/V32GWYU5Nzd3w4YNEydOjImJYakt2dTUhAmj79u379atW6qqqqKiotu2bXv16hWzAOzXtdW38F0ktgdm8EriGRkZ4eHha9asef/+/WAkbFCGg4nhH1Cg/Un512aV/nioVGp4eHhmZmZeXt4XE7e2tqb8SXFxMbup/N+dnp6e0NBQ/Ja6urrExMR/pvS/RXt7e0hISGlpKaYnAiEsKysLCgpKTU1FU59JJFLKXykuLkYpa2trkfrid4REIr19+/b75skMy8g/fvz4/PnzjIyMoqKixsZGVFm0OOx7gQmpMBiMsWPHvnnzBkK4fv36CxcuYGmSkpKSkpIKCwv37NlTVVWFNpLJZHl5ebRWB4E0XLy8vDDhTTKZPGPGDFdXVwihp6enrq4u2q6iosIsa/JFMF0ShLu7+4YNG9Dvx48fD7Cjq6vrkiVLsP92d3cjqRQEJgD7RVi2lb+/v76+/vPnz58+fXr+/Pn+/v4LFy7s2bMHCa90dnaSyWQLC4sHDx6sXr368ePHNjY2+DzxojOenp5aWlqDieQrOH78OEFbh8DevXtjY2O/IkMO7Biy73y9vb06OjpSUlItLS3GxsZfTC8mJpaYmIjEqMLCwtTU1P6uR8/XUVFRsXHjRvwW/OPwzwPzUzxkZQHz4MGDDx8+9PT06OjolJSUlJeXYzOkL126NJjVx4OHwWCEhYVdvnwZ/ffH2akQImd+66qtrS0tLZ07dy5eH+R70d/f/+bNGwaDMX/+/L6+vqCgoLVr12J/nTt37qxZs+rq6hobG5WVldGhSUlJ6erqwiuJ2NraErIdNmzY7t2779+/j99IIpFaWlrwHyfxGqfMwqT9/f2Ojo7Pnz8nvA+tXLny1atXVlZWXV1dyB6BRqPduHEjMDDQ1NS0vb3d2dl569atAICnT5/iqyMsLPzbb78xC8BSKJTbt29fvXrV0NCQSqUOpq0AABoaGl1dXSYmJhs3bkTik+j78K5du1paWp4+fUqj0U6cOLF169aioiJLS0vmZUiI6urq0NBQExMTAACDwTh69Gh0dPTGjRu7urrq6uo8PT29vb3RGfLkyRMXF5cVK1aUlpaifTs6OjQ1NYODg1NSUsaNG1dSUtLZ2fnbb7+1tbXt3bv37t27DQ0NQUFBb968QQKqERERu3fv3rBhA1Z6aWlpenp6bGzs77//rqiomJ6ezqxMW1hY6O7uvmfPnrNnz2IZ5uTkIGmhnJwceXn5zs5OgtQwpknLnGFbW9uRI0euXr2qo6Pz6NEjdg3+6zJke77Kysrq6upp06bp6uo6OTmh2yKNRrt69aq7u7ufn5+pqWlTU1NsbOzChQuzs7O5ubmnT58uISExZ86c/fv3a2ho+Pj4AAA+f/68cOFCb2/vZ8+e7dmzBwDw8uVLIyMjLy+vU6dOoS7q8OHDrq6u69atc3Nzs7W1rampGXycqqqqhI/sU6dOtbCw+J5t8T1ITEwUFxdXUVGxsrJCAzAsLWBWr169Z8+exYsXc3Nzz5kzx8zMbMeOHQCArq6u+vr627dvf8eQUBHo95s3b/6ua90gYY58z549c+fO3bt37/jx45ctW+bt7d3T0zN9+vRhw4YhxZnvCEE2uqqqikajSUhI4NPw8fEtXLjw+fPnHz9+fPbsWWJiYllZGSENy+WGysrKmFx1dXW1hYXF2rVro6Ki8Np7FApl9erVBgYGvr6+PDw8BK1nggozxvjx45OSkj59+qSqqpqSkgIAuHfvnoKCgomJibW1dXt7+8yZM9E3VZah6ujoVFdXAwAwUWZm9eovthX4MRLbBKVpvKI0QSoa7S4mJmZiYlJSUjJnzhw5Obn29vb+/v4VK1ZISEgMUklcUVFx7NixS5YsMTc3nzhxIjc3Nxq4NTAwcHNzQx+ZT5w4MWzYsBkzZjg5OfHx8aEMp02bJi4uTqP
R0GMEhHDWrFkdHR179+61trZ+9uzZhAkT0EpT5gyfPn06YsSIo0ePdnR0LF26lGWD/9IM2Z5v0qRJ4uLiCxcuLC0tNTY2FhUVBQCcP39eTExs9+7dGzdu3L59O5lMXrx4cWlp6fTp0/H75uTkZGRkIIcHVVXV0tLSrVu3rl+/fv/+/QCAWbNmNTQ0WFpaamhonDlzBgBgamq6Z8+e2tpaAwODEydOoI8keAjmAO/evXv8+LGDg8ODBw8AABBCT09PY2PjmzdvQgixx2H8c9znz58VFRWTkpIYDMbu3bsrKirwRhMAgHv37r1+/drR0ZFQNEsDAfyTqaWl5ZEjR+rq6mbPno06j927dzc0NBDyYX6K9/f3R3pOiCVLlsTFxWHD9RhIOtLPz8/Z2bm3t/f9+/fMB6u3t/fw4cO7d+9etWpVQkIC4cEZq1pWVhbeRgNfRw8Pj/T09KysrKioqPDw8NOnT2MP3Xi+wlqBEDm7ty6W+34706dPP3Xq1KtXrxoaGuzt7ceOHcvDw4Pk35hRUVHZtm0bmvlSWVnJfB4SyMvLQ/dEAICcnJyTk1NNTQ1hnAmvcQqYtGrj4uIUFRUBk9gsmUyeNm1acnLyxo0bjYyM6HR6UlISurMvWbJkwoQJmECEvLw8y+oQFCQyMjK2bdtmbm5eWlpqaGg4mLZCG3l5ecXFxSUkJDAxgeLiYn19/fPnz2N1x5OcnGxsbGxsbIydqDY2NtevX3dxcTE2Nq6rqxMUFJSUlAwNDUW6o/r6+l1dXZqamgICAhkZGatWrTI3Nw8LC7t58yaWp4WFha+vb21trZKS0pMnT/744w9UBZYqGUhYQ1xcnOUJSVAhxuRhkWXxzp07+/r6BhCsYNaVxWIgZDhjxoyYmBgymaykpIRNhhpKDNmeDwAQGxurqKg4depUbOzd29vb1NQU/V65ciVaaY4X2Wttbb1y5crdu3fl5OSwE4iLiys1NTUyMhIpHHJxcUEIOzo6QkND0VJr/DPy6NGjme/++AdnKpXq6Oi4bdu2I0eOIB0TBoOxefNmDw8Pd3d3Li4u7HEY/xw3cuTINWvWIA8UNTW1UaNGOTk5CQkJKSkpnTt3rrGx8cmTJ9ra2vg7MoL5OZ3wZMrS2gb5ueBhfopnaQHD8n0XQtjY2Dhy5Mi9e/eyfO0TFBTE3F54eXnx4eGrNmPGjI6ODgDA4sWLW1pa8HXU0NDQ0NCYMWNGcHBwS0uLnZ0d80y8jIyMt2/f7ty508HBwdHRsaioaGC3GpaRs3vrYt7322GWjRYUFLSwsMBbL3V1deFthtra2mbNmrVo0SJ5eXn0XIUoKyvr7+/HZ9jY2Oji4nLu3Dnwp3CrrKysv7+/paVlfn4+lgyvcYqPDQ4oNnvr1i06nc7NzX3y5Mnm5ube3l5ZWdnnz5+jgD99+oTGWgAAu3btevz4MTbJAkKIJprCvwrAMgsrEwRR/zGJbYLSNEFRGi8Vje07atQoRUVFe3v7Bw8evH//vru7G33mwRrhi0rikI0KMfa7q6srPT0dAFBRUVFfX49liOniIkW0wWeorq5uYGAQFxfn4+Pz3Se1/gwM2VUNZDJZSkrKx8fHyMho06ZNc+bMmTp1ant7O76fY37mGjFiBBKxvHnzpqmpKWYmgG7E2JtQa2urra2trKzszp07BxPMvHnzgoKCpKSkaDRacXExulEKCgpu2rQJhSEoKMjHx4c6PCwq9BwHAECl7N27d8uWLdOmTVuwYAFmNIGNTJiZmU2ePPnBgweTJ08mlE540KupqUFPpti+tbW1tbW1PDw8/v7+ixcvZvkGgz3FHzt2zMjIqKamZuLEifj3g8LCQgCAkpIS877R0dHNzc03btzo7e2Njo4uLS1FLwp4eHl5JSUlAQDYg/NgqsbM0aNHjY2NX79+jb/vI77CWoE5cuyt64uz/7u6utC7u66uro2NzRcjJ4Bko7u7u+/evVtdXU0mk69duwYAuHv37pEjR7Zs2bJ
kyRIajSYsLLxp0yZzc/Nx48bNnTt34cKFSJQ8PDzc1ta2rKxs6tSpFApFRUVFVFT09evXxcXFd+/eBQAgT6V58+Y1Nze/fv06Nzc3LS1NR0fn1KlTenp6AQEBmpqaAAApKanTp08bGRmVlZXFx8enpKRUV1cjrWcKhXLo0CEDA4P+/v6Ojo6qqqqmpib0MNTV1bVnzx5dXd2IiIhLly4NGzbs8OHDurq6KSkpmpqaTk5OTk5OhYWFFRUVW7Zs6erqMjY2NjIyEhQUpFKp5ubmb9++bWlpSUhIUFNTQ6LMBw8eXLly5du3bzU0NBwcHCIjIzdt2kQikdBNmV1bYRLbFArlyZMnr169QoNbmMR2QEDAuHHjMIltNTU1rP3xEtvi4uLx8fFIYrusrAyvND1//vykpCRFRUX0ydfNzU1DQ2PevHknT57EH83du3cXFhYKCgrq6+ujj4dISZyXlxdTEt+7dy9SEh85ciRSEkfxNDY2FhUVoef40tLS2NhYcXHxxsbGpKSk7u7upqam0tLSixcvGhgYLFy4cPny5b/99hvK8Pz58+vWrTt8+LChoeHIkSMTExM7Oztra2urqqrevXtXUFDw8ePHoqKiyMhI5gzz8/M9PDyUlJRCQ0NtbGzwLTNE+P6TZn4OHjx4gEnHTps2LTw8HEKor6//4sULLA2ajignJ4f+GxkZiSkjP3jwAPMYwxKgJz4SiTRnzpy+vr558+bFxMRguWlpabETq12/fn1qampBQcH8+fMbGxuHDRvW1NQEIURzOJH1eX9/P/qRnJy8Zs0aCOHkyZPRXMHy8vK6ujoI4YoVK5D9W319/dixY9EzbFpaWkVFRUdHx6dPnxQUFJiVfDE3Mjs7Oy8vr4CAAD09PZQMud/duXNn2bJl+fn5GzZs2LFjB8sqXLhwAUkht7a28vDwkMnkqKgoNDyAEly9enXr1q1Yen5+fkwd287ODtu+bds25GVIAPM5I4RHIpHwVRs5ciSDwcjLy5s+fTqEMCsrC01KvHjx4q1btyCEubm5/f39tra2zH7uiYmJY8aMQb8XLVpUXFxMaBnmqFhG/ttvv5mammLb0cNETk4O5msPIYyPj2dWyv6O0Gi0mpoa/H9JJBLzhGR0I/vGstDrAjuNZoIKM1YuhLCpqQmZuWPghZIJIK8+5u2YKDNBrPnJkyf/2ARs5pAwpWmCojRkLxWNLh9205u/XUm8r68PnzmWYW9vL/yr4vZg8Pb2rqysLC4uTklJuX379jfG9hMyZL92jh49evv27V5eXteuXZs4cSIaFnJxcXn+/Lmnp+erV69evHhBo9Gio6ObmpoyMzNbWloCAwORz/iFCxcCAwO9vLwAAHl5ec3Nzb///vvz58+3bNnS09Pz/v37qqoqEonk5+dnbW2NvvYUFRVVVlYy+7Aj0INzTExMWVlZUVHR8ePHp0+fbmxs3N/fn5GR0dLSkpiYGB0d3d7ejmyg0eMweo4zMzOLiYlBi1ttbW2nTp0KABg5cuSmTZvU1dWtrKx6e3vJZLK9vT2FQrGysiI4pDB7shgaGmImJujDLLO1DTPoKf758+dHjhxBT/EsLWBQYj8/PyqV+vvvv1MoFC8vL9S8AAAajSYpKYm++eAzp1AomNsL3mNl/PjxHR0d+Krp6emZm5tHRES0t7eXl5dHRESUlZWVlZVpamr6+fmFh4c/evQoOjpaVVWV2at63rx55ubmV65cCQoKMjU1VVBQGNithl3kd+/elZKS2rJly+PHjz08PEJCQkaMGOHn59fT03Px4sW7d+8eOnRo165dzO+13xEeHh78SycPD4+cnByzLrawsLCUlNQ3loXeq5C0NzNcXFzDhw8nfO9FiaWkpIYPH47fjobbWSIrK8tS8A97Tefi4sJi6OnpWbJkyb+lAy4oKMjFxcXNzS0oKMjNzc3Hx4dvHEyKnQD6lsNO2fzb15Lz8/PjM8cyRF81/q6YorOz87NnzwoKCtAE0W+M7Sd
kyKqXIT8ECoXS3d1NGKHt6uoSEBBgdwr+IAjmAD09Pfz8/F88HdEEbnyoNBoN2wszmkAHkUwmE240A0Amk/GXKLImgBDSaDSWo1Yo8ubmZgEBAUIptbW10tLS33esCwuPuWrsbDTQMBiDwaBQKAICAuzadmBrhcFDp9MbGhq+TvGEA4efnO7u7qSkJBqNNm/ePDExsX87nO/PkO35OHDgwIEDB5YM2a+dHDhw4MCBA0s4PR8HDhw4cPhvwen5OHD4D9HR0fHixQv8arPvQm9v79/Vuf4iPyhURH19PVqeOwBhYWG9vb1UKvX9+/doqSs7qqurY2JiviKMhoaGwShQfwVUKjU6Opp5rc5XhzrEGPo9H1KyR9Dp9NQ/KS0tJYhctLa2vnnzJjw8vLq6Ojc3l2VuSL4WANDXR4+PrwoOLigoaM7JISqeAAAaG7vfvatg3v596ezs++OPwsGkbGrqCQr6XFzcim3JyqqPiiqjUGj4NE1NPeh3Xx89IqI0Le1/K9MHvvIHD7sLEsFgMBITE7ElxgNTVlaWmpra2vq/GpWXl6empqalpaHteCgUyqdPn9Dv3NxcCoXyXeoyGMrLy/38/MrLy39E5l8hVXrgwIEVK1Z8+PDhO4bR0tLy5MmT765Z+iNCxRiMOi7yMWhqajp48GBZWRm7ZAT9WHawPFgSEhLe3t5o1fn3pa6ubvv27YTHkUGG+l9g6Pd8RkZG2H2Hh4enrq5u7ty5aDnB9OnTU1NT0Z+ePHliYWEhKSmprKx89+7d06dPM2eVkZGRk5PDxcWVkVG7YoVPR0fvrFmjExNJq1axUIz844/CK1cSvz1+Oh12d7O9MMrL2zduDPxiJqWlbadOvY2JKZ8zxzM2tgIA8PvvnzIyamfPHmNp+ZJKpQMAcnMbdXQe5+c3AQBoNIaFRbC6+qjKyo5z596hTIKCgr69OiwvSDAIoxZmRo8efebMmZUrVyKVCjk5ubNnz0pJScnIyFhbWz958gRN73zx4kVRUZG0tPSiRYvS09NramqWLVuGt9T5CgbZ5TQ1Nbm6umpqan78+PFbimMJiURiaQA0AC0tLQUFBSIiInv37v2OkTg5OVlYWBCW03wjPyhUjMGo4544cUJSUnLMmDHMqkx48Pqx7GCnK8vPz798+XKkLfB9wVswYgwm1P8IQ1bDBfHu3buRI0feu3cPk9GbOnUqDw/PggUL0F/d3d21tLSys7OPHTtWWFiI5u9evXr1xo0bhKyoVKq/v//169e7u/vXr3/+5ImxtvY4AMD27ert7b3MRc+cKRsU9Pnbq2BvH7N587QpU6RZ/nX69JHDhn15gn5ZWduDB/oAABmZYX/8UbB48YTz59/l5u7h5+cZP14sMDB/06apamoyWCnh4SVCQnwyMsNMTScfOhRx4MAcWVlZCGFSUhLeffQrYHlBAgDs7e03b948ZcoUOTm5QWYlKCiIxAT27Nnz8OFDPj4+bW1tBQUFAICioqKioqKGhgYAYPLkyUhvTEpKSl1dff78+e3t7cePH0dS2l8BlUrdtm3bixcvvpgyLi6Ol5dXSUmJpbTNF7l3756AgACJRBo1apSpqamhoaGnpyedTjc0NMzOzsZ8DKqqqvz9/RcuXBgUFLRz505ra2u0MTQ01MrKavTo0Rs3bjx69CjyIG1sbAwICNDX109ISMjMzJSQkEBCz+fOnSOTyZWVlfv373/8+PGECRMAAPHx8adOnXr//n1paemLFy/q6urCwsLQujEzMzMUZGFhYWtrK1o9FhkZieV59+5dHx+fwMBAZWVlaWlppGni6elJCPXJkydkMjkkJMTV1TUuLg6ll5KSQl67KNSHDx9i7bB9+3Z7e3sUqoWFhaOjo42NTXp6OoRw2bJlERERqqqqx48fj4qKotPpiYmJlpaW2MJKbKOFhUV4eHhaWpqXl9fFixdzc3NnzJgRFBRkZ2dXU1MTFhbm4+PT399vZWV15coVdBYBABgMxvHjx5c
vX/7w4cMHDx4UFRVhDYtE4Gg02u7duxkMhpubW3Fxsa+vr5OTE/hTV1ZcXHzu3LmioqIeHh5qamqRkZEXL15EVsDh4eE7duxAix0JpaDFPO3t7bt27WJ5UHp7ex0cHFRUVBITEzdt2qSjoxMXF5ednS0kJISe+AsLC2NjY3NycqSlpc+fP084x9zc3NLS0lxcXE6ePCkqKnrhwoUnT56MHTs2ODj43r17X3HS/jL8a2vo/xEcHByqq6tHjBiBaUmUlJTw8vJCCHt7excsWHDt2jUIob29vYmJCX5HZqmFBw8ePHnyBEIYHV0mJXX1r4lpEMLS0tbjx6N9fXO2bg0mkToyMmpnz/a4fj1JR+dxTEw5hPD69aQ3b4p37AjJzKzLz29SVLzj45NtaOh3714qPreCgmY3t3Rr61dnzsTW15OVle9du5ZYU/O/+Ol0xpEjkVFRpWZmgZ2dfRBCQjADc+1aopdXVk1Np7LyPbTl/v2MvXtfo98mJs/evauAED55krV06RO0cfp0t4yMWghhd3f34sWL2eVMp9OPHDkSFRVlZmbW2dkZEBCwaNGie/fuaWtrJyQkQAjRGnAPDw9JScmKigr8vvX19crKyteuXaupqdHT0zt//vyuXbvWr18PISwoKHBzc7O2tj5z5gxzoY6Ojg0NDWPHjkU+c05OTn9WxOTKlSs0Gq25ufnu3btoo5ycHIrkyJEjSCUHT21t7bJly06ePLlly5a5c+c2NTWRSKRz586hezSFQrGzs9u1a9fKlSs9PT3FxMT8/f0JuhgUCuXkyZPe3t67du16//59fX29tbW1np4ewX9xkKSnpyPPLAjhqFGj8vPzV65cmZWVBSGcMGFCe3t7QkICOmmzs7ORdVx5ebmwsHBnZ+fixYt9fX0PHTo0c+ZMCCHy6oIQVlRUzJo1C0LY19enpKSEVEVMTU3d3d2dnZ1tbW1Rcfv27UOKNgYGBmFhYRDCefPmkUgkZGBLp9PxhpdPnz5F4jvMeebm5k6dOhVfKUKo8fHx5ubmvr6+q1evPnjwID49FipzO+BDnTZtGjqXxMTEuru7KRSKsrIyhNDa2trX17etrQ2vdIPfmJiYqKenByEMCgpCzoI3b948cuQIhPDkyZM+Pj4QwmXLlmVkZEAIjYyMEhISKBTKo0ePIIQmJibv3r3DNyymJVRWVqaiokKn0wMCAkpLS7GiHR0dnZ2dIYRr1qz5+PEjhNDFxcXMzAz9dcuWLUhNCULIXMrAB+XatWtIvSgvL09WVpZKpaqpqSF1mylTplRUVBgbG3t7e9+/f5+fn7+lpQULFZGSkmJkZAQh9PLyOnDgAIRw9uzZnz9/Hoyn6S/NUP7aSSKR5OTkxowZs3TpUrzFFIPBOHnypJKS0rFjxw4fPgzYWKUQcsvIyEAfPcrK2iQkBP+amAcAsG/fmw0bppibT9XSGnPkSBQAQECAx85urq3tbD+/TwCA4cP5ly1TnD59VEhIoaqqFJVKNzCY5Oam5+GRic/txInoYcP4Z8wY5eSUwMfHPW6c2KpVE0eP/t86biqVrqoqraurQKMxMjP/3tgbjcaoquowN59aX08WFv7fwnNhYb6Wlh5CymXLFPPzm9LSauLiKhsauuXlxQEAwsLCNTU17AbkCe4tBD8UGo1mY2NjY2NjZWWF9GjwDGDUgpftxob08MjIyAQHBx8/fhyJ6WAkJiY6OzvfunULP9AbEBAwYcKEESNGBAYSPxHLysqKiIhoa2t7e3srKyvfv3+/p6dnwYIFenp6r1+/FhAQwDS1t2/fLiQkhPze8Dk4OztLS0tv2bJl3759ZmZmMjIyWlpaKioqenp6LFtsYJhVRtml5OHhQW8GqGrZ2dlI0/XGjRtokIyXl5eg319ZWdnV1YWkABYsWPDp0ydMNxUAwM3NjaomKiqK6Zo2NDTgfQmwrNra2tDFMnCeLENFZvGYuQFzepbtwDLU4cOHCwgICAoKopPk6NGj169ft7G
xwa/Cxm/EZBDYVRYw6foSXBpYNqy8vPz06dNDQkJqa2vR5wd21UFNhDYKCQk1NjayK2XgOLEMVVVV+/r6Pnz4wM/Pj9RtkPjOIG0cMC5evLhs2TLkALN//35jY2Nra+sv7vXLMZR7vt9//72mpubGjRvS0tJ3797Fbtnc3NwODg4TJ07E5PnZWaXgwa5weXnxysoOGo3YAXz8WC8oyAsAWLBg3KdPDQAA9B1SQkKora0XADB37lg/v0/NzT1oXx4ebgEBXlFRgc7Ov6jO19Z2bdkybedOjb4++xEjiCrpgoK8kpJCoaFFNBqDOYaB8fLKOn58AS8v9/jx4l1d/yu0q4uqqEi8HmRlRVJTdxQVtfT09I8cOQwfBrshLsIVSxCDLi0tJVyQA4A3ahnMdauhoeHs7Gxqaop3LdDR0dm/f7+jo+P27duxjRs2bLCwsAgNDWUpfIXdl7W1tevr6xUUFEgkUnx8PACAwWCwvC/jIdyD8G4SX8GUKVMSEhLQ77a2thkzZhB09zE9foze3l5RUdGJEydGRESgkdT09HQ6nY4lQE+7AICxY8f29PSgc765uRn7oMcMxCld4H0JsI0qKiqosxl8nlio8vLyLM0N8KEyt8NgQu3u7k5PT5eUlHR3d2e5EcufXQ6AyceA4NLALoxjx45duHBBVlYWvxE7WFh18E3U1taGOTsOphT8dizD3t5eSUlJRUXF3Nzcrq4u8KeBBsHGgZAVdlIh6VEAgKysbHZ2dmho6IcPH+7cuRMcHOzm5sausr8uQ7bno1AoPT09p06dsrOzQ3qSf/zxB/jTkAX5Evj7+yP72W3btqWlpWVnZ2O7FxQUEDLErvBFi1wLqkoAAK/RSURBVCbIy4s/ePD/U87Kytr6+xlTpkgnJFQBAJqbezQ0RmOXDfpBp8MNG55v2KA2apQIZGULgtHVRU1PrwUAVFS019eTubhAf///37w+f252d8/Q11cWFOQl7EunQwAAjUbD3+wwwsNLdHUVRo8eXlPTJSjIKy09DE2cyctrXLpUnjn92LGimzdP8/PL9fQ0wDby8fGJi4sTDGL+DIz1FYt+jxgxgnBBEnZnZ9Qy8HWL2XNv2bLFzMwMGdmAv3rWqKqqoqOMDv3Zs2eHDx+OTIbZQSKRtLS0PDw8GhsblyxZgnbEJ2DZwoR7kJSUFMvb6yAhqIwqKSkh3f0bN24g3f3JkycjHwMAQElJSUBAwJ07d1xdXQmartnZ2a2trS9fvoQQvnz5sry8PCMjQ1BQ8NGjR1euXImKimIwGKampphualtbG/KnrKmpKSgoePv2LYlEQtNlExISPDw8pKWlMasvAMCcOXPQGxIhz61bt0ZFRRUXFxPeVvGhEjRasfT4UAntMGbMGCzUkpKS6urq5OTk/Pz85ubmyMjI9PT09vb2T58+sZRvxTYiE4nCwsLS0tL4+Pji4uLa2tqUlJT09HQk4pqTk5Obm4t8DCorKwsKCmJjY/n4+PAuDenp6ahhAQCYfiwAQF1dXV5eXl9fH19rTFf29u3br1+/joyMjIuLu3TpEvorlUrFDHIFBATwpaDXvgEOyr59+3p7e58/f+7h4fHgwQNpaekjR47o6ek5Ojp2d3cnJCQQ5H/xoQIAZsyYUVdXd/jw4YKCgqKiou7ubgcHh4KCgiVLlgxBfwY83+276c9Ef3//jh07Dh48iEZimpubly5dqqqqWlJScvbsWQAAGrFLSkoSFRVF3/SROfulS5e8vLxcXV0xnwGMrKysCxcuoN/l5W16ek/t7CK8vLLc3NJjY8shhHl5jcbG/hERJWfPxlZXd16/niQvf5tE6jh2LEpNzbW5uWfqVNddu0Lt7WM0NO7Hx1cKC1+MiSkPDS0cNuxiSUkrVlBw8OdRo65v2PD84cNMCOGZM7EmJs/y8hrRX0tLW8eOvXn2bKyp6TNz8xdxcZU8POdjY8vLy9tUVJwhhCdOnFi9ejUh+MDA/FGjrquoOKuoOOvqekM
I372rOHs2Nja23M4uAqUpKmpRULhz7ty7jo5eCOHHj3VHjkRGR5dhmVRVVVlbW0MI586de/UqcXAROficPXvW1NTU3Nzcx8dn3LhxlZWVdnZ2c+bMaW9vP3XqlI6ODhqN9/b2Jux+5swZNLAxceJER0fHsrIyWVnZ+/fvBwcHjxo1asOGDQ8fPiTskpmZOW3aNMxjgUajoaHB7OxseXn5ZcuWubi43Lx5c/ny5a6urhEREdzc3La2tmQyubm5efz48UePHiX4D5iYmNja2v7xxx9Hjx6l0Wh+fn5Tpky5efOmmpranTt3tmzZsmLFCmSaoaOjc/Lkyba2NvzuXV1d69ate/bs2Z07d2JjYzs6OrZs2aKtrV1eXg6/FgqFgh9yJujuIz3+3NxcXV3drq4uvHcBmUz+YuZ0Op2dsQC79ARfAkRoaCgagPxinixDHUwMhHYYTKhkMpkwEMty498C79LALg2DwWBpboDfBe9Z8ebNm7dv3/7dUgh0dnbiTVrIZDI6WOi/BBsH5oCpVCrWLHQ6vb29fZDl/roMzZ7vq2lubh7glnHz5k38Zd/dTW1qIt4FULfBEjqd0dtLQzsOHEZfHw3NmkFQKISZFP0MBoNOZxC29/T0QwjpdDrq179IXx+tro71Taevj1ZVRTSUuXbtWmNjI4Swra0NvUMQ+OIVS7ggmXdnE8xA1y0e1DF8NSYmJu/fv8cb66Dvir29vQRDHBqNxq4WhHvQP0Bqaurs2bP/yRKZef/+fXNz8xeT/Qyh/lCOHz9ubGxcVlb25aQQQggrKirwTmcc/jE4itV/AwaD8erVK319/e+7dOn7QiKRpKSkvruNcmpqqpiYmIqKCgCgqKhIWVn5++b/M7B69WpbW9tVq1b924H8PcLCwpqbm6dPnz7AGNhPwi8U6teBv0w4/Mxwej4OHAD4U9VJWFh41apVaIoNBw4chiqcno8DBw4cOPy3GLJzOzlw+NVBisl/axcIYWRkJPNqxe9CRUVFWFhYXV3dACKWGJhQalZW1ncXsx4YMpkcFhb2xWTMErJVVVVIxvorWp7DrwWn5+PA4ScFKSb/rV1Onz6tqqpaX1/f00NUJ/hGPD0979y5o6ysfPny5cEot40ZM8bU1PT169ckEsnCwuLgwYPfN54BKC4u3rZt2xeTESRkGQyGl5cXEnT9ipbn8GsxlL929vf3x8TEyMjICAgITJ48mfDX+vr6srKyWbNmEdSER4wYISUlVVRUBADg4uJCwnr4BBUVFR8/fpSSklJSUurp6cFUAfE5d3fzNzdTAADjxonJyop8/7qxp7q6s6ioZckSFkv08BQVteTmNi5cOEFSUggAACF8965SQIBn7lw5bP5Ofn7T5Mn/U/Ls72fEx1fKy0sgPZe6ujrCWl1msrKy8Cv/eHl5B17d/EWqq6uLioqWLFnCLkFGRoa0tPTA+sIDM8gcKioq8vLyZs6cSaFQmKU6vj2Mr2bq1KkZGRl4jZXvxcaNGxcuXLh7927wp7w7ACArKys4OFhZWZlEIgEAdu3adfbsWTExsePHjw8bNkxbW3vv3r0bNmwoLS2dOHEimUzm5eV1dHRsbm5esmRJZ2cnNze3gYHBoUOHlJWVBQUF+fj4bG1tXV1dyWQyiUQaMWKEgIDAtGnTvkIER1ZWdjDuIpqamoGBgdiRCgsLCwwMfPz48d8tjsMvx5B95+vt7dXR0UHCesbGxswJkE0JPz9/aWmpjo4OAICHh6ejo8PFxUVMTAxJFDIYDB8fH01NTezBcN++fbdu3VJVVRUVFd22bdurV68I2SI/h5EjRaytX92/nyElJfzdq0bQfMHDYMCwsOLLlxMGzuHVqyI3twx//9ypU12RLdG+feHS0sICArxId41Mpl65kmhj8xqlb2rqsbIKmT17THDw58ePswAA9fX1BLUwZkaMGLFgwYLc3FxeXt7+/v5r1679nVoyV42twQryeQAAlJeXD9LhiB2DyeGLbz/fHgY
AoKKiQldX98OHD58/f1ZSUvL19TUyMkKaDBkZGY6OjtbW1hs2bIiKisJ2efv2bUNDg7+/f3p6+po1axwcHLS0tFpaWk6dOuXj47N79+64uDgAwIEDB4KDg7dv375lyxYAwJMnT1xcXFasWFFaWvrs2bPFixc7Ozvr6OgQju/atWsPHjx4+/ZtBoOBur2GhoYdO3acOHHC3Nz82LFjYmJiEhISI0aMGD9+PH6KUGdn5++//7569WphYWF+fv5x48aNHDnSxMTE0tJy/vz5I0aMQI9Ehw4d8vDwKCws1NDQOHr0qIiIiLKy8smTJ/G6ORQK5fbt21evXjU0NERKzRs2bLh8+bKmpqa/v/+NGzeWLFlSU1MDAKDT6Y6OjrNnz7azswMAFBYWuru779mzBy3njYuLu3fvnqenJ9J07u/vd3R0fP78uZ+f38At39bWduTIkatXr+ro6Dx69Kiurs7T09Pb29vf3/8bDzeHf5p/c0nFj6SgoEBOTg4tugoKCsIvHSWQm5srIiKCftPpdBKJBCH09PTEdF1VVFTQcmlXV9clS5ZgO3Z3d7u5ueGz6uvrs7OzQ79NTJ5dv570PasEIYSwqqrD2vrVAAmysurRWvUBiIz8n5bu4sVewcGfCwubsV3mzPFEK/k+fWpYtOh/i8Tfv68wMXkGIUxMrDpzJhZtvHDhAmEpNzNSUlJI8xdCWFlZOXDiL0IQ28U4fvx4bm7uN2Y+eMzMzLDjjq2j/xFgisljx47t6Oiora2dNm0ahNDExCQpKam5uVlBQYGwy4QJE9CCVBMTkxcvXkAICYrGDAajrq6urq5u3Lhx5eXlycnJeM3o0tJSdXV1COHTp0/37dtHyDwoKGj06NHz5s1DLpWPHj3ClKPhn+vrz5075+npibYsWLDA3Nz89OnT8+bNw6QPPD09rayskpOTDx8+jLZYWVmFhoZmZGSoqKhgKzKPHz/u6+tLCODy5csODg6+vr4KCgovX74cQGxaTEysp6ent7dXSkoqOzsbr9rc0NBA0HR2dXVFTfT8+XOUIbuWd3Z2vnTpEvxTKZulhDeHX4Ih+843adIkcXHxhQsXlpaWGhsbi4qK0mi0GzduBAYGmpqatre3Ozs7b926FSWGENLp9P7+/gcPHhBWwpFIpJaWFrRA5+nTp2vXrsX+JCws/Ntvv+ETP3nyZNq0aexCcnPL2Lbtj56e/gMHws+ciWUw4NGjUdHRZRs3vujqon7+3KykdNfXN8fIyN/ZOe2v2Wa7uKSvWOFbWtoWGJifmVkXGVmK/bW6uvP8+ffBwQW7dhFfQNmxbNn/PtAhaeyMjFoFhf8JdsvLiyM3WvyaRS0tudLS1kOHIiIjS/ft00Ib58yZc/369S+WRaPR+vv7X758WVdX19vbi3//+PDhg7KysqenJ/6lHL8R/4yPCZUBAKKjox88eHD58uWHDx82NDQEBQW9efOmsLBw7969yOosMjLSycnp/v37e/bsodFozK8y9+7de/36taOjIz7Uzs5OlENra6uenp6Dg4O1tTV6McLD/PZDIpFu377t7e29c+dOLJNBHosBwBSTkQgqUkAFAMyePTs4OBhCqKWlxW5fTGWUWU101KhRe/bsOXny5IQJEzIyMvCa0QS1VXyG3d3dxsbG+fn5/Pz8NjY2AID29nb8SULQ70bo6+tfuHAhNjb26tWr2PuxoKCguLg4Xkv6/fv369ate/v27cDfaTMyMrZt22Zubl5aWmpoaDiA2LSQkJCQkJCAgMCCBQtIJBJe/RUJ8OIlZOPi4tCYBXbts2v5GTNmxMTEkMlkJSUlGRkZlhLeHH4JhmzPBwCIjY1VVFScOnUqGrW+d++egoKCiYmJtbV1e3v7zJkzsW+Y/f39zs7Od+/exYQfAQDV1dUWFhZr166NiopCt5gvWjpgfg4smTlTtr29V1iYT11dtquLSnBdYOfekJJSHRVVKi4uyMvL7eKSNnv2mLFjxZYv///BxZ6e/gU
LxunpKb9+XQz/zqgtidSpqTl6wgTxL1o3CAjwXLy49O3b8vfvK/n4/nfOKCoqIkXNgXn27Jmzs7O3tzdgcjNAh2Dt2rXBwcFYeg0NDWzjnTt3yGTymDFjcnNz37x5g6WhUCirV682MDDw9fXFfB4mTZo0evTo/v5+KpVqY2Nz5MiRXbt2NTc3P3z4kGAc0djY+OTJE21tbfxzDABAVFQU5TBixIgRI0ZMnz7dzc0tIyODoD1tamr69OnTa9euaWtro/vs/v37N2/evHXr1gULFgwbNgxlMvgDwQ70cIr/L/phbGwsLy9fUFDg6+s78C6AlZros2fPOjs7d+7cWV9fLykpyVIzmvlEunr1KgBATExs37596IsisoLDJFLxjyYI7E+8vLyCgoLYbEkpKSkVFRV7e/vc3FxUlq6u7q1bt3777TdMEJXlmSwjI4Ou0N7e3qysLELFmdMDADo7O9XV1fHqrzQajSAhKyUlhZ6H4J+KsuxaXl1d3cDAIC4uzsfHR0hIiCDhzVLNlcPPyZDt+chkspSUlI+Pj4+Pz969ez99+pSUlITmqixZsmTChAl4CxJ+fv79+/fb2dk9efJEWPh/I3NycnJOTk41NTWYRfgXLR0wP4fBwOy6wNK9ISOjdtWqiebmU8PCNt28uYI5HwUFCRKpIz6+EgDAxkGIBTQaw8/v08mT2gCAL1o3VFS0Bwd//vhx1+TJ0idPvkUb8dYqA7Bp06aDBw96eXmNHTuW+f2D2ecF4MxfCM/4WIJ58+a9efOmqKgIb+MC/nxUZ7bLIbzKyMjImJmZTZ48mdn8nfCwDwAQEREhk8n4NMxvP9nZ2eLi4gCArVu38vDwoEy6urqMjY2NjY1dXFy+2ErMlJSUIMXkkpKSxsbGpKSkd+/eNTU1lZaW3r9/39/f/86dO/b29njnptTU1Nra2sDAQBKJ9Pnz5/Dw8N7eXoKicXt7O9Kpefbs2fHjx9etW4fXjE5KSqqtra2qqnr37l1BQUFHRweWeWJi4pkzZ168ePHw4UP0rqyhoWFlZWVra/vq1asXL17k5eW1trZ++PAhMTGRTCbHxcV9/vz5+fPnt27d2rZtm7GxsZmZGZVKTU5O/vDhQ3Bw8P3792/evNnS0pKbmxsXF2doaDht2jRra2symdzS0pKZmZmYmEihUPBtcvDgQXd395UrVzo4OEyZMoWd2DSFQlFUVPT19fX390fevHjV5ilTphA0nQ8ePBgaGmpnZ/fy5cuqqqq0tDR2Lf/27VsPDw9PT087O7vc3FyChPekSZPQMwGHX4B/7LvqP8yDBw+qq6vR72nTpoWHh9va2qJBuM7OzpycnOTkZGRPmpOTg43zQQjj4uLKysru37+/dOlSCOH79+8lJCTQd3xvb29sHAVCyGAwCgsL8YWePn361av/DcKtXRuAH+fz8spKT69ZtcoXQujikrZ//5v8/Cb0XzOzwKioUgjhhAm3e3tpXV19Eyb8v+JtQECunt5TJAWZlERKTKwyNPTDF+rqmn7lSgKEUFb2en8/HT/OR6MxICs1SwaD4eWVhWQ/CwubGxu7lyz5n9Tn/PkP29qQDnIjNs73/HnesWNREMKOjt65c/83ipOXl2dqagohRGakLJGQkEB+2YiLFy+am5tDCHt6eiZOnAghHDVqFPNe2EZra2s0PkShUD5+/IiN861fvz41NbWgoGD+/PkQwmXLliG3z8uXL1+7do1CoQwfPry4uBgdkUePHlVUVMyZMwdCGBoaamlpSSKROjo6Pn36pKCgQNDYRDlACC0tLcPDwyGEGhoaBB9dzCM3KCgIZTt16lRkP5ufn9/a2opl8iNAgsjNzc0FBQXv3r1jHgxjycBqooPRjEaKtdXV1QQNVQaDgdxZ/xkYDAazZDZLurq68GcmQf2VICHLYDA6OzvZabFieHt7V1ZWFhcXp6Sk3L59myDhjVReOfwSsPg0PzQYPXr09u3bzczMmpqaJk6cqKuri9xJUlJ
SNDU1nZycnJycCgsLi4qK/Pz8KBTKjRs3hISEqqurX758GRcX9/r169zc3LS0NB0dnVOnTunp6QUEBGzZsgU9yBsZGQkKClKpVHNzc3yh69atCwkJWbNmTU5Ow8ePdS0tPfz8PGQyNTKydPPmaTNmjKqrIx8+HEml0ktKWul0Rm5u47lz7+h0hpdX1tixYo2N3UlJJCSEXVrapqgoAQAwMlJxc8vQ0Hgwb97Ykye1hYX5Pn1qdHPLsLaehQqVkBB0cUnj4+ORlBT28Mjs6uorK2srK2tLSKhKSiK5u+stXrzY2Nj4yJEjWJwHD0a8fFng5JTAYMD166c4OCzW11d++vQTlUrfvn2muLgghUILCSmsqGjPyWmYNm3k8uWKvr45ISGF+flNtrb/G1v68OGDiYkJAGDSpEnv378fM2YM4RD88ccfbW1tAQEBkydPRm/S+/bts7S0fP78eV1d3YMHDzIzM5HPi5GREbYXfuPBgwdXrlz59u1bDQ0NBweH69evI4MVKSmp06dPGxkZlZWVJSYmzp079+LFi4cOHUpOTubl5d25cyeyy1m/fj2yy3n27Bn+Vaampubq1atbtmyxsrLCj1S1traiHNA7U3JysrKyckVFRUpKCv4jNnr7mT59+uPHj9Hbz7Vr1ywsLKZNm7Zu3TpTU1OUSWdnp6io6LefyQSampqcnZ3FxcVFRUWbm5tXrGDxGYAZ5DvIDhGRLy+8QUeQ+ShzcXENxu/0e8HFxYV9lRkYQqUIH2PQ7FPMppGLi2vgJkI4Ozs3NDSoqKjU1dXNnTuXm5ubm5ubj+9/IwXfXSyXw49jyK7n6+vr4+Pjo1Ao3d3dMjIy2Pbvcj+qq6uTlpZmOaR/69atXbt2sbs4IYQ0GuTiAry83ACA3l6agAAPhIBKpSNXW3aQyVQRkf9dur29NEJiCoUmJMTb10fn4+Pm5v7frZzBgP39DAEBnvb29vfv3+O/FrKko6MPQiguLsjyrxDCmpouCQmhYcP4AAAMBuPUqVNojQGFQvlb13xXV5eIiMggVb8hhBQKhbk9UaE9PT3oT729vZhzN4LBYPT09LC8p6NznkwmD+ZmxwwqtKamRlpaGrufIveGQd6Uv5GGhoaUlBQJCYl58+axPAk5/CC6u7uTkpJoNNq8efPwM3Q4/HIM2Z7v3+In9HP4EdYKwcHBS5Ys4Vz8HDhw+BXh9HwcOHDgwOG/xZCd28mBAwcOHDiwhNPzceDAgQOH/xacno8DBw4cOPy3GLITw5iNAhQUFAZwYGhtbU1NTeXi4lJTU2tvb1dTU/unI/6Tzs7O2NjYL07FHHqQyeT379+vWbMG2/JFcwZ2ENwSGAxGcnKyoqLiqFGj2O1SWFjY19c3gPjcYEAGIPPmzfuWTDhw4PCjGbLvfMxGAQM4MCDXFUlJSWVl5bt3754+ffpfjLy8vHzjxo3oN0E78UfzDxdHgGCrNoA5wxfB3BKQjUNLS8vevXuZFVsQaAnzlStX8AJpXwcyAPnGTDhw4PDD+fcW0f9wmI0CWDowZGVljRw5sr29Hdvx+vXr/3iwf0FKSgpCWFVVZW1t/Y8V+g8XxxKCpAs7c4bBg9k46OnppaSkMCd4/fq1q6srhPD69etOTk7fUhYHDhx+FYbs104EjUaj0WgFBQVFRUXjxo3DtuMdGAIDA7W1tfFL02xtbZmzKisr8/DwUFNTi4yMvHjxYmhoaFpamouLy8mTJ0VFRffs2bN9+/Y5c+a8evUqJiYGKUQ8fvw4MDBQWVlZWlp61qxZVVVVoaGhVlZW+vr69+7dU1RUzMzM3Llzp6GhoaenJ51ONzQ0zM7OxhbaBwYGZmZmRkZGTp06NSwsjJ+fn5+f38zMDP2VwWAcP358+fLlDx8+fPDgQWBgIFbWunXrYmNjc3JypKWlz58/j9J//vxZX1//3LlzgYGBurq6e/fuJdQIK2758uVY6925c2f8+PEBAQGenp4tLS349L29vUZGRjY2Nki
fbNmyZREREaqqqgcPHjx69GhHR4e4uHhUVNTDhw/l5OQePnyINOTu37+PbxYBAYEpU6YEBQVZW1urq6sjW7WQkBBtbe0bN25gjV9YWMhcIwCApaWltLT0oUOHDA0N9+/fb25uvnv37sOHD9++fVtZWXnDhg1BQUGSkpJIZzwiIuLx48dIVgbtTqfTPTw8xMXF586dCwDIy8s7c+ZMTExMUFCQjIzMvXv3BAQESCTSqFGjbGxsoqKi6HR6YmKipaUljUZjjgdC6OLikpaW9vjxY3t7+7q6OgUFhaioqJCQEILQOQcOHP5l/u2u9wciJSV16NChW7dubd26NTAwEELo6empoqKydevWWbNmZWVloWSbNm3asWPHF3Nbs2YNEod0cXExMzNLSUkxMjKCEHp5eR04cADiHNEwcnNzp06dCiGkUCiLFy/29fU9dOjQzJkzGxoaNDQ0Ojs7kRzoypUrUTATJkxAr57onS8hIcHExARCyNIGjEKhPHr0CJX77t07rCwIId6NDK+pSDAbI9QIKw7j5s2bQUFBEMK3b9+Wl5cT0sM/XcoghGJiYt3d3RQKRVlZGUJ4586dY8eOQQgfP36sq6tbWFgYHR1NpVLl5OQYDAY+1Pv379NoNGdn53PnzkEmWzXsnY9djVJSUlavXg0hPHHiBJLKdHFxgRBevHgRvbjr6upi73xv376FEKqpqdXU1GA5ODo6Ojs7QwivX79++vRpCKGNjc3vv/+enp5uaGiI0owaNSo/P9/a2trX17etra2mpoZdPImJiXp6ehDChw8fHjx4EEK4c+dO1IYcOHD4eRiy43yITZs2HThw4OHDh1OnTkVbvsKBAYH5DCD5f+YEmCMa85aSkhJFRUVzc/MbN258+PBhAK8AlrC0ARMUFJSUlAwNDUXvtfjS8W5keE1FgtnYF2tEcLdgTo8ZpA0fPlxAQEBQUBBZB/Dw8CBhMG1t7fr6egUFBRKJFB8fDwBgMBj4UOfOnevn59fc3IxcFwi2algk7GqkpaVVW1tbW1vLw8Pj7+//4cOHWbNmAZzlAh70Ii4uLo6N77JMICEh0dbWhlUWBZmXl3f06NHr16/b2NiIiYkN0ML4pgasjO44cODwrzOUez68PZi0tPSrV6+QEbOsrKy/v7+lpWV+fj4AYNu2bWlpadnZ2diOBQUFAAD81FCA8zlrbm7W0NDg4eFBCZBS+8CRSEtLR0REoBtuenp6ZWXlzp0737x5s3fvXgghlhXSesf24uLiQjZvBBswxOfPn93d3fX19QUFBQkB4N3I0EQPPFhiQo2w4jBkZWWfP3+OMvz06RMhPbts8ZBIJC0tLQ8Pj8bGxiVLlqD2x/5Kp9M3bNiwYcMGNLyH3xHZqg2mRtu2bbO0tNy0aZOSktL9+/dnz54NcP5qWKWwLdgPQiMTEmCVBQC0tbXNmDGju7s7PT1dUlLS3d2dXTyEzNk1CwcOHP5dhuw4X0hISGtr66NHj9LS0tra2n7//XdkrMXswKCpqfnq1av9+/evWLFi9OjRPT09urq6ZDJZRkbm7du3aAQIAHD79m17e/tx48YlJSVdunRp5MiRdXV1hw8fplKpJSUlxcXFyBFNS0sLe1eIiooqLi7Oy8ubMmXKpk2b1NXVdXV1LSwsyGTyjRs3MK+AdevWHT582NDQcOTIkYmJieid4927dzNmzPj06ZObmxs3N3dSUpKioiKyAUMICAjk5uaeO3eOTqd7eXnNnj0bKwu5kS1cuHD58uWYazxmNtbd3Y3Mxgg1GjZsGCrO2toa7XL48GHM3eLatWuE9CUlJdXV1cnJyZMnT25ubo6MjJSWlm5vb0evg3FxcVOmTElOTj5//nx8fLyLiwsfH5+kpKSHhwedTkehTp48mZeX19bWVlpaOjw8fPfu3chWjZeXF9mq+fr6InMGljVCbNmyJSMjQ1VV9bfffkPv7pjlQmdnJ7Jx2Lt3b1FR0du3b0eOHFlWVpaUlIStWtHU1LS
3t1dUVExNTe3v76+vr09LS0Ou5ebm5leuXJk4caKpqamSkpKdnd2yZctUVVUXLFigqKjIHA+dTo+IiCgsLCwrK0tJSamurkaOcRQKZevWrT+PjisHDhw4up3/T0tLi6CgIPrkBQBITEyUkZGZOHEiPg3e6gFCSKPRuLi4BqmX393djTKHTF4BfX19AgIC6KMlfhdkQcBgMOh0en9/P8EKoLe3V0BAAEJIpVIJTgXIIHswNrn4GjE7HgAmd4vBmF24uLi0tbXZ2tpi84aQtQIy0MCsYQAADAajv79fQEAAc10gk8lCQkLMnysHqBGdTufh4UGHA7OMwWBZqUEm6O3t5ebmRoUyGAwKhSIgIICO0eBbmAMHDj8bnJ6PLT/C4uA/wtWrV1taWq5cufJvB/Jr09raGhoain4vWbJk7Nix6HdfX5+vry+dTt+yZcsX/aH6+/v9/f2xL/+INWvWSElJ5ebmJiYmjh8/ftmyZd7e3jw8PMuWLZOVlf3GsCMiIqqqqmbPnj19+nQshmfPnnFzcwsJCTEYjLVr134xZj8/P3RrUldXnzZtWnBwcGdnJ3rb/sbwOHAAYEjP7eTwr9Dd3e3n5+fr60sikf7tWH5tHj58iC5Sbm7uuro6tJHBYBgaGqIZW2ge6RdJTk5GiXt7e8vLyw0MDFJSUjo7O0VERHx9fYWEhPbt2zd58uQLFy5YWloOnNWFCxc6OzsHSBATEwMACAwMnDBhArbR1NTU398fQvjq1asFCxbg07Nzin/9+jUAYNmyZei/bW1t1tbWWGJ2ezFvH8CJnsN/GU7Px4HDT8rt27e7u7vR3F1sY25ubllZGYRQT09PVFR0MPlUVFQAADZv3gwhzMjIKC4uzs7OTktLAwAEBQUVFhbq6+vPnDmzqampsbERQtjY2EihULDdm5qaqFQqhNDb25uLi6u+vp6Qf0dHR0dHB/p97do1AEB9fT1a7oIYNmzYmTNn0G9PT0/0IzIycseOHa9fv3Zzc1u3bh2NRjtz5oycnBxalwIhnDFjBj8/f0NDA4Tw2bNnpaWlEML6+noLCwt/f/87d+4cOnSot7eXZW5Y0fv27auurka/6+vr16xZM2vWLB8fn1WrVhUVFQ2m9TgMSTg9HwcOPyN0Ol1CQoKbm3v58uWFhYXMCQ4ePLhixYrBZIX1fOXl5VZWVmjjhQsXAAA7duyIj49XUVEZO3bsixcvGAzGhg0bnJyc9PT04uPjyWTyihUr7t+/P3ny5Hfv3hkZGQEALl++jO8XDx48eOzYsQMHDhw+fLi7u1tPTw8AgNZWYiBthLlz50ZERKAtRUVFgoKCaHkohBD1i9HR0QCAkpIStPHRo0cAgIsXL0IIMXmdhQsX2tjYoN8zZ85Ey0ZZ5gYhJJPJEhISaJkmYvfu3bNmzYIQysrKnjhxYjCtx2FIMmR7vo8f61JSqlNSqslk6j9cdE5Ow8ePdQMkqKurQx9hEhKqXr0qolLpaDuJ1PHqVVFDAxlL2ddHKy7+/4XS5eVtb9+W9fXRIIT19fVokQCerq6uV69efa+K/KJkZmZ6e3sP/FGOHenp6fiXlX+Rzs7OK1eubNmyRUBAYNKkSYS/UiiU6dOno5e/L4J6Pk1NzT179mAfSFNSUgAA6GxZuXIl6g+QcmlJScnBgwf19PQcHR1lZWUhhJcvX46Kirp+/ToAAK/zFxsbCwDIyclBuSUnJzs5OQEAyGQyPoDKysqFCxeiL7eOjo4QQicnJ7SeBCVAlwOh56NQKFJSUmPHji0vL3/69CmEsLa2FgDw4MEDlGDbtm3jx49nlxuEMDAw8Pjx4yNHjuzr60Nbdu/erays7OHhMX/+/PLy8sG0HochyZBdzzdihNCCBY9ycxuHDSNO9vt2Ojv7BvjT3bupYWFsZYszMjJycnK4uLiuX08KDS10dIxbtcoXAFBS0nrxYvzy5Ypnz74rKWkFANTWdm3ZEvz77/9bY+7oGFdc3KqmJrNmzVM6HUp
ISHh7exNW4BFEn/+DREZG5ufnS0pKDlIlgAAmdf2vM3z48KNHj3p7e3/69IlEIqGbPsalS5e8vLzk5eUJ2wdg0qRJLi4u6PWroaGBZRpkZhIRETF//vz9+/dnZmai75zHjx/X1dVlTo9WsAwbNkxERAQAkJuby5ymq6urv7//3bt30dHREydOdHJy6u/vp1AoAABspQfLJR+CgoI7d+4kkUi7d+9Gr5to2Ss2n5afnx/NsGWXW2Nj45YtW5qamgIDA7FsRUVFFyxYUFFRERwcPHCLcRjCDNmeb9w4MXFxwRkz2LrSfDUkUufx49Hs/ioqKqCmJsPur1Qq1d/ff/ny5T09/fPmjXVy0o2O3pqWVtPe3nvu3LtNm6by8XHr6ytfvZoIABg9eri29v9rjcbElI8ePVxGZpiwMF9fH42fn3/58uV3797F56+urs5SvuS/Q2hoqJSU1OrVq7/OZ8rU1FRLS+u7R/UVFBYWurm59fX1TZw4cc2aNbKyshcuXHj58iUAwMXFpba2Njo6eu/evVlZWV/MCuLmb+/YsaOurs7FxYXwJ/QDzZwcOXLkunXrlJSUJk6c2NLSgopAvQsAgE6nk8lk9Bs1MoVC6enpAQBMmTKFucTOzs779+8DAJYuXYq0WLm5uVeuXAkAyMvLQ2mqq6tZRm5tbY38xdAU1gkTJkyZMgVTXCouLl69ejUAgGVuaWlpQkJCJBJpyZIl9+7dw2eroqIye/ZsLy+vLzYdh6HKkF3JzkxjY7ehob+npwGdzjA09M/O3t3Z2ffw4cdp00aGh5e4uq6xt4+pq+tSUJCIiioLCdkoIfG/NV4UCu3+/QwqlZ6YSHr+3DQwMD8zsy4ysnT58v9NsI6OLisra2tpocjIDNu+XZ19CODJkyfIAU5YmG/evLEAAEFBXgUFCXFxwbS0mmvXlgMAlJRGJCf/70aAfxbetWuWgYHf/v1aW7dOFxbmAwCMHj06PDx8x44d+AV2BNFngtbzu3fvysvLq6urR44caWVlhde89vX1xWtwX7hw4cmTJ2PHjg0ODr537x47zejs7Gx9ff3k5OTAwMA//vjjxYsXVVVVwcHB586de/LkCZlMDgkJcXV1xU9Gv3HjBqZSzWAwNm7cePTo0bCwsKdPn96/f59KpSYmJj5//hx7tK+rq7OwsNDU1CSRSCUlJSEhIcOGDcNS+vj4XLhwgUwmV1ZWbty48dOnT8LCwhISEtOmTXNwcFBRUUlMTNy0adOwYcOwgqysrBwcHDZt2vTmzZtFixaNGDEiKCjo0KFDWlpaJ0+eVFZW3rx589atW5E0GplM9vHxaWtru3TpElICsrS0xK+mxwtws9QK/zpIJNKxY8cuX75sZWWFbLMePHiwaNEiLi6uffv2YasUzp07N3A+/f397u7uAIDU1NTHjx/X1NS4ubmdOXPm7du3AICgoCAlJaWioqLW1taEhIRVq1YZGRlt3749KCho9+7ddnZ2kZGRBgYGGzZs2Lx5s6amJj8/v62tLdZxLl682MbGxtfXl0ql2tnZaWhoXLp0CQDg5eW1d+9eLIYnT56MGjVKS0vLz8/P3d2dh4dnzpw5Li4ue/fu3b9/P4RQSEhIVlY2MjISAIBkJdCOcnJya9euxbf2s2fPDhw48O7dOxKJJCoqiibUMOcmISFx/vz5a9euTZo0KTAw0NPT09fXV1dXNycnp6am5tmzZ/z8/FevXv2WA8Th1+Zf+87645GSupqRUYvfsnKlb1ZWPYRwwoTb7e29hYXN0dFlVCpdTu4mg8F4+DDz4MFwCOHOnaFBQZ+xvS5fjndweO/rm6OgcOfly4KEhCoTk2f4bENCCkmkjtzcxkWLvCCEt2+nODrGsQxp586d7969w28JDv786lURhHD48Evt7b0QQhKpQ1b2fzZJzs5p5879Lz2VSrewCJaSuhoQkIvtvmXLlqSkJHyGBNFnvLZyXV3d0qVLIYQUCuX3338naF4za3DPnj378+f
PSCabnUYzhHDlypVpaWk9PT0yMjJ9fX2vXr2qqKhITk42Nzf39fVdvXo10m7GIKhUS0lJoQwvX77s4ODg6+uroKDw8uVL/C7GxsZv3ryBEFpYWDg6OhJSOjs729raopSWlpZo7OratWu3bt2CEObl5cnKyjIYDKyg1tZWNOc+MzNTU1MTQhgZGbl9+3aIk7resmXLH3/8ASFUUVFpbm52dna+dOkSxIl0YwxGK/zroNFoSNAO0d3dzTyy+yNoaGjAxswghLW1tVi5ZDKZeZ1AW1sbfvCPAJVK7ejoIJPJ+fn5SOoPo7+/v6qqauBgmpubmTfW1tYylziY3DhwQAzZr52DQUFBgkTqiI+vBAAwGICHh1tAgBcAICoqgB/Jy8io3bZN3dx8amnpPkPDScz5zJs39s2bkqKiFhqNwfxXPG1tbXjVj9rarubmnjVrJgIAxo8X7+rqAwB0dVEVFUcw77t//5tTp3Tevt166FBEXd3/PjcJCQk1NjbikxFEn/Hayi0tLUjiRFBQcNOmTQTNa+YSL168uGzZMrSYmp1GMwDAysrq8ePHERER2traoaGhlZWV48ePz8jIWLVqlbm5eVhY2M2bN/HpCSrVvLy8KMOMjIxt27aZm5uXlpYSLOkJEtiElMxa4QCnx62qqorqjhWECW2LiopiutJo6ItZclpERIRMJs+YMSMmJoZMJispKcnI/OVrNjutcD4+PmNjY2NjY+wN6e/Cw8ODX6guLCyMl7/5ccjIyOC1hGRlZbFyhw0bxjwmJy4ujjf5IsDHxycqKjps2DBVVVWCCBEvLy+2PJ8dzEcWhcRc4mBy48ABMZR7Pgbj/wcb2tt7Q0IKeXi4+vpoAICenn4IoYdHZmNj95Il8nQ6A+JGJuBfdW1kZIY9ffoJANDbS8vKqufiAv39dHyCPXvCpk8fOXmyNGFHOp2oj6OiooLcDAAAbW29CQlVVlYzAQBFRS0rVijm5zcBAPLyGpculWeuTlISSVZWZNq0kevXT8nJafgzkzaCvhoGEn3GaysDAOLj45ubmwEASUlJBM1rZg1uWVnZ7Ozs0NDQDx8+DKAZbWBgEBMTU1JS4ujo6OTkJCcnBwCQkZFBi5cBAGgl9Z9twlalWkZG5unTpwCA3t5edsNXSAJ7gJTogQ7g9Lh7e3slJSWlpKRYZgiZBKaZtwAA1NXVDQwM4uLifHx8BpBNwbdSd3d3cHBwcHCwjY0Nu/QcOHD4VxiyPV9ISGFrK8XVNd3ZOe3s2Xeamh5TpsisWzf58OHIGzeSR44clphIkpAQ9PbOvnUrRVJS+MGDDykp1bm5jbW1XR8+1KWl1WB3wIMH57q7Z6xc6evgEDdt2sjJk6U/fWp0c8vAypKSEj59OjYmprysrC0ysjQ5mZSRUdve3rtjR4i//19mu61bty4zMxMAQKHQ9PSenjkTq6rqoqBwp6ys7fDhecHBBWlpNZGRpYcPzwMANDZ2x8aWf/xYRyJ1AgD27dOyt4+Jji5rb+/V0RmPMqRSqZMm/eU1FIk++/v7I9FnpPVsZmYWExMzZcqU48ePT58+3djYGKll4jWv1dTUkAY3MvLt7u52cHAoKChYsmSJmpoaPp9Ro/4yb4iPj2/dunXLly9XUVGZMGECmnRgZGREJpM1NDT27t07fvx4LDF637K1ta2urg4PD3/z5k1rayuauHHw4EF3d/eVK1c6ODig0VA8AQEBISEhFApl48aN+JQTJ05MTExMTk5Gq6ezsrIiIiI6Ojr27dvX29v7/PlzDw+PBw8eZGZmYgW9e/euubm5uLj4/fv3ZWVlpaWl8fHxFRUVRUVFycnJycnJJBLp8+fPycnJ5eXlFRUVKSkpb9++RYrndnZ2hBmMmC45AGCAVuLAgcPPw39Ot7Ovjy4gwEOjMXh5uQEAFApNSIi3r4/Ox8fNzc1WTR9CSKHQ0LwSAEBvL01Q8C+Tg1A+PT39WBqUTECAh/B16NatW7t27SJ89sFKqa0
ly8qKsIuETKZ2dPSNHi2C8gwPD+fn51+yZAlTsr+IPhO0lXt6evj5+dHnLILmNfyrBjeDwejq6sI+K31RMxqlwScgk8lovjseZpVqfAtQKBTmxjE1NbW1tVVXV8c0vtmlJNDV1SUiIvLtPgk+Pj4LFy6kUqktLS0pKSnYFAxmOErWHDj8/Pzner5/HQaD8erVK319/W+8HVdWVpaVlS1evPh7BfYzs3r1altb21WrVv1bAWhpaZmamqqoqNTV1U2fPh25AHLgwOEXhdPzcfjZqa6ujomJERYWXrVqFeYh9Q/T3d2dlJREo9HmzZs3wGwODhw4/BJwej4OHDhw4PDfYsjOcOHAgQMHDhxYwun5OHDgwIHDf4sh2/OhwaEBEtTX1yclJX11/lQq9f3793V1dYPfJSMjA5NRbmtri4yMDAwMJJhlD0xYWFhvb+/fC5R9DN8d5jZpb28PDw//iqyYj843Hi88DAYjMTERvyoRy/yLpw27HL47RUVFqampaWlpLS0tqampqampaEXm58+fU1NT2UlO/5M0Nja+e/du8Ol/6Ln3g+jo6Hjx4gV+QepXMPhTF0KYlZWFdMO/DjqdjpbufAvf8Vr7aRmaPR+DwQgLC7t8+TLLv3Z2dgIAmpqaiovZOip8kaampoMHD5aVlQ1+F8wHgEwmnz17dvny5WQyuby8/Is7ooABADk5Od3d3V8XMCGGHwFzm2RlZVlbW/+tTJiPznc5XnhaWlr27t2LvwujzAc+bRB0Or27u5s5h++OiIjI2rVrP3/+LCEh8fLlSwsLCyQRICIicuXKFQkJiR9X9BdBR+SPP/64cuXK4Pf6vucedlH8UA4cOLBixYoPHz58SyaDP3U7OzvPnj37LR1tX18fWjH8LXzHa+3n5Z+QSPs3yMrK0tXVZd7++vVrV1fX71KEkZFRQkLCV+wYGBh49uzZQSbu6+tbu3btV5Tyr0BoEzKZjBQyBwnz0fmOxwuPnp5eSkoK83Z2pw3G8ePHc3NzB8jhO3Lu3Dlzc3MIYXt7u5CQEDIlj46Ojo+P/6HlDkxVVZW1tTWEMCMjY+XKlf9KDP/MRdHc3DxnzpwfXQqBw4cPe3l5/cOF/gcZ+l4N0dHRZWVlLS0tMjIylpaWHh4e4uLic+bMSUxMTEtL8/b2Lisr8/DwUFNTi4yMvHjxYlJSkpub27p16549e3b58uX58+djWeGNDnbu3Ik2MhgMvOMBmUwOCwvj5+fn5+c3MzO7d++eoqJiZmbmvn37kA/Atm3bwsLCurq6/Pz8fHx8lixZcvjwYULOBDeDt2/fBgQEzJo1a9euXVeuXNHQ0Lh3756AgACJRBo1atTGjRsJxgL46p8+fXrs2LH+/v46Ojo7d+68dOmSsrLysmXL9PT0fHx85syZs2fPnuPHjxcVFVVVVYWGhlpZWSkpKenr6587dy4wMFBXVxcvup+RkREeHl5TU9Pa2mplZaWtrY33Q9DR0cFSPnjwQEREhPmdODIyEito/vz5+Mi9vLwIR+fx48fMx4vgGoE3lCCU1dHRoaure/LkSVlZ2fXr18fExMjIyBw4cMDDwwMAEBER8fjx47a2Nn9/fxcXF5Q5ti9Lb4qGhoagoCBJSUn0yoXlEBAQQKFQMPsIPz8/ZKfg5uZWXFzs6+uL/Fr/Lps3b546dWpHR0dbW5u4uPjjx48dHBwSExNPnz7NzjoDgT9/1NXVsZPQ3t4e31z4Y6Gvr4//0wCtGhgYmJmZGRkZKSkp2draeuPGjZCQkHPnzi1evJiQIX6vzs5OdP5v3bp1165dEyZMAADEx8efOnXq/fv3paWlL1688PT09Pf3X7hwYVBQ0M6dO62trfHn+fbt2+3t7ZEph4mJCboojI2N7e3tsauvurqacOoSrix2/iHYyZmZmSkhIZGdnX3nzh0/P7/GxsaAgAB9fX2CZgK+hcePHz/ABQghRGfX48eP7e3t6+rqFBQUoqKiQkJC8C/uNTU1Xl5e48e
Pj4uLQ95P+LpbW1tfvHgxNzd3xowZQUFBdnZ2NTU1YWFhPj4+srKy+PvPx48f9+7dm5OT8+zZM3b3MURUVBSdTk9MTLS0tOTn53/x4sWIESMSEhLu37/PfDkMQf7trvdHgT28h4SEkEik3NzcRYsWQQgdHR2dnZ0hhImJicifes2aNR8/foQQuri4mJmZlZaWqqurQwifPn26b98+LMO+vj680QH88/2G4HgQEBCwc+dOOp2el5fX0NCgoaHR2dmJ7A4wHwDMpuDy5cvXr19nzpngZoAkLiGEy5Yty8jISE9PNzQ0RFtGjRqVn59PMBbAYs7NzV2+fDmEcN++fehBEovB1tYWeVvfu3ePQqEsXrzY19f30KFDM2fOhBCOHTu2o6OjtrZ22rRp+FY1MTFJSkpqbm5WUFCArPwQUJvk5uYi24empib8Ox9zQYTImY8O8xaCHwLeUIIZJyenq1evQgjnzp2bnp7e3NyM1ET19PTevn0LIVRTU6upqcEyx04bdq4Lurq62DsfPgeCfURZWZmKigqdTg8ICEDval/H3Llz3d3dr169GhoaKicn19nZiaozsCkE/vwhnIRYczEfC3xLDtCqCQkJJiYmEMKMjAxtbW0I4fPnz3fs2MGcIQHs3MPORgMDg7CwMAjhvHnzSCRSdna2lpYWhLC8vFxYWDg5OZlwnuNNOdBFQbj64F9PXcKVNYB/CISwr69PSUmJRqNBCE1NTd3d3SsqKpBVPTOEK5TdBYjAzq6HDx+icnfu3BkUFIRPs379enQXsrGx8fLyYr7Gg4KCLC0tIYQ3b948cuQIhPDkyZM+Pj7MLSAhIQEhZHcfw7C2tvb19W1ra6upqTE2Nm5qaoIQPnnyhEajYQEPYYbmOB+eefPmvXnzpqioiGBHgIl7YaL+CxYs+PTpEybSLyoqih9LKC4uxhsdYNsJjgf6+vpdXV2ampoCAgIyMjJmZmaTJ09GA0LMnrFIxoU5Z4KbASFmLGCUMi8vj2AsgKUfN24ciUSqqakREhJCSphYDHv37vX09ExNTV2wYEFJSYmioqK5ufmNGzfQkAbKkNACAIDZs2cHBwdDCJF9K7MfAkqWkJCgoKAAACCIO7MriDly5rbCthBcI/CGEsxYWFj4+vrW1tYqKSk9efLkjz/+wFwg0KJ4cXHxtrY25uIG8KbAwOdAsI+Ql5efPn16SEhIbW0taoqvY/PmzQ8fPuzo6FizZg0/P//u3bvRu9TA4eHPH8JJiDUX87HAt+TArUpoAQkJiba2NuYMCWCNzM4rA3PkmDBhwogRI1JSUgjnObMpB7PfCP7UJVxZA/iHAAAqKyu7urpQkOhWMEDFCVcou9OYUHHCvSU5ORm5ebx//z4+Ph5/yTBf4+wajWULMJfFXIWjR49ev37dxsZGTEwsOztbXFwcALB161YeHh4s4P379xsbG//dofpfgqHf8+3Zs2f69OmTJ0+GEAIAuLi4+vv7AStR/+bmZg0NDWxHyOQkgDc6wLYTHA/KysqePn169OjRAwcOVFdX79y5882bN3v37sWeNcBfjbAhhIScmd0M6HQ6Pj0WMACgra1txowZ7GIePnz4kSNHUlNTDxw4oK6ujq+1srKyhIREQEDAjBkzpKWlIyIi2traAADp6en44giNaWxsLC8vX1BQ4OvrC9j7IUhJSSUmJqIc8JNX2RWElcV8dJi3EFwj8IYSNBoNnycAYNSoUYqKivb29g8ePHj//n13dze6oeCPBf7QYLDzpmCOBzuIBPuIY8eOXbhwQVZWFnwDGzZsyMrK0tbW5uLisrS0zMvLU1FRGSA8wOSGQTgJseaqq6sjHAt8S+J/E0Ji1wIDHFwEcyMDVucYAKC3t1dUVHT27NnsznPw50VBuPoI2RKuLHb+IYixY8f29PQgw3d0K2AZMHMLf7E6zPmg/86dOxe5eSxcuFBSUhLdVdAlM8A1TiiFuQUGExIAoLu7Oz09XVJS0t3dfdiwYWgO9uf
Pn9va2rCA79y5Exwc7ObmxjKHX5oh2/NFRESUlZWVlZVJSUmdPn06JiamrKwsMTFRU1PTz8/v9evXERERhYWFFRUVt2/ffv36dWRkZFxc3KVLl5KSkmpra6uqqt69e1dQUNDR0YEylJaWxhsdVFZWFhQUxMbG8vHx4R0P4uLiPDw8pKWlTU1NOzo67O3tKRSKlZVVW1sb8gFobm5OS0vLyMhAPnOpqakiIiL4nAluBtXV1aqqqqdOncrMzCwqKoqMjJw3b565ufmVK1eCgoJMTU2lpKQIxgJYI9TX1zs5OXl5eSEv8tbWVhQDega0tbWdOnUqAGDkyJGbNm1SV1e3srLq7e0tLy9vbGxMSkp69+5dU1NTaWkpluH9+/f9/f3v3Lljb2/f2tpK8EPA2kRPT09ERGTr1q23b9+m0Wg5OTlod0JBXV1dhMiZjw7zFoIfAt5Q4syZMwYGBoQzYffu3erq6oKCgvr6+kuXLgUA1NXVFRUVvX37try8vKysLCEhAcscO23YuS7MnTv34sWL79+/x+eQlJTEbDShrq4uLy9PGO76u0hKSv7222+6uroAAAsLi/Xr16PtA5hCEM6foqIi7CTk4uLCmktHRwd/LHh4ePAtif9NCGny5MmfPn1yc3NDjVBdXR0dHV1YWMjHx0fIEL8Xdu5VVVXl5ORkZGTU1NQUFBS8ffuWRCKVlZWlpqYCAEpKSgICAu7cuePq6ko4z8eMGYOZcgAA0EXR19eHv/oKCwvxp25nZyf+ymLnH4IQFBR89OjRlStXoqKiGAzGli1bXr58WV5enpGRQUhJaOH8/Hx2FyAAgE6no7OrrKwsJSUlNze3trb2w4cPaWlp+D7p0qVLhw8fPnXq1KdPn7KysjQ1NfF1l5eXj4+PLy4urq2tTUlJSU9PR5nk5ORACPEtEBkZ2dLSkpCQwO4+hvHo0aPo6GhVVdXly5dfu3bNyspq+fLlcXFxoqKi2OXwxfPzF4btd9AhBDK2xvygKRQKc5qOjo7BZNXd3Y23q8YyZDAYdDqdQqHQ6XQqlYrKYjAYDAajs7Pz7+ZMp9N7e3uxmGk0GpVKZS60r69v4DwTExMTEhIqKioyMzPRgAQBfF3IZPLAuTEYjNu3bzc3NxcUFLx7987X1xdt7+zsZPbphhB2dHQgWwbC9oELYj46zFv6+vqwutPpdMyem06nP3nyhDlPNHjzxeYaoJSB40EwGAy85zhqrr9VIrsw2P1mVyP8+UM4CfHNBf96LAgtOYDNOrsWgIM4iwYgNzdXV1e3q6sLbz3P7jzHLgr81ccyW8I129XVNUAMdDp94ARYMvwV+l2gUqkUCgV/pQ/mGoeDaAGW0Ol0MpmMtUx/f/93rMvPD0e3cyhjZ2fX1dWlr6/f0NAwcuTIb3z/aGxsnD9/vr29vaioaHNz84oVK8aNG/e9Qv0ukEgkKSmpAZxj/zFOnDhRWFh448YNeXkWJsMcWJKWlmZra4te/jhw+KFwer6hDJ1O//DhQ01NzaxZs8aOHfvtGTY0NKSkpEhISMybNw+Nt3NgSWpqqpiYGBqT4zBIwsLCmpubp0+fThjW4sDhu8Pp+Thw4MCBw3+LITvDhQMHDhw4cGAJp+cD4Dvp0nZ3d7NTZ/52pWkC311SdvDS0p2dnX/88ccACX4eudufJxIOHDj8VHB6PgC+ky5tV1cXNn2fADulaSR//LdKYSfo/I0MXlq6vLx848aNg4zte/F36/jjIvkJ+XYPkL9FXV1dXFzcAAnQvPyvyxyJ/33dvt/ItzwnUSiUV69eDT59RkbG8+fPCwsLv644dnz3J+whDKfnAy0tLQUFBSIiIniByq9g1KhRR48eZfmnEydOELQnEPb29n9r0cybN29+//13AMDUqVMtLCzwW74RTU3NQaacPn06Uu74Ymzfi79bxx8XyU8F9tj01R4gX0dAQACzmCdGZ2fn3bt3v7r3Ki4u3rZt29eG9pV8y3MS2reiooLd4yAzrq6
uAIBFixZFRkYOvojB8O1eLv8dhub0vL6+vqNHj3Z0dIiLi0dFRT18+HDOnDl4sdq4uLjAwEBlZWUpKSkxMTFMl/bhw4csFXItLCwcHR1tbGzS09MhhMuWLYuIiFBVVT1+/DhWaGBg4J07d+Lj4wkS2DQazcrK6sqVK8LCwng5XVNTU0z+ePTo0SgTvOYvQTna2tqanaDz7Nmz3dzcWEokV1dXP3z4cNq0aeHh4a6ursyauSylpevr67dv3z5nzpxXr17FxMQEBgYy6/wScv6iFDhaX8FSCBtf65kzZ27ZsmXVqlXc3NwCAgJYrTU0NJ49e4YOx4kTJ169eoWp9AoJCd25c2f8+PEBAQH379//lkh+Iezt7Tdv3jxlyhRTU1O0JSIiAimZWVpa/rhyNTU1kUAPS0RFRdXU1JgVvAaJuro6s5LcD+XNmzcVFRXW1tZTp05Fwg6Dh0qlbtu27cWLF6qqqpjY2Be5f//+7t27ubm5bW1tB1/EYHI+ceLEIGPgMGRXst+5c+fYsWMQwsePH+vq6hLEanNzc6dOnYpSYrq0zCqxeIXcadOmVVRUQAjFxMS6u7spFIqysjK+xJaWFiUlJcgkgQ3/VJqGTErQmPwx4ovK0QMIOrOTSC4sLIyOjqZSqXJycgwGg6CZy05aGkJoYmLy4sULCCGzzq+UlBRzzl+UAmeuDrtaZ2VlIT8KlADLGTscBJXemzdvIv1fJKry1ZH8bHh5eTk7Oy9fvhwpmyxatOjevXva2toJCQn19fXKysrXrl0rKCiwsbG5c+dOZ2fntm3bTExMnj59umrVqmvXrkEIY2NjHz16dOHChfv372PZ0un0/fv3m5qa9vf3Z2ZmHjlyhEKhnDx50tvbe9euXe/fv4cQPnnyxM/Pb9euXampqXQ6/ciRI1FRUWZmZp2dnQkJCdra2vv37580aZK/v7+rq6ulpWV3d/f+/ftPnz4NIbx9+7ajoyOE8Pr162/evNmxY0dmZmZGRsbEiRM9PDyMjIzu37+vqanZ1NTU0dGxfft2JDSBIS0t7eDgoKmpeejQofr6+gULFqCjef/+faQKjairq1u9evWFCxdmz57d1NR069atK1euGBgYdHZ27tu3z8LCYv/+/ZMnT05OTia0JIlEOnfuHLKDoNFoxsbG27Zty8zMvHfv3pYtW2g02vHjxy0sLM6fP79gwYLW1lYI4cOHD93d3Q0NDffu3Zufn4/FkJGRISYm5u/v39/fLykpiWp348YNCGFBQYGbm5u1tfWZM2fwtUtKSho+fLifn5+rqyuSls7Pz1dUVPTx8TE0NETnfEREhIeHh4GBQUhICFZERkbGpEmTIISPHz9eunQpc5zl5eVLly7NyMhgzjA9Pd3BwWH37t3r16+PjIzEx4OPs7q6evHixVevXr1+/Tp29Q1VhuzXTkz9VltbG+mE4cVqmaVvASuVWHwyTDF2+PDhAgICgoKCra2thBIJ+WC6twTJWnYasoNXjmZ+LmYnkaygoEAikeLj4wEADAaDoGPLTloaAIDVnZ3OLyHnQbYDc3WYaz19+vQJEyYwvzdgIRFUepOSktBL85IlS5D9zddF8lORkpISFRUlLi7Oy8vr4uIya9asjo6OvXv3WltbP3v2bOTIkePGjVu1atWkSZNGjx7d398/fPhwDQ0NNTW1jRs36ujocHFxUalUR0fHbdu2HTlyREREBMuZm5v7+PHjmZmZ3NzcFRUV+/fvd3Z2lpaW3rJly759+8zMzD58+PDx40czM7Njx451d3dTqVRVVVVdXV0ajYZcT/n4+G7fvv3777/v2bNHXV29vb1dWFhYXV29q6sLX4Xhw4cvW7YMnZYaGhptbW1r164NDg62srLq6uoik8lkMllHR4dw7lGpVDs7u/j4eG9v74aGhp07d378+BEA0NbWhpdiGDVqlLCw8JQpU1JTUz09Pclk8pgxY3Jzc2NiYhQVFUeNGnX79u0jR46cPn2a0JI9PT0LFiz
Q09N7/fo1Nze3hoaGhoaGurr6zJkzkXb5xIkTR4wYcebMmcmTJyPT+fPnz+/YscPCwoJMJquqqmIxaGhoCAkJbdiwgZeXl8FgbN682cPDw93dHQBw4sSJYcOGzZgxw8nJCX+jmDt3roCAgJmZmbW1NTc3NwBAVVWVSqUaGBi4ubl5eHj09vY6OTkJCQkpKSmdO3cOK0JDQwMpkC1evLilpYU5zgkTJrDMEABw5cqVpUuXOjo6ZmRkLFu2DN/a+DiFhIRu3boVEBAgICDwi34IGTxDtufDIJFIWlpaA4jVokcAgNNfBqxUYgm7DFAiswQ2VgRhd0z8F/FF5egBBJ0BG4lkDw+PxsbGJUuWIEUoQgzspKXxsGs6Qs5fLQXOXOv4+PgdO3Z4e3t//vyZuZUAk0qvrKzs8+fPAQBdXV2fPn36ukiYFZb/XQgPHANI7w/eAwRj1KhRWlpa4eHhDQ0NY8aMIXhuREdHIy1QeXn5xYsXM7sBoG+qM2fOBAAMMKWCYGjAy8uLduTm5t6/f//t27fDwsLWrVtH2EtISEhISEhAQGDBggUkEsnMzOz9+/ckEklERATVCwP/cIY3yhj4qfeLT2zMTa2mphYeHs7Dw4M/eQjw8PAICgoi2wowOK8PrDr457AvWl7gSyTEye4Jm2CxgocQJ+Ghk+PV8EsSFxf38uXLN2/enD9/niBWGxUVVVxcjOzHMF3aARRyS0pKqqurk5OT8/Pzm5ubIyMj09PT29vbWVqZECSwS0pKkNJ0SUkJQQkayR/n5+ejHb+oHM1O0BktSGApkSwhIeHt7X3r1i1JSckHDx4QNHP19fVZSkvX1NR8/vw5PDy8t7eX0HRJSUltbW3v3r3D5+zh4fFFKXDm6rOsdUlJye3bt1esWPHbb79t3LiRRCKhnENCQrDDISAggFfpPXDgQEhIyIIFC86cOaOqqvoVkXh7e9vY2PzAc/Hvw+6BY4DHIOxPaAs7dxGEra3thQsX0Bs/wXNj4sSJQUFB6FEgOTmZnR9CT0+PvLy8iIhIX18fAACpPmJ/HcDQAABgaWmJ/JlZzpZCdHZ2qqur8/Hx2djYrF+/Xk9Pb4C2IhhlIFg+9X7xiY25qe3s7BoaGiZOnMj8JkR4PMUyGcBMA3+YmLczPwgSrFoI7QyYjjvzdoLFCh5CnISHziHs1TBkx/mcnZ0dHBwIqruD0aIdpEosM62trXgfy0FKYDOLzH61oDM7iWQ0jtLb24sXAsbDTloaD8umI+T8LVLgX1FrgkovvqC/GwkmQPzz0NfXt2jRInV1dRsbm5qamqdPn44bN66ystLOzm7OnDnt7e1nzpxBRsEGBgZr165tamrasmWLnp5eXV3dunXrTExMenp6HBwcRo8ebWRkhDxLCZiamqID19XVhcy779y5Exsb29/fv3r1alVVVXNzcxKJVFpaOnbs2LNnz5qampqbmxcWFmpra4eGht65c+fDhw/9/f0zZsyws7OztbVdtWpVXV3dhg0bjIyMWltbp06dumvXLnt7ew0NjdevX/Pz86M3D4Sjo2NCQgJzVPPnz/fx8fHz83v69Cna0tPTY2xsTEhWXV09ffr048ePUyiUoqIiBQWFFStWnDx5kk6noyG94ODgo0ePVldXE1rSz89vypQpN2/eVFNTc3V1jYyMnD17dlhY2Llz55SUlEpLS3fv3q2np1dTU7No0aI9e/YwGAwdHZ2VK1du2LCB+frS0dE5efJkTEwMACAhIeHNmzc8PDz5+fnBwcGjRo3asGHDw4cP8elzcnJ4eHhiY2NTUlK4uLhSU1OLi4uFhYVjYmJCQ0OHDRtWUlJy7Nix8ePHb9++PS4uDiuira1t+/btGzduvHnz5vjx40tKSghxFhUVycvLX7x4kTnDw4cP6+jomJiYnDhxguBjjI+zoKBg7dq1EMJr165Nnz69qqpqUGfqr8mQVS+7evVqS0vLlStX/pnibt++raWlFR0dffr06X+mRAIcieQ
hCZlMxg/REejt7f3ilMKenh5+fn6WIqs0Gg2/vaurC/9FsbOzU1RUFCtIQEAAQkilUlGJHR0dYmJi6K8QQhqNxsXFRSgFPU4JCAj09PQICwsTSr9z587+/fvZ1VpISAj7dlddXZ2SkmJiYjJANSGEFAoFleLi4tLW1mZra4tFCP7akhQKRUhIqK+vj4+Pj5ube+BmbGxsjIyMXLp0aUtLy8ePH6dOnYofB6HT6QwGA31VJkClUgEA/Pz8A4TNju7ubuxtGF9EX18fPz8/GrAfZFYQwrt3727evLm5ubm+vr66utrc3Px7xfnrMjRXNfT09IwbN27MmDHV1dVycnL/QIlaWlpkMvnf6vYAAEZGRmJiYpxub4gxQLcHABjMTHrmLgeD0FGhsTEMrNvDCuLi4sJKxHcqXFxcLG/9aF0Kcwy+vr5hYWED9GRYrXt6enbu3AkA8PLyYpcYiwErpbu7u6urCx8h+GtLojk1KDbwpWZEM2iEhITQtw38DBcAAN7BnMC39CX4j8D4IlDMf2vhR1NTk7Ozs7i4OGax8h3j/HUZsu98HDhw+DmpqKiorKxcuHDhYBKHhYVpaWlJSUkNMvOenp6QkBA6nb5w4cLv9dRbVFT06dMnJSWl6dOnf5cM/2E4FivMcHo+Dhw4cODw32Ioz+3kwIEDBw4cmOG8+XLg8GtDp9NDQ0ONjIx+dEEfP35Eoj+EEcEBqKioyMvLmzlzJoVCwQssMEOn0zMyMtDv4cOHy8vLM6srfHfCwsKWLl2KjfNRKJS3b9+yWz7x6dOnnp4eAICAgMD48eMlJCS+utyKioqGhgYAAFqN3tTUVFZWJiAg8A9b8hYVFbW1tXFxcSkqKpaUlAAARo4cOWHChM+fP3d2dk6YMGHkyJH/ZDz/JJx3Pg4cfm36+vqQtMq38EUtm8jIyPz8fElJScwX4ot4enreuXNHWVn58uXLX1SeRPM45syZ09DQkJGRsXDhQj8/v0EW9NVgEs+DEZ4eM2aMqanp69evSSSShYXFwYMHv7pcSUlJOzs7Nzc3tMhdUlLy+vXr/7BgKQBARERk7dq1nz9/lpCQePnypYWFBRoZFRERuXLlyrd07T8/nHE+Dhz+62CqzQOksbW11dPTY54ZOAAbN25cuHDh7t27AQBPnjxBvhmBgYGlpaXjx49PT09fvHjx5MmTDxw4YGJisnXrVjqdzsvLW19fP3LkyIcPH3p4eKSkpDQ1NR08eHDy5MnKysoFBQV6enrDhg3bvXv3pk2bSCTSvHnzli9f7urqSiaTSSTSiBEjBAQEpk2bNsCyd2aoVOrGjRtR3ywtLd3U1MQupba29t69ezds2FBaWjpx4kQymYxNKHVycjp69CjSDwMA3L9/n5+fPzk5ef/+/TIyMoQqzJgxIzY21szMrKqqSkBAgE6nnzx5Ei3B+vTpU0xMzOjRowsLC3fu3CkjIwMAyMrKCg4OVlZWJpFIAABMKB9faFtb2+nTp0eMGCEjI9PV1XX06NHBdKXnz59HGvcdHR2ysrK5ubkKCgpv375FGjqDb8Nfj39vKSEHDhzYkpaWNnHixHv37i1btszCwgL+VcgYr9ccERGB5NdfvXqlpaV1586dlStXOjk5PXjwYOXKlUihGL8vQdEYU23++PFjZGTkmzdv7O3tS0pK8MHk5OQsXLjw6NGjqampBHlrvBQ1oQrPnj0TFBS8desWJqEQFRW1ceNG9LupqSkgIABCuHDhwuLiYgghEjmrr68nkUiGhoZIdBtCaG5ujlbid3d3o+XVioqKvb29jY2NY8aMgRCmpKRACI8fP+7r6wshTEpKwoeRlZU1duzY6urq27dvL168uLW1NSsr6+zZs5jE8wDC0wQWLFjg7+/f0dFx/vz5NWvWYNubmppmzZoVEhKC/tvd3a2rqwshrK+vj4mJYVkFCKG6ujoSXg8NDY2NjYUQtrS0zJo1i0qlQgjLy8vnz5/PYDDq6+tnzZqFxBkYDIarqyv
LQiGEp06d8vLyghBu3LgxNDSUzZn1F0pKSoSEhNrb28vLy2VlZe3t7SGE58+fZzAY7ES3hwacno8Dh58UKSkpMplMp9Pl5eVjYmIIjhaYmQaEEKn+t7a2IsONzMxMTU1NCGFkZOT27dsH7wFibW3t6+vb1tZWU1NDCMbS0vLVq1cQwmvXrt26dQtCmJeXJysry2AwpKSkCMogGEFBQaNHj543b159fT2EcOvWrVjMEEIkG7Ro0SLU0aKe7/Tp00inBhUHIdy8eTNyFrt8+TLaoqSk1NbW5u7ujvWjENfzMbNy5cq0tLSenh4ZGZm+vr5Xr14h3xXMRAVJrKGWpFAoTU1NEydOZM5nwYIF5ubmp0+fnjdv3tWrV7HtSO5r6dKl2BYDA4NNmzZhskHMVYAQ+vj4qKmpQQgPHTrEYDAghE+fPv3tt9+wBGpqasXFxY8ePcIcY7BGY1movb392bNnw8LC1qxZ09jYyLIpmJk7d667u/vVq1dDQ0Pl5OQ6OztR1YyNjb29vdHLK7vj++vCGefjwOEnhZeXd9iwYdzc3PPnz8/KyiIIGeONRNB3LcxORFRUFNMybmhoGLwHyNGjR69fv25jY0NYBo6HIG/d0tKCSVET6O7uNjY2zs/P5+fnR7Ko7e3teNVplmvLbGxsrl+/7uLiYmxsXFdXhzaKiIhISEjgV8Tfu3fP29vbx8dnMC1pZWX1+PHjiIgIpLtWWVk5fvx4wGpJOEF4mhl9ff0LFy7ExsZevXoVfSCl0+l5eXkNDQ2NjY2Y8m1wcLC6urqWllZ1dTW7KmzYsKGtre3x48ejR49GzUKhULD19QAAfn7+np4elo3GslAAAB8f3/Dhw3l4eAbvsrt58+aHDx92dHSsWbOGn59/9+7dSPt3MKLbvy6cno8Dh5+d2tpabW1tlj4eCMg0Wo/fMngPkO7u7vT0dElJSWS1Q8gQ/tX4AslbD7DG/OrVqwAAMTGxffv21dTUAAAWLlwYGxuLJUC6WRh454Thw4dj1hAAgHHjxqmpqe3bty83NxcFc/ToUU1NzcuXLw/QCBgGBgYxMTElJSWOjo5OTk7YCnesUuyEpwlgEfLy8goKCiKfitDQUFNT0wkTJhw6dOj27dsAgOTk5I6OjsOHD1tYWAQEBLCsAgCAj49v3759tra2mzdvRlsWLVqELJkAAL29vZ2dnSoqKjo6OnFxcVjRqNGYC0XIyclpa2sbGBj4+/uDwTmQbNiwISsrS1tbm4uLy9LSMi8vT0VFBQwouj0E4Kxq4MDhJ4VOp7u7u6MJeLNmzUKOFrq6uhYWFvX19chMQ0tLKzExsaWlJSEhoaWlpbm5ubi4OD4+vqysrLS0ND4+vqKigouLC78v5gHS3d2NeYDY29urqKhERUUtW7ZMVVWVMLuhoqIiKytr+PDh2tra+/bts7S0fP78eV1d3YMHDzIzM1tbW1++fMm8rCIxMfHMmTPTp09//Pixo6MjAMDGxubgwYNOTk7Tpk3r6emZM2cO8iQJDw/fvXu3p6cnAODOnTvi4uLx8fEPHjwYO3ZsY2MjqmlLS8unT59ERUX5+Pjq6+tjY2OvXbu2ZMkSSUlJa2vrlpaWzMzMzs7OtWvXMi+H4OPjW7du3fLly1VUVCZMmLB69WoAAGaiMmvWLFVV1VOnTunq6ra0tCQmJnZ1dbW3t3/+/BmvVRYXF/f58+fnz5/X1tZmZWUZGxubmZk1NDR4eXk9evRoxIgR8+fP3717t5WVFS8v7759+5DTiLW1NXMV1NTUUJ47d+7Mzc3FFg8oKCjY2tra29vPnj07Ojrax8eHn59fQ0PDysoKCYL39fUpKCiMHj2auVAVFZWPHz/W1dXx8PBERkaePXsWALBjx47ly5ebmZkNcJpJSkr+9ttvurq6AAALCwvsPfjixYsGBgYLFy5cvnz5b7/99nfO3F8AztxODhx+UmRlZSsrKxkMBrbgDC9k/HcZeF+k2sxgMNAHty9qXBHkrVm
ChKpramqkpaXx4pBUKrWvr2/wiwK/C3Q6Hd3TqVQqs1LlAMLTX1cWAIBMJg/w0Rijr68P/4UTAAAhbGtrI3xgZLnxiyCp8YEPE/hrmxB+gyEq7Mnp+Thw+BlhMBji4uJ1dXVf3dVx4MCBHUO/53N3d1+1ahUa0EY0NjaGhYVJSEisWrWK8LTFTE1NTXR0NABgxowZmF4ttnH69OnMsgthYWHNzc0rV67EKyAgB68lS5YQEvf39/v5+UEI0WonZjIyMvLy8gAAXFxccnJy06ZNG7x6LzvIZPLjx49/++03zl31p+XDhw/oO9jKlSv/7Vg4cBhy/HPTSP8N7t+/DwDA23JWVVVdvXoVdTOXLl0aTCbXr18XERFB08QRFy5cAAAYGBigucgEXr58iRUaGRmJFlQtX7585cqVLPM/cODAAAeCTqfPmTMHAFBSUvLu3TtZWVkHB4fBhD0AiYmJXFxcycnJ35gPBw4cOPyKDOW5ndnZ2cyqTkJCQkeOHLl//z4vL297e/tg8hk1apSFhUV6enpqaioAoKurC42CjBs3jouLq6+vD3m4ox8A5wSWnZ29cePG9vZ2Go0WEBDw7NkzBoPR19dHpVL7+/tbWlpQMrxzWH9/P2EaFTc3N3rJk5KSWrhwoZ2d3enTp4OCgtBfGxoa0Ld4LGcGg4FEKCCEra2tWD50Oh0Tp5g1a1ZdXd3s2bPRiAuDwejo6MBPb2tqakKqThw4cOAw9BiyPV9PT09MTMyyZcsI26WkpLq6uk6dOsXHx4dNJv4iO3fuFBAQuHfvHgAgMDDQ0NAQbafRaIaGhiIiImQy2cDAgGBxGRMT09LSEhERkZ6ePnPmzNWrV7e1tc2aNUtJSenChQsaGhr29vb49AkJCUuWLAkLC1u3bh27SHR0dAAAQUFBXV1dixcvjo2NNTIyev78eWNj45QpU6ZNm3b8+HF5eXlra+sTJ04oKCigmeVubm7Hjx///fffVVVVS0tLb926NWrUqNjY2EePHgkKClpZWRkaGo4fP76jo6OxsdHY2DgxMXH58uWDbBwOHDhw+LUYsj2fh4cHcnNmhsFgoCXA7IbWmBk5cuT69evRTO729nZsyhYvL6+6ujoAYPjw4bNnzybshXopfX39uXPnKioqAgAkJSVVVFQEBAQcHBx27Njh5OTU3NyMpT98+PDkyZPXrFkTFBSEVhwzg5yym5ubb9y4kZOTY2ZmtnDhQisrKwkJiYkTJwoKCl69enXevHnx8fFOTk7z58///fffAQD5+fmioqLbt28vKCh48+YNNmd97ty5AIAVK1bcvHmzpqYmJiYmOjo6ISGBm5v74cOHg2wcDj+O0tLSkJCQuLi4AdJACLOysoqKitglaGxsfPfu3XePrb6+PikpafDpqVTq+/fvscXpGD8ovJ+T8vJyPz+/8vLyv7VXWlpaXFxcSUlJV1fXwCnb2tpS/6SkpAS/RJIDnqHZ8/X09Fy/fn3RokVHjx4FAKB/McTExE6cOOHq6pqdnY3Wog4GW1tbKpW6adMm5lkqfxf0sVRKSopOp9fW1mLbi4qKCgsLExMTAwMDZWVlWe6LvoXOmDHj06dPaH6KiIhIZ2cnkrJF07IFBATQD35+fvQt9MyZM93d3WjFK/PiVl5eXjTTh0qlrl69eurUqYaGhmg9EId/kYyMjPDw8DVr1rx//36AW1hnZ+fZs2eTk5NZ/gkA8McffyA15O8FyrapqWnwQiEo/cGDB8vKygjbv3t4Pw46nf4towBNTU2urq6amprYcvXBsGXLluLiYgkJCSsrq4KCgoETi4mJJSYmGhoaQgjDwsLU1NTQ/DgOBIZmz8fHx3f48OHNmzejlxsdHZ2urq49e/bk5+fHxsYiYYJJkyYtWrSI8H2SJRDC/v5+TU1NLS0tKpU6ffp0CCH4U9CBn58fm+fCcncGg0F4UkM7NjY2CgoKysvLY9sVFRXb2trWrl2rp6eHib4TePXqlZSU1MGDB9XU1Cg
UCgCgp6dHVFR07NixA1RhzZo12dnZe/fu/WJlU1NTw8PDnzx5EhgYOPSEG34tnjx5MmXKFB4entOnT7M7HwAAYmJiysrKzNtJJBIS9Z85c+Z3jOrNmzfoQ8LUqVMH/9UEADBmzBj8FGuM7xveD8Xe3r6iouKrd4+Li+Pl5VVSUlq7du3g93r79u3MmTOnTp3q7++PJgfQaLSrV6+6u7v7+fmZmpoiez8DAwNvb29ubu7p06dLSEjMmTNn//79GhoaSODt8+fPCxcu9Pb2fvbs2Z49ewAASHzAy8vr1KlT6Anm8OHDrq6u69atc3Nzs7W1RbI7Q5WhqeHCx8e3f/9+AEBAQIC3t7eenh6JRHrw4MHMmTOzs7Pd3d3t7OxEREQGI/pXXV3t7++fnJzs6Oi4b98+BoNBo9FcXFwAADExMZmZmWvWrLl586alpWVdXZ2UlFRsbCz6dBMfH79//35paemrV6+KiIgUFxfTaDTUlzQ3N585c8bV1fXy5cuCgoLok1FCQsK1a9eMjIzmz5+/aNGic+fOoQDS0tLQE+K1a9eEhYVra2vT09NHjhx56NChmJiYgICAd+/eeXh4NDc3FxQUUCiUvLy84uLi1tbWvLy8wsLChoaGyspKcXHx5OTkq1eviouLR0dHo2f2pKQk9Nk2JSUFSVulpqaOHDkyIiJiwYIFJiYmo0aN+hFHh8NgKC0tTU9PHzFiRE1NzZkzZ/z9/UVERPT19c+dOxcYGKirq7t3796amhovL6/x48fHxcVhsiAYgYGBmZmZkZGRkpKSra2tN27cCAkJOXfu3OLFi588eUImk0NCQlxdXdF3eIze3l4HBwcVFZXExMRNmzZNnDjRwsJCU1OTRCKVlJQEBwd7eHiIi4vPmTMnMTExLS3Ny8vr4sWLubm5M2bMCAoKsrOzq6mpCQsL8/HxkZWVPX78+PLlyx8+fPjgwYMBlq7jw5s8ebKJiYmZmZmNjc2DBw9GjRrV2Njo7++/cOHCoKCgnTt3Wltb4+OPi4sLDAxUVlaWkJBoaWnp6OgQFxePiop6+PChnJzcw4cPp02bFh4efv/+/cePH6OU0tLSAgICU6ZMCQoKsra2Hj58uJGRkY2NTXp6OoRw2bJlERERqqqqx48fLywsjI2NzcnJkZaW3rNnT1BQkKSkpISERG5ublVVVWhoqJWVlaam5vbt25HE9tOnT2NjY/n5+fn5+QnKKQ0NDW/fviWRSK9evdLV1cU3srKyMpZDTEwMYa3R2rVrly1b9vjx42XLlqElLufPn5eTk9u1axcAQEJCgkwmKygodHZ2zps3D79jTk5ORkYGMp9CA/xbt24FAKDVWbNmzbpy5YqlpWVQUNCZM2dQJ6qlpeXj42NgYGBoaIgE7YYs/8aE0n+Hrq4u9KO5ubmzs/M75kwmk7u6unp6epj/RKVSe3t78VtMTExUVFQoFAqm446nr6+vqalp8EXX1dWhmaUDw2AwGhoaIITt7e3I7oQdZDKZwWAMXuidw4/DxMQErY1ZsWIFshQgeCysX7/+48ePEEIbGxtkT4MnISHBxMQEQpiRkaGtrQ0hfP78+Y4dO5KTk83NzX19fVevXn3w4EHCXsxWDMbGxm/evIEQWlhYODo6YsYOiYmJenp6EMKgoCBLS0sI4c2bN48cOQIhPHnypI+PD4VCQS48WEWMjIwSEhIIJRLCgxB6e3tv374dQujk5MRgMLKzs7W0tCCE5eXlwsLC8fHx+Phzc3ORSROE8M6dO8eOHYMQPn78WFdXt7CwMDo6mkqlysnJMRgMfMr79+/TaDRnZ+dz585BCKdNm4bcG8TExLq7uykUirKyMmTyK9DV1c3NzWX2vsB8MwICAnbu3InkpJkPqJeX1+HDh1k2Mt55g0B/f7+jo6OgoOCuXbuQUcO4cePw5gk0Gg3iLC+io6NlZGScnJy2b9+uq6v7+fNnlExOTi4lJSUiIgIdlOrqai0trfb2dktLy/v372O5zZk
zp7q6mmUkQ4mh+bWTJdjiAUlJye+rnDRs2DARERFmtUAAAB8fH36xPJVKbWxsrK2t7e7uRnNVCPDz8/+theqjRo0ajLYQFxcXsrgUExMb+APvsGHDuLi4pKWlBx8Dhx8Npj5F8FiIj49XUFAAALA89/Cg1whkQZCRkbFq1Spzc/OwsLCbN28SUjJbMfDw8KDrRVtbG/8BHBN4ZOcRISgoKCkpGRoaitee/mJ4AAAzM7P379+TSCQkkIYFMGHChBEjRiQmJuLjJ3hW4ENVUFAgkUjx8fEAAAaDgU85d+5cPz+/5uZmFBhWheHDhwsICAgKCqIVQSz9Cpi9L7Cc9fX1u7q6NDU1B5bIYOl3gcWGp7u7m5eX99SpU5mZmaGhoUhJnODewOw4MWLEiGPHjnl6eq5atcrU1BTbLi4uLikpiUnTtba22traysjIsJwM2NXVZWxsbGxsjD5xDTH+Qz3fz0B3d/elS5dev37d2Nj4b8fC4WcH/jlyjJ5SmbdLSkqiT+UQQuYpMJgDA7Y7+iEjI+Pv74+2MM+LGcCKgUQiaWlpMWfLMmYAwOfPn93d3fX19QUFBZlT4neBf3VI4OPjs7GxWb9+PcFavbe3V1RUVF5efoD48aF6eHg0NjYuWbIEWeNif6XT6Rs2bNiwYQOy5RugCgS/AlR3dt4XAICysrKnT58ePXoUyVOwq+ng/S7QF2YAgKqqqomJCUvLC8JnSYLlBX4S36RJkzQ0NDZt2lRQUAAhlJSU9PT0TEhIwOeGVX/48OHBwcHBwcHIYWqIMTTH+X5aJCQk5s+f/29HweEXoLGxsaioKDY2VlFRsbS0NDY2VlxcnOCxcOnSpcOHD8fHx3/69Km/v7+/vx+vuTx58uRPnz65ubn19PQUFRVVV1dHR0cXFhbq6Oi4ublpaGjMmzfv5MmThHIJVgxoY0BAQEtLC4VC2bhxY0xMjL29vbKycnp6emFhIXKEKC4urq2tTUlJqaqqqq2t/fDhg5CQ0KpVq3Jzc8+dO0en0728vOTl5QsKCmJjY+fOnYufsPP+/Xt8eK2trSNGjNi1a1dcXBw2I6akpCQgIKCiosLV1XXu3Ln4+IOCgoqLi/Py8qZMmQIAiIuLmzJlSnJy8vnz5+Pj411cXPj4+JDTOp1ORyknT57My8tra2srLS0dHh6+cuXK6urq5OTkyZMnNzc3R0ZGSktLt7e3f/r0ieBXMHfu3IsXL54/f56db0ZCQkJSUpKioiL+TQvR2dkZExNTUVFRUVFBaOSamhosB8InGUVFxY0bN1ZUVEhJSeXm5np7ewMAXFxcjh492traOmrUqL6+vtWrV1dWViLLCzMzs8DAwJqamjt37nR0dCQmJnp5eQEA8vLympubf//9d35+/uDg4MOHD3/48KGqqopEIvn5+aGx2Pnz5xcVFVVWVkZFRVlaWn6Hk/gnZujrdnLgMITp7+9HLgQsfQaQAwPLHclkMl48iADeisHU1NTW1lZdXR0bIxggW+YABAQEIIRUKnWQuyCqq6tTUlJMTEwAAHl5eQcOHAgODhYWFsa6TJbxu7i4tLW12draYituKRSKkJBQX18fHx8fvrtlMBj9/f0CAgLIUGKASAh+BVjdWXpfMBgMOp3e398/cJ6IwfhdAAB6enr4+fnr6+vHjBmDT9zV1SUgIDAkjRT+ATjvfBw4/MLw8fEN4K0zQGczQLcHAMAPhHd3d3d3d+O3DL4PQym5uLgGv0tPTw8adkIvKyiAzs5OQsAs4+/u7u7q6sJ7A6ERUOZRN25ubrTxi10UoWvBKsJS7Z2bm5ubm3uQbkeDnG2AIsTcdP/u7hxYwhnn48CBA1uqq6vNzMxQ5/fPlCgsLLxx48bbt29jEzGampr27NmTlZU18I49PT3jxo2bNm1adXX1D4+Swy8O52snBw4cOHD4b8F55+PAgQMHDv8tOD0fBw4/I99RephGo6F8cnJy0JaPHz+mpqbi1dL/ATo6Ol68eMFuHcJ
XQ6fTkSPmFwkLC8NP8W9vbw8PD/8uJX5RdJvBYCQmJv4gOcCPHz/6+PgQJBI5ItcDMzR7vp6eHnQga2trs7Oz0W8ajdbV1ZWamsrOBoEddDr93bt3mJ3eP0BhYSF2k/oKBn/S0+l07KTPz89HQqA/P18neD8YKisrUWtkZGT8u7qF31F6mJeXt6amZs6cOZh4Ap1Od3FxERcX/27hDoIDBw6sWLHiw4cPaBn+96Kvr4/ZhpMlOTk5+NHKrKwspOz1LSV+URMciVy3tLTs3bu3srLyK4obmMjIyPz8fElJSXzmHJHrL/MDdGH+fRgMxvnz5xUVFWk0WnV19YgRI7y9vdH2HTt2pKSk/K3campqVFVVc3Nzf0ywf6Gjo6Ovr2/btm1OTk4DJ2P3p82bN/v6+ubk5CxcuDAtLe2LJaKFun/88ceTJ080NTWfPn36NXF/AwPUhSWNjY2HDx8uLi5mp/b0LVAolHXr1q1evTo2NvbAgQOLFi3CRO/+eaKjo1VUVNDvzZs3I10uRGlpqZ+fH/bf4uLiGzdu3Lhx49SpUxBCf39/fX3958+fP3369Pz58yiNjo4OUsyCELq7uxcWFkIIaTTa3bt3X7z4v/bOPB6q9v//p5QoFVnTHinlbtO+KEUbWSpRCHcSN2lR3S3aZUkLZblDEiJlqYQIFYWk1ZJkH/u+DDPDzFy/P67ffR7nc2ZJy939vc31/GvmzHXe13Kuc95zttcrytvbG74eDgDo7e11c3Pz9fUNCwvbsmULLmVHrLS3t/fKlSsWFhaBgYHHjh2rq6vj35empqZFixYBABgMxqZNm35kWH4WVCp14sSJPxKhsrLSxsYGAJCTk7Nu3TquZY4cOQIPHdra2t965OkLdnZ2jx8/Ji0cPXp0QUEBAKC2thYqzwEA7t275+rqGh4efuDAgdjY2JKSko0bN966dQvwnWn9lf55zjdgwAAHB4f6+vrS0tIxY8ZYWVmlpKTA5fLy8gsXLvymaPLy8mPGjPlnWvo/QBV8YWFh+E4uL3ANfq5wKrtjGBYZGenm5nbnzh0HB4dHjx7hyu4YhkF/wYULF+7YsWP37t2enp4YhjU2NpqYmDg7O0dGRjo5Ob1///7Lly+rV6++cePG6dOnk5KSsJ+k7I4L//ed7xO87yMiIiLKysrjx49fuXLl5cuXS0pKoNbGV6Xuu7q6zM3N/f39tbS0goKC+mKL0Xeg9LCOjg6+JCkp6erVq/jXwMBAVVXVAwcOwJmjqqra2dm5ZcuWbdu2GRoawjIODg6enp5QcKSoqAjaOzg7O4uIiGzatOmPP/5ISUmJi4vDMOzMmTMjR460trbetm3bzp07qVQqZ6WDBg1SUVEBAFhYWEyYMMHd3Z3U5mvXrvn5+Z04ccLb2xsAEB4e3tDQEBER8fr165SUlIiICCaTeevWLW9v77Vr15aUlNy9e1ddXd3Ly0tNTe3ly5fEUEwm89KlS5GRkQYGBu3t7aWlpUePHr19+7aZmVlVVVVaWtrMmTMxDOOM8OzZs5s3b547d87Pz6+8vFxDQwNe7/Hz8wsLC7ty5QqM//nz57/++uuPP/4gOXNdv35dU1OzpaXFzMxsz549bDbb19c3KSkJrxHXBMf+Ft0mqavU19dHR0cnJCRAM7LExERra2u4Reh0+vHjx0NCQqytraH/4rVr1+Lj452cnOBQBwQE6OrqxsbGEptE6ntubm5ubm5qamp2djaxGBS5fvLkiZycHBS5Tk5Ojo6O/vPPP42MjI4ePdrd3c1H5Jo40/ot/3bq/QcxNTU9evQoAODPP/8UExOD+ww8+QsKCvLy8lqzZg3UeCVBo9GOHTsWHBy8e/fu58+fAwA0NDSOHj2qpaWloaHR2dmZlJSUkJDg6OhYXFzc3d195coVNzc3HR2d9vZ2BweH3bt3r1u3Lj4+XlNT093dHQDg5eUVExNTWFjo6+trY2Nz8uRJzkqZTKa+vr6FhcW7d+8uXrxoamp64sS
JpUuX1tfXs1isQ4cOPXnyxMjIqKOj4/LlywsXLkxMTOTaa1tb2zFjxiQlJeFLnjx5sm3bNvi5sbExIiICALBixYovX77AejEMq6uro1Aourq6sMEAAGNjY6gy3NXVVVlZCQBQUFCg0+kNDQ1jxowBAMA/sFDftrq6Ggr+kigpKTly5EhoaOiOHTsoFMq7d++mTp0KALh58+bq1avxLr969cre3t7MzGzv3r3Tp0/PzMzkNaoPHjywsbHR1taOjY0lbaba2toNGzacPXt2wYIFkZGRCxcu9PT0XLdunaurq5+f37p16+CYBAUFpaSk2NnZ8Zo2jo6O1tbWPT09cXFxY8aMwcXNYa8BAFD5iUKhwJOYqKgoIyOjtra2wsJCOp2uqKgIAMjIyOAVv+/wkh7u6uqKiIgwNTVNS0uDS9LS0iZPnhwXFwe/FhcXz507Nysr6/bt23AjAgBYLNaUKVMiIyPfvHkTGhoKFyopKcGNCwCIjIw0NjYGPASROStNTk7W1tZ+/vy5qalpamoqseWvX7+GV88AAHJycgUFBeXl5fPmzcOXAABIwtklJSVz5swBAISFhdnb2xOjXb58OTo6GgCQkpJSVlampaUFdbq9vb2NjIwAABISEgAAUgQGg7F69WoAAI1Gu337NgBAU1MzJycnLy9PT08PANDY2AjP+Uiy1Hi9NBpt8uTJdDo9MTFRS0sLAODj4wMtyWCNvDTBiY2HItcAAG1t7ZSUFACAiopKdXU1SbS6rq5OVVW1o6MjPz+fUxEbh7Pv5ubmjx49Av8Lp8j1jh07iNdI4MKvilz3Y/rnOR/ExMQkODi4vb1dUlJy3rx5d+7ciY6O1tfXz8rKevLkibi4+KBBg7iKsXp5eUlLS5uamtrb28PphWHY+vXrHz16JCIiEhgYGBMT09zc7ODgICoq6unpSaVSx4wZk5eX9/Tp00mTJgkLCyckJKxfv97a2hoaDAEA9PT0jh49OmzYsNmzZ7u6ukI9XCJCQkKqqqqqqqqzZ8/GMGzixIlnz56dPXs2FJtXVlbW0NBgMplv375dsGDBuHHj1qxZw7XXHh4eNjY2Ojo61tbWMKuFhIRALQwMw6SkpOCp0oABA4h6EN7e3h4eHvX19crKynDJgAEDCgoK0tPTr169Cs3/BgwYQKPRoqOjodc88dRZXl6eq/Wavb29oaGhsbHxwoULDx06NHv27Pb2dgzD1NXVoRQy7PKCBQsUFBTk5OQ8PDwOHTp04sQJXqOqo6OzcOHCadOmaWtrkzaTrKzs0KFDZ8yY8erVq1WrVtXX19vb2zs7O0dFRe3atevAgQMREREYhvn4+MjLy/O/wfPhw4cLFy74+PisX78ev+E/YMCAV69eJSUlQanMAQMGAADa29tjY2PV1dVHjhw5depUPAI0u/9xuEoPR0REDBs2bNasWdBqGMOw5cuXJyQknDt37vz583DJoEGDxMXFJSQkcOGSgQMH7t+///Lly5GRkZs3b4YLaTQa/pa3sLBwd3c3xkMQmbNSWMvQoUNHjhyZl5dHbDauyAyHgut9I5JwNhTjxjAM1+PGycjIkJeXxzBs1apVEydOxIMvW7YsNzcXbyEpwpcvX+Ab5SIiItu3b8eLvXjxgiTzzVWWGq64YcOGBw8eQOmviooKYWFhODKcItEk0W2uwDLi4uKtra0k0WohISEjI6Pp06dXVFRwKmJzDized076InKNvyuJwznTkGL1f5XVq1ez2WwrKys9PT0LC4uAgIC2tjYxMTH+WvUYNyV1DMPgLqGhoUGhUA4fPnzx4kVbW9uRI0fm5ORYWFgYGxuXlJTo6uoSNdd1dXXhgxJQbYHXDsYV4o7Ud83775v0GIbZ2tpevHjR29tbX1+/trYWLhQTE5OQkCCKXFy7di04OLgvvoaQvuyoEJLKPp9R5QzOKXjPyz0Aw7Dz589ramqSLiKRmDVr1vHjxx89elRfX+/o6Igv/yap+8zMTHjgeP78+VcHiiu
8pIfb2tpUVFS2bNlSXl4OH/OJj49XUlJKTU3FlTaHDh06derU9evXKyoq4n+zzMzMPn/+TBQS09TUfPXqFfz89u1bDQ0NjIcgMmelcEDmzZu3b98+f39/YstxRWYMw1pbW2fPng3/aMMl8IorL+FswPGG8ejRo+/du4dhWGdnZ25uLh68qalJVVWVcxX4VUZGJj09HT6/iut6AwCkpKTgtVDwt8w3SZaaGGrXrl0eHh5SUlLm5ubGxsYbN24kVsFLE5wYgVcZkmg1nU63srJKSEiws7OTkpLipYjNte+kGvsicg0l2XC4zrT+rVjdnzOfkJDQtm3bKisrp0yZsmXLlsLCQjhX+GvVY3yV1BsaGhYvXtzV1fX69WtJScm//vpLRkYmLCwMFibJTAgJCVlYWOzYsQPuMHx2MAivnYSkeY8X4+THJz0xuY4fP15FRcXe3h7+owcAHD58eP78+S4uLvgqnMcpriNJ2lG7urpIxw4cqLLPZ1Txwemj4D2phaNHj/7w4UNsbCyvR3x/ROqeWNfixYvhgWPFihV8hogXzc3NuPTw2bNnIyMjoZqXj4/PgAEDJkyYMG7cOBUVlcOHD9Pp9Dt37gQFBT18+BD+W3/27BmFQomKioLXmXE1r6FDh1pbW0N7Uoibm1tSUlJ4eLi3tzeNRoNmp97e3vfu3QsICHj06FFUVBSTyeSslEqlPn/+vLCwMCIi4uLFi8T/BxiGLVmyxNjY2M3NLTo62sDAQEFB4f79+2VlZTk5ORiGKSsrHz9+fOXKlVQqVVVV1c7ObsKECRkZGTU1NZWVlc+ePSssLITXBiAHDx58+PDhsmXLTp48qays7OHhER8fD++3OTs7p6SkNDc3Q6loYgRhYeEjR47MmjVLX1+/t7e3uLi4qKgoKSlJR0dHTExsx44dHh4eTCbz48ePUJbayMgoNTWV5MY8c+ZMeXn5rVu3btu2TVFREVp94TXimuCcotvEaXD+/HlYAF6tLS0tzcjIsLe3p9Pp9+7d8/f39/Pza29vd3R0pNFolpaWcnJyUBHb0tKSTqcTzy9JfS8vL3///n1iYiJxuBQUFI4fP37lypWQkJC8vDx4y9nW1pbJZLq6usbHx0dGRjY0NBQXF0ORa14zrZ/zU66Z/p/l7du3Pj4+8PPevXuhKSuDwVi5cuWcOXNsbW2rq6s51+rs7Ny8efPdu3c9PT2fPn0KADhw4MCJEycePXrk5uYGvyYkJPj4+Hz8+LGoqGjy5Mlr1649duwYlUo1NTVdu3ZtbW0tDNXU1GRlZQU/x8TEyMnJGRoa3rhxg2trk5KSFixY8PDhwy1btujq6tbW1mpqasIHlMeNG3fq1CkDAwNjY+PGxsbJkyfj/SK1XFlZ+fLly8HBwatWrYIOk3Q63cbGxsXFJS4u7t69exQK5cuXLxMmTPDy8oIHNQzDjh496ubmpq2t7e/vDwCor69XVVU9cuRIVFTU6dOnL1++XFhYOGzYsISEhJ6enmXLlsHaP3/+LCcnd/PmTV7jn5+fr6+vn5iYeOrUKdiYnTt3btu27fLlyxMmTCgtLYVdTkhIgLddY2JiDh8+XFVVxWtU29vbTU1Nly9fXlZWRtpMVVVVs2bNOnLkCI1Gu3//vri4eFFR0Y0bN0aPHl1cXOzu7q6iolJfX29oaPjy5csTJ06QHIMhnz9/XrJkyaxZszw9PQ8dOqSjowNnSF5enoiISGho6N27d7dt2wZvlcnLyxcXF1dUVEydOhUarj548EBMTCw3N5fXgPxDMBgMJpNJpVK/WpJrrzs6Ojj9jbku/CZoNBrXCEwms6enB37u+6OzpGeA+/hIcFdXF7ynxRkNilbDrwwGg1dn4W1OWIbzV/4+z18t09HRAW8cstlsNptNdMzmszW/2nfYawqFAoPjMBiMn2vK/d+l/6uX9fT0QM1Z/AOEv1Y9xqGkzmAw2Gw2vDfAZrPhDRJ44QsAQKPReEnfMplM/PoYSfedE14q+CT
Nez5i+byU3Xt6ehgMxr+ictvR0UG04WUwGMLCwmw2G/6ZhX3hVNnnP6o4fRS8x2Gz2SRRYwQCIWj0/8yH+E9w4cKF5uZmXq8DIxAIxE+kP9/nQ/xXQCr7CATiV4LO+RAIBAIhWKBzPgQCgUAIFv3Tk53FYsHnpzEMGz58+KRJk/C3Vn8xDQ0NBQUFK1eu/OmRc3NzWSwWfO0dkpOTIy0tjb9R3tPTk5aWtmDBAuLTJd8EKeCPQKPRUlNTNTQ0ON2x+VBbWzt69GjO5VQqFaqWTJs2jfPX7OzsefPmwTe4MzIyurq6Vq5cyWmTXVpa2tjYSFwyc+ZMUVHRnp6eyspKRUXFvrezrq6utLSUKAT1+fNnBoMBNa7+RcrLy/Pz8+fOnQvlSPiUZDKZ8DUPUVFR2Ox379719PQoKCjwel0Egfjv0j/P+YSEhISEhBYtWlRfX5+Tk7NixYrw8PBf3Iavirj/YPCrV69ClUUoBo9hWFlZGfE1wdra2p07d/JRlPgqpID828O/QFVVlbm5OfHdOP6w2WwfHx/4Oj+J3t5eMzMzNTW15OTkqKgo0q+VlZUbN26E7wgePHhQSEho0qRJW7du5byqLyMjY2Njc+vWLfjkbVRUVFFREVQb+FYp0cbGxi9fvsDPHR0dPT09bm5uCQkJ3xTkpxMQEODp6amkpOTi4sI5UCT+j/g5IBC/hv55zocRhJhlZWV7e3s9PT23bdv2y2qnUCguLi4+Pj5z586Njo7+6fFHjBihoqICpYQdHR1NTExmzJiBq1tBJkyYAF+8/W5IAXnR09NjYWHB/9g6ZcqUbzp1CAwMXLZsGX4UTkpKmjlzJnzL+NatW7NnzxYTEzMzM1u4cCEuxIVhGJvNjo+Ph0pXHR0dMTExFy9exDBs0KBBjx8/Xr9+PbEKMTExBQUFBQUF+Ir99OnTmUymhITE8uXLv9WR6rfffvvtt98wDEtISCgvL7exsZkxYwZ/tZ2v0tjYuH///unTpyspKRUWFmpraw8bNsza2nr79u0UCmXJkiVr1qw5ePDg5MmTU1JSNDQ0CgoKjhw5QpRWT0lJWbFixZQpU65evXrr1i24MDIysqSkZMKECa9fv1ZXV1+6dOmpU6dGjhx55MiRTZs2qamp3b9/f9++fRiGvXnzxtHRcdCgQSwWy8fHZ8yYMXV1dZKSklBwmclkXr58ecSIESNHjoyOjvbx8ZGWlsYwrLS0NDs728jICJbx8vL6+PHj8uXLi4uL7e3tZWVlf2RMEIifRb/NfDhVVVWxsbFQuJLNZh85cmTNmjU3btzw8/OjUqlxcXHCwsLCwsJGRka3bt2iUqkPHz708fFRUFAgxbl27dqQIUMoFIqcnNy2bdt27NixcOHCmpoaKpXKqeaFi7hLSkpCEfeHDx+ePn1aXV09KSmpsrIyNjbW0tISF0OCtLW17d69e+LEiRiGpaenHz9+/Pnz5yUlJVFRUb6+vtnZ2d7e3seOHRsxYsTZs2fhKlAMHkpqXbt2TUlJyd7ePi0t7cOHD6KiolBlqrS01N/fX0VFJSkp6fz583Q6XU9Pz9bW9vXr1wAATU3NxMREZWXlI0eOJCcnl5aWNjc3y8jIGBgYHDt2TElJycTEhH9nc3Nzofq+vr6+o6Mjr+GFhf39/VtbW83MzD5+/MhisV6+fGlubs452kFBQZaWlvjXp0+fysrKwsyXnZ2trq6OYdjw4cPb2tqam5txYbPQ0FAjI6Pr16/DwaytrYUvcY4ZM6agoICU+SAsFovFYlVWViYkJEATBj6vBpqbm0tLSx84cEBXV3fv3r3GxsbW1tanT5+OjIzMzs6+efOmv7+/uLg4FO3Mz88/efJkampqdHT0d/wFgYlk6dKlK1as6O7ubm5uHjduXEVFhYmJSUdHx5w5c6qqqgwMDBYuXBgSEqKjo6Orq0tSw9m0adOOHTvodDpUA8f+FuyH4jgaGhqpqakSEhKjRo0aO3YsPL12cHDYu3fvnj1
7hISEioqKoKSLs7OznJwclHu1srISExPT0tI6c+bM2LFjYQEJCQkqlQobnJSUFBwcDDc39HP48OGDhYWFn5+fu7s7/COCQPzr9M+rnTgkIWaS9HN6evrr169NTExmzpzJX8Y6JycnJSXFysrq3LlzTk5O9fX1o0aNmjVrlq+vb05ODucpAlFUesiQIQ4ODnv27AkPD6fT6a6urqKiooqKiqdPnyatJS4uLicnN336dDc3N2lp6QEDBly4cKGurq6qqmru3LltbW1Dhw6dM2cO0W9WVlZ2/Pjx69evnzp1qry8fG9vL5PJtLW1tbW1hTJIGIdstKKiopCQ0IYNG6D0tp6e3vXr12/evIlhGI1G27Bhg46OTmho6IgRI2DAUaNG8e+sqqqqqKiooaEhm83mNbywZHp6+qRJkw4fPiwrK0tU/SYFrK2txW/L1dbWVldXd3R01NfXV1dXt7W11dXV4a+3Dx06FG/PmzdvJk2ahF+dGzdu3MyZMwMDA/Py8j58+MCZXCEvX7708vLy9vb+qosvhmE2NjYFBQWjR4/W0NCA6qbwTHTu3Lmtra18NMe/GpkrPygabmBgEBYW5u7uvnz5cqhZ+lXtcm1t7cGDB9+/f//t27dz586FC0NDQ6HTDYZha9euhTcOgoOD8UsC69atGz9+PIZh3d3do0aNUlRUTE9Px7vQ1NSUlpb24sULLS2t7xsHBOKn088zH0mImST9vHHjxs7Ozvnz5w8ZMoS/jDWn9jwuDC8mJoYbmHGFqD3NR4Udwkdq+atASZSSkhJhYWH4fAdMKpyy0Xgtw4cPHzJkiIiICFQaXLJkSUJCQlFREbxShwsG9rGzfIYXFnB1dcVVMYmq36Q4ra2tuMzNpUuXnJycMjIy/P39nZycEhMTJ0yYgGep7u5u/HDv4uISHx9/9OjRmpoaR0fHlpaW5ORkCQmJhoaG+vp6XuYJampqe/fuvXjxoq6u7ldHGJ741tTUCAkJ3blz582bN/PmzcO4KfdjfRPv/yrfJBpOEtfv6urS19cvKCgQFhaGosNf1S7/lX4OCMS/SL/NfFyFmEnSz6WlpWFhYYcPH963bx9/GWtO7Xn8J64vRPLSnpaWlualws4JMbKQkBCDwcAwDNd65lXXqFGj8vLyYG4AALDZbE7ZaF61/PHHH7NmzZo+fTrgITzP6+1P2BE+wwuL+fv77927t7y8HHYEV/0mRVNUVMRz28WLF319fdetW+fo6Ojr62toaLh27dqCggIMwxobG6dOnTpkyBBYu7Ozs4mJiYmJiYSExLZt24YPHy4mJmZoaPjp0yc7Ozuud5hYLBbeI66PiXJiYWFhbm6+fft2RUXF69evL1iwgDhQfRHv/ya+STScJK5/4cIFDMNGjhxpb2/fF+1yyC/wc+A/7RGIX0D/vM/HYrECAgIwDPP09BQXF09PT/fz8xs3blxpaWleXt7p06dZLFZQUNDSpUszMjIUFBQMDAz09PR8fX1VVVWXLFly7NgxUkBce37KlCkGBgZSUlKfPn3KzMxUUlIqLy/PysoiXWjCRdy7u7uJIu6DBw+GKuwaGhpmZmakc4XW1taPHz9iGLZy5crCwsKUlJSpU6eWlpa+evXKysqqtrb24MGDPT09xcXFdXV1mZmZDAajra0NisEfOHAgMzNz0KBBVlZWhw4d0tbW1tTU7OrqevHihYeHh6Oj4/jx4zMyMpydnYuLi6uqqjIzM6dPn97U1JSUlCQtLd3W1pabmyslJXXixAk9Pb3S0tKEhAQYkEKh8O8s9rf6voGBAa/hzc3Nra+vLy8vt7W1Xbt2bVBQUGRkpKamprKy8rJly0jRhIWFoSMoV21SLS2tuLi4Fy9exMfHw7PzqVOnPn/+HJqMYxgmKio6Y8YMYWFhCoUSHh4uJiYGb+CR+PDhw9u3b7u6ugwNDeG1RAzDGhoanj59ymQyKRQKvpCIqalpTk6OsrLy77//XlxcDCdbYmLi58+fy8vL58+
f7+joqKCg8OrVq97e3rq6uuzsbDk5OTabjfvk9ZGGhoZPnz5BKf3c3NwRI0YMHjy4rq7u6dOn7u7uq1atkpSUtLGxKSoqqqioePLkibm5OSnCy5cvT548OWvWrJs3b0Knb1tb2/3797u6us6cObO7u3vRokVDhw598+ZNRUWFoaEhlLGFfg5bt27F47i5uZ05c4ZKpba0tBD9HA4fPtzS0iInJ8dgMDZs2ODv7w/9HNhsNvRzuHnzJu7n8OzZM+jnUF5evn79+k+fPn3TaCAQPxeB03AhSj8LCwuzWKze3l78ahJ/GWs6nT5w4EA+etOkwrxEpbu6urg+r88fAACTyRwwYADnRSrOurq6ukRFRVksFn7DjCQbzQsajSYqKtrd3f1VqWgiLBaLzWYPHjyY//ASIal+k6iqqkpJSYHPZXClvr5eXFwcXoWDbeYsU1ZWNmHChG9NOV+FxWIJCQnBzcH5miCf7f6LgRuxurpaWlqaOGm/ql3OYDA4X7vs7OwcMmQIafJzXfhVeG0vBOKXIXCZD/FfAZ4W4B7xCAQC8bNAmQ+BQCAQgkW/fcIFgUAgEAiuoMyHQCAQCMECZT4EAoFACBb9OfOxWKznz5+Hh4d/qwzjV8Pev3//JwbkSltb2+PHj79plaqqqri4uIaGBtLyxsbGhw8flpaWwq+9vb2PHj2Cr8Rxpaen59X/gssxNzY2kvwN+gLniDU0NDx79uxb42AYVl9fT3xT8ztgs9l4v+BL2d9HT09PcnIyUaq7srIyIyMDw7C4uLi+a3P/OFCk7dfUhffxlwEAeP/+fVFR0a+s9EeAon2/uFJe+z7k/fv38I1MAMDTp08zMjJIj3fAAji4yMb37e//Cfpz5tu/f//cuXMVFRX7uK9+1XAAwmAw3r59+2NN+zrv37+3sbHpe/ni4uLz58+vWbPm1KlT8D0zSGlpqaOjo4aGhq+vb2JiIgDAxcWlpaXl999/55W/hYWFS0pK1NTUBgwYICQk1N7eDjVB8vLy1NTU+KRMXhBH7LstLOCKEhISwcHBJIHKb2LgwIEjR45ctGhRbW3tN725QYJkhcFms4OCguDL2h8/foTuGb8GZ2fnO3fu/IKKiH38ZXR0dJw6dYpTXALr8w77g6t8K1++fLGwsPinayHCa9/HMKyjo8PS0rK9vR0K3dnb20tLSw8ZMuTQoUPEYqNGjVq2bFleXt6gQYN6e3vd3d2xH9jf/xuAfkpTU9OKFSv6Xp7BYGzatOkfa843Q6VSJ06cyL9MYmJibW0t/GxsbJyWlgYAiIuL27VrF17GwcEhKCgIAJCXl7dgwYKWlhaoAhMUFOTs7Mwrcl5enpiYGPzMZDKrqqrg582bNz979uy7O1VZWWljYwMAyMnJWbduXd9XJG6d6urqixcvfncbIBiGlZeX/2CQefPmEYM8evTI3Nz8B2N+Kx0dHVu2bJkzZ86vqe5f6ePBgwfhHCaCz6W+Ex8f7+Pj8/PaxRM5Obl/uoq+7Ps9PT0aGhpQmB4A8PnzZw0NDfh50aJFlZWVxIBSUlI5OTnwc0VFBfzwg/v7/2X67TlfeHh4Y2PjnTt39u/fr6Ojg2HY3bt31dXVvby81NTUXr58iWFYUlJSQECArq5ubGwsbjjw5s0bKGQVFBSkoaHR0tKira197tw5GxsbU1NTDMPS0tKgBDNnwJycHCcnJxsbG0NDwydPnnBtWFVV1ZkzZ2JiYnbv3s1isY4ePWpubn727Nnly5fDEwg/P7+wsLArV658tY9Pnz7FJT2zs7OhmaqioiLxD3JLSwuFQsEwDPoVQBHIzs7OJ0+e2NnZ8QkOAGCxWN3d3adPn8a9b/j4GGAY9uHDh/Hjx1dXV3t6eq5ataq1tfXDhw+nT5/GRwy3sIANu3TpEi6ClZSU5Orqev369T/++IPJZPr6+lpYWHR3d+/bt+/kyZP41mEymfLy8o8fP/4pf94/ffqkqKgYGhqqp6fn5eXFq1hycrKfn5+
Li8uNGzcwDEtLS7t27VpAQAAU6Ort7XVycrp37x6Uci4vL9fQ0OAqyvpPEB4e7uXlRafTnz9/DnukoKCQkZHBZrOtra3Ly8uJk/zNmzdKSkoBAQH6+vrEeYhhWGtr66FDhy5cuKCmphYYGEij0Tw8PC5cuKCrq9vT00PqI5FLly49fvzYysrq3bt3GIYFBwffuXPH2toamldoaWnt37/f2dm5tLT06NGjt2/fNjMzq6qqqq2tDQgIgIUxDLt27Vp8fDwUmsGprq4+f/58aGhoWloahmFsNvvw4cPJycnbtm3r7OzE5xKpI0+ePHn8+PGJEydKSkqIvaDRaP7+/q9fv37//j2xDOeQXr9+XVNTs6WlxczMbM+ePWw229fXNykpiTQmeHk6nX78+PGQkBBra2vYVBaL5eTktGDBAgcHBz5N6ujoOHjwoLW19fr162/fvs1nw3E2si/7/pMnT8rKyp4+fbp3796ysrKcnBzcnXjSpEnZ2dmkmEwms7e39/79+1CNHfva/v6fpt9mPnV1dRkZGSMjI0NDQwAAhmHz5s1rb2+3s7OzsbG5e/cuyTYBNxxQVVVtb2+HEZqbmzmdCtTU1KqqqjgDYhjm5ua2evVqJyennJwcTU1Nrg3r7u5etmyZtrZ2fHz8wIEDp0yZMmrUqJMnT06fPv3Zs2f5+fkJCQnbt2+3trbm0zs+DgZE+wIMwzZv3hwZGVlWVvbgwQPoV8BisaKjo6HMGJ8qent7vby8vLy8UlNT+zjms2bNmjFjRk1NjZWVVX5+/rBhw6qqqiwsLPAR42Vh0dPTY2tre+jQod27dzc1Nd24cYPkTYFvHSj4Mnr06Pz8/D62ig/Kyso9PT06Ojq+vr58LuIRLSw4rTACAgLExMSgAB6GYRMnTvzpqjG8AAA0NDTIysra2dlBkWhlZWUtLa38/PyBAweqqKjIycmRJnlra+umTZtiYmKI8xAAEBYWNmrUqMOHD7e3t69evdrT05NKpY4ZMyYvLy8hIYHURyLDhw/X1NScNWvWw4cP3759++7dOyMjoz///LOrq2vBggUUCuXKlSvHjh0jGYYQfTwaGhpu3bq1fPly6B2Bc+DAAS0tLRMTk/nz52McRiv4XCJ1hOgBQuxFUlIS7qTBxycEwzAzM7PS0tJhw4YZGxuXlZXBrampqUkaE7y8l5eXtLS0qampvb29kZERAKCnp8fBwSE9PT04OPjjx4+8mvT06dNJkyYJCwsnJCQYGxvz2XDE5vV938/IyDA0NDx06NCqVas2btzIy+QE5+7du15eXsHBwd8wBf+z9NvMh4P/bcENB0aMGNHR0fFV2wQcTqcCqLdJCohh2IIFC2JiYgAARPsYEpMnT6ZQKNDGhc1mk4K8ePEC/i/jL+/Ex8Ggs7OT6MijpaUVGBiYkZHR3Ny8fPly2GwzM7O0tDT+9+GFhYX37t17+PBheKLTRywtLW/evJmYmLh8+fLY2NiKigoo8snpZkC0MqioqOjs7IRlcEMJPoiKivK6mf+twPHHtyBXiBYWnFYYaWlpcMDxTQY7kpmZCZ0T4NnYP0FycnJTU9OlS5daW1uTk5PhGYydnV1AQMCrV6+WLVvGOckHDRo0atQojGMezp49OzU1lUqlKioqysjI5OTkWFhYGBsbl5SU6OrqcvYRZ/HixeHh4U1NTUwmMzMzE/4bmDRpkrq6+qBBg3DrRJJhCNHHA/5DnT59ekVFBTFyeno6cV8gOYHgxUgdIXqAkHqBr8LHJwRWtGHDhgcPHuTn5zc1NVVUVAgLCw8YMIBXNLxrysrKDAajublZVFRUVFR0yJAhy5Yto1AofJpEHCL+Gw6n7/s+g8GA2rPr1q0rKSmRkZHBheBJJSHbt2/fv39/UFAQV8Xafka/zXzg74eX4FVdzp84bRNwCXm4CqcrAjEm1+X6+vqTJk0qLCwMDQ3l1TB/f/+GhoZVq1YRjQLwIFJSUvDCKQCAz0OMfBwM8vPzV69ejRE
U8efOnautrZ2SkoL72WIYJiIiMmPGDF7xv8PEAKKjo5OamlpcXOzk5OTq6jp27Fhi73hZGYwbN667uxvenIeGEpzeFESB/9bW1ilTpmDfq/oPAxKHF/BVMiJaWHBaYXBuMtipxYsXQ+eEFStWfEcj+0JiYqKHh4eDgwOUC7969SqGYUpKShISEhEREbNnz+bjDUKah3PmzNHR0UlLSwsJCREVFZWRkYEGtnQ6/f3797ymJYvFMjQ0NDQ0hHe2Ro8eHR0dDWshPZNCMgwh+nhUVVVZWVklJCTY2dkRN4SkpCR8Ng1WSnICwecSqSNEDxBSL/BVSD4hcKYR2bVrl4eHh5SUlLm5ubGxMXSQJkXj7BqdTpeUlJSSksJ/gh7CfJpErLSPG67v+/6SJUvg8gEDBkhLS69duxY30KiuribZtuC7PDTm5Jxs/Yz+6dWAYVh6enppaWlFRcXz58+Lioqqq6szMjJqamoqKyufPXtWWFgoIiJCsk2AhgPQ6MDY2Hj+/PltbW0fPnwgORUUFxc3Nze/ePGCQqEQA7a3t1+/fj07Ozs1NTU+Pv7gwYPwzzUJCQkJb2/vwYMHS0pK+vn55ebmVlVV1dTUvHnzhkajXblyxdfXd8eOHUpKSkwm8+PHj7itK38OHjx45swZcXHxpKQkooOBrKxsSkpKQkKCl5eXhIREfX39rl27jIyMOjo6XF1duYZiMBhhYWFdXV0RERGGhob48i9fvrx9+/bZs2dz5szhJX49ePDgzZs3r1mzZtq0aRMnTtywYQOGYSkpKXDEVFRUuFpYdHd3BwYGurm5bd26lc1m79ixAwBA9Kbo6urCt464uHhPT8/UqVO/T/WfzWYHBgZiGBYcHLxp06aGhoaMjIyurq7GxsaSkhKuBrZEC4uioiKSFcb+/fs3btzY29vb3t5eWVmZnZ1dVFSUlJQErfv+OYKCgt6+fQst6ZlMpqSkpJeX16ZNm1asWLFnzx54TiwrK0uc5B8+fGhpabl//76enh5xHvr7+48dO9bf319RUTE2NhZaOqxbty4lJUVVVfXcuXOkPjY2NkIHdmj0uGfPHmlp6cePH1taWkpJSf32229z5851dXW9f//+ly9f8vPzZ8yYQTIMefToEe7j0d7efuHCBVNTU0tLS+KNJWdn54MHD6anp+fm5vb29i5dupToBOLh4QHnEqkjxcXFuAeIiIgIsReNjY2Ojo7Tpk178uQJXiYpKWn79u0UCoV4Ojtz5kx5efmtW7fS6fSsrCwZGRkMw0hjghe2t7c3Nze/d+9ebW2tn58fhmEKCgqhoaGDBg2ytLSUl5e/dOkS1yYdO3bs8uXLDQ0NdXV18FyZ14bjagCJw2vf19PTi42NhSev7u7u0tLSGzduDAsL6+np2blzJ+7hjGHYgwcPWltbIyIipk+fjl8R7cv+/t9F0HU7ibYJuOEAhmEMBkNYWBhejexjKADA1atXTUxMmpqaoJG6sbEx15JQq57BYAwePJjrPaGOjo7hw4ezWCyuPgZ8GlBTUzN69GgYE9bS3Nw8cOBACQkJvBg0oeWalX8K0MoAwzBo10D6lY+VAZvN7u7uxr0ywP96U+Bb5/Hjx8LCwqtWrcJ+oeo/ycKCZIUBAKBSqSIiIpzWDf8WTCYTnzy8vEGI8/D27dsrVqzo6elpbm7Oysrau3cvAIBGo+HHQV59ZLPZvb29Q4YMwQeHjysI/hObzcZ9POAhiEqlctpH9Pb2wukEKyU6gYiIiOBzidgR7G8rXdh9Ui/gKiSfkODgYBMTE9KeyHUak6IR6ezsFBMTwzM3lUoVFRWFEUjV8QmC9W3DccJ134c/NTY2iomJ4V/b29sBAMS0J5gIeub7iTQ0NCxdutTR0XHEiBFNTU1r164dP378v92o/kZFRUVpaam6uvq/3ZD+xsKFCw0MDKZNm1ZbWztr1izouCsIdHd3t7S04JflEQICynw/k/r6+qysLAkJiSVLlnzT6RoC8e/S1dW
VkZHBZDKXLFnC9bkPBKI/gTIfAoFAIASLfvtsJwKBQCAQXEGZD4FAIBCCRf+/F1VXV1daWrpkyZIfD5WdnU2n0+Xl5WVlZTmfQyPS2tqKq8tLSkpOnjz5l+l6fDcAgCdPnkAdyH+oChqNlpKSoq2t/U1rcQ47AODDhw8lJSXTp09XVlbGMKygoAB/SxcyadIk+DB6eXn5xIkTf1ofMAzGzM/Pnzt3Lo1GwxWhcHJycqSlpeEr/D+dhoaGgoKClStX/vTI393s3t7e1NRUGRmZIUOGTJ8+nX/h8vJyKLsF1YsaGxtLS0uHDBkye/bs72v29/H+/Xvia3yDBg0ivd/247DZ7MzMTAUFBfjGwjcRFxe3evVqXk9Bc0Lcdq2tra9fv+7o6Ni0aVNfDjtwbxo6dKiSktK3tvO/y//1w/GPAFU5GhsbcZOdH8HU1PTLly8SEhKWlpaFhYX8C48cOfLly5e6uroAgLi4OBUVlZ+ittV3vkPW8sSJE8rKynV1dT/i3cO/PeXl5du2bfumFTmHvbm5efPmzZWVlatWrcrKyjIxMaHRaKdPn6ZQKG/evNm4cWNXV1daWlpERASMoKenh7/A+1MICAjw9PRUUlJycXGJioriLFBWVlZXV/cTayTyHTYXfeT7mk2n09XU1KSkpJqbm/X19b9aXlJS0sHBwdfXF75XIykpefHixb6/O/Sz4OpO8HNpbm62s7MjCdP0kT7afbBYLFgM33ZUKvXUqVNr1qyhUql9nPZ83DD6M9+lc/0f4Kfrso8ePbqgoAAAUFtbm5CQAADo7e11c3Pz9fUNCwvbsmVLQ0NDamqqmpra+/fvAQDJycnTpk2D65qYmPz5558AgIKCAjU1tVu3bkVERECl+ZiYGF1d3Zs3bx47dqyoqAgA4ODg4O3tvWnTJh8fHzs7O9wnoe98n++EiooKnU7/1rW+tT1SUlLftC7nsJuZmZ05cwYvYGZmdvLkyczMTABAVlbW2LFj4fJXr14BAJ4+fbpmzZr9+/f/jH78f4yMjHx9feFnTg+Bf5pvtbn4pyksLBw7dmxPTw8AIDo6Gr4uVl9f7+joGBQUdP36dejtQNw14AkinGxMJvPw4cMw1MePHz08PO7evXvu3Ln6+nq48N27dydPngwNDXVxcXFxccHrdXFxgbIjAICWlhZbW9sTJ05cu3bN2dmZyWT2peVc3Ql+Ltra2llZWf9EZMiRI0fy8vKISyIjI0+dOvWtcbi6YfRv+mfmYzKZ+vr6FhYWb9++vXbtmqmpKQCgpKTkyJEjoaGhO3bsoFAoBQUFCgoKISEhurq6165d+2pMW1vbMWPGJCUl4UscHR3/+usv+DkhIaG0tBQAMGbMGLgEz3wfPnyYNm3ay5cv4XK8QGFhIQCAQqEsWrQIABAVFQXlbuGusmjRoqqqqurqaq5mOomJiS4uLn/99ZeNjQ2VSt25c6efn191dfWaNWsSEhJycnJGjhx5586dzMzMKVOmXLt2TVNT08zMDFbq6+trY2Nz8uTJ2traDRs2nD17dsGCBY8ePZKWlg4KCiorKyMNVGBg4IYNG/bt2+fk5HT27NmtW7c6OzvPmzcvPDz84sWL6urqMDdfvXo1Li7u3LlznK3F29Pb2wuFNvT09C5dukRqT1+GncViiYqK4oMJALh165aSkhL8TMx8kHPnzlVVVY0aNaqjo4NrfBqNduzYseDg4N27dz9//ry5uVlLS+vs2bPW1tYmJiZcV7l7966IiMiVK1fwI29lZeWVK1du3bq1a9eu9vZ2W1tbT09Pruv2nffv348bN66qqsrDw0NdXb2lpeX9+/enTp3KyclZsGDBxYsX1dTUUlNTAQCJiYn+/v46OjoPHz7kP6t9fHzMzc27urr27t174sQJQNhqeLO5joCjo+P169fV1dVPnTrFOZIqKiqLFy8uLi7Gl6irq+fn58PP3t7e8AM+8wEAc+bMCQwMBADExsY+ffoUANDc3Dxv3jyYQcv
KypYuXcpms+vq6ubNm0ej0QAAbDYb/y/b2Ng4b968hw8f4gGPHz8Oj93btm2LjY3tywhLSUllZWX19PTExMRkZWWRZgJx76BSqcQVg4KCvLy81qxZU1xcTKFQTp8+HR0dbWVlBf53JgAAtLW1z5w5s3v37q1bt3I2gLjLPH36NDAw8OzZs9evXy8rK1u9ejXMysS6IiIiVq5cee3ateXLl7948aKurk5JScnd3b2wsBBuu46ODgsLiy1btoSFha1fv97d3Z0UmdSAqqoqJyenkJCQBQsWwNG7evXq9evXHR0dvby8+jKG/136Z+YDADg5OcGN9/LlS21tbQCAlpbWu3fvAADe3t4wx4wbN669vb2mpmbmzJlfDQiNWkRERHbv3t3b2wsAGD9+fHNzM14A/tPEj7zJyckyMjKurq47d+7U0ND49OkTXD527NisrKzExES451dVVS1cuLCtrc3c3Jw4NWHm49oSBoOhqKgIqzMwMPjrr79cXV09PDwAAObm5vfv3wcEhzApKSkqlcpisSZNmvTixQt9ff3g4ODr168LCws3Nzdv2bIlKioKlpw4cSLcw0kDlZeX99tvv8Ey0dHR8C/85cuXDx06BAA4duxYSEhIfX29qqpqR0cHfrwjgbdHQkKCRqM1NjZOmTIFAEBqz1eHHWo7wZNjyOPHj4cNGwY/kzJfZWXlzZs34SjB8eHE3d39ypUrAID8/PzRo0ez2WxTU9MHDx4AAKZNm9bU1MR1rejoaHl5+SVLltTV1cFeNDY2AgBu3brFZDLPnz//4w6CAIB169ZlZ2d3d3fLyMgwGIxHjx6Vl5fn5OQsX74cAHDv3r1du3bRaDR1dfXQ0NADBw7MnTsX8J3VWVlZenp6AICgoKB9+/aRthrebNII5OXlrVmzBgBgb2/P9cygsbHRxMREVFTUz88PAFBaWopPGAAA3FkAYdcAAISEhKioqAAADhw4wGazAQBhYWG///47XkBFReXLly+BgYF79uzhDBUcHPzp06fVq1fjPzk6Op46dSouLk5LS6uhoaEvwyslJXXgwIHLly/r6+tnZWVxzgTi3oGTmZlpbGwcGhq6YcOG/fv3f/78OTk5uaenZ+zYsWw2mzQToF4u7E51dTUxDnHwGQwG7AuNRrt9+zYAQFNTMycnh1RXSUkJNGIMCwuzt7cHAGhoaMBzPnzbeXl5nT59GgDg4uJy8eJFzshEtm7dCvd0W1vboKCg169fwxs0AAA5OTl4raW/0p/v80HwWwgktXisbzr9kK6urkGDBh0/fvzt27exsbFQ6LatrY0oM8h5r2LUqFF//vlnQEDA+vXrDQwM8OXi4uKSkpL4q+4tLS179uyRkZGxsrLqS4++ydlg0KBBw4YNGzhw4NKlS+vq6mpqakxNTa2srBgMxqhRo4hS8TikgSKWgTqNGIaNGDECt5ior6/nJbfPiZCQkIiICLRowDCM1B5SYc5hl5aWFhcXJ9by+fNnqF7Nye3bt6urqy9duiQtLX316lWuCuCcQvuc1hycrdLX1y8oKBAWFra1tcUw7MOHD1AOaseOHUJCQnDTdHZ2Qq8G6Gj/HfAyviDaXHCK+vd9VpO2Gj6BSSMwfvx4CoVSXV0tKirKqSJLpVKlpKRCQkJCQkLs7Oxyc3NJ+wVXSQdDQ8PW1tabN2/Ky8vDwlDfCy8gLCzc3d3NNRSLxcrPz6+vr29oaPj48SP+6+DBg4cPHy4kJNT3+/pEdwLOmcB178jJyVm/fr2xsXFcXNzly5dJThGkmYD9vbHExcXhhMchDv6XL1+g7hoUE8b+3hakujjNYXA4Dz5w3DgjEyG5YeAjgGHY4sWL8/Pz9+7dq6+vb2Nj08fx/A/RbzMfpy0ASS0eLwn+fpefyWTy0v4vLi6OiYnBMExZWXnLli3V1dUYhuGuqhBYHQ7xODt8+HA6nY5/nTp1qqqq6vbt2+EFT0lJyYCAgBcvXhCjAd4KA9/qbACpqalRVVXt7Ox
8/fo1hmHl5eWkxxm+OlAkiC3kJbePQ/TBINbFpz0Yj2G3tbXFn17BMCwxMdHe3h6vBR92Go3W3d19/PhxBwcH6Dr74MEDzobxEdrntQkuXLiAYdjIkSPt7e1hk4YNG/b48WMMwz59+tTa2gp7N3z4cOjVALPjd8DV+AIfOviBl6g/18aT5glpq+GRSSMwfPjwQ4cOvXr1at++fXPmzCHFDA8Ph4OwefPmadOm1dTUKCsrt7a21tTUwAJEE1ecwYMH29vb79mzx8TEBC5ZuXIl9LbFMIxOp3d0dEybNk1NTS0tLQ3fpjBUbGysgYHBxIkTDxw4AF0JIWPHjl2+fLmOjg50u/2qjwfJnYDPTCAiIyNz584duGJmZibJKYLrTMC4OcYQB19aWjo9Pb2pqQnDMNyeAgBAqgtfFw/F1fwELwMjkCITIblh4COAYVhra+vs2bM9PT1jYmJ8fX35j+R/kX6b+ebPnx8eHh4fH5+YmPj58+fy8nIPD4/4+PikpKS0tDRnZ+fi4mKo0//s2TOo03/y5Eno3s6JgoLC8ePHr1y5EhISkpeXB93Mvb297927FxAQ8OjRo6ioKCaTmZyc3NjYCBX0IyMjoTv52bNnIyMjg4KCMAyDjl+3b9++d++eqalpd3f38+fPKysrKRRKeHi4jY0N9IIpKiqqqKjg5eouIiICnQ2ePHkCnQ3Wrl0bGhp69uxZOp0O//hDZ4O2tjYWi/XXX3+FhoZu2rRp4sSJ58+f19HRMTIySk1NZbFYnz59evz4MZ1Of/XqVU1NTWRkJJvNJg3UkydPoOI+i8VKT0//8uVLTU1NVlbW69evocXEx48fGxoaHB0daTQaSW4fB7bn6dOnzc3NL1++TE5Obmtr+/TpE7E9nA9/cx32kydPSklJubi4PH78+M8//1y1apWFhQWGYZ2dnTExMXV1ddDCbe/evVQqFRq5tbS0TJo06fjx4+Xl5aQq7O3t6XT6vXv3/P39/fz8YKsyMzPLysqgNQdnX16+fHny5MmoqKgbN25AG3F3d3dLS8s1a9akpaUBADIzMzMzM3/cNZ6r8QX0HsFtLgYPHgxF/S0tLel0ellZGWlWEwPOnj0bOmAUFhYWFRXV1dXhW621tRU2m0KhkEagrq7O1dU1KCjo8OHDnJ6O8vLyO3fuDAoKcnd3nzJlioaGhoiISEhIiIODQ1RU1P379+H/OXzXwFe0srLatGmTrKws/Dp58uQ9e/Y4Ojo+fPjw8OHDISEhwsLCqqqqlpaWe/bsgbsYPNULCgqaNGnShAkTli5dGhYWlpGR0dLS8u7du/T09NDQ0KSkJOjqvGvXLpgCuYK7E+BPMpNmQnV1Nb53EFfU09OjUqmqqqp2dnYTJkyQkJAIDg6+cuUKvIFNnAl0Or2oqCglJaWsrKy0tJSUeNrb2/HBl5GROXLkyKxZs/T19Xt7e4uLi6HdB6kukttMe3v74sWLz58/n/k3TU1N2dnZOTk5dXV1OTk5r169EhMTI0YmDQJ0wzh+/Hhubu779+/nz59vbGzs5uYWHR1tYGAAfd77K/1ZvYyrLQAfIXk2mw0f6+D6a3d3t7CwcF1d3ZgxY4gH987OziFDhnCaEvzTkJwNoKI8LvSOOxuMHj26oqKCzWbjQwH/OH+1wXwGihPAW24fQvTBIMG/PbyGncVi1dXV4dfKfhCS0D5/oClBdXW1tLQ03mwmk9nT08NLgP+74W98gfNNov64A8ZXtxokIyMDADB27NiWlpaHDx+eOnWK+Ct0SKDRaF1dXfAFSpyWlhYJCQk+o8pgMIhXOGHzWltbSde9uS78KtDY4VunRx9nApVKxXc9kvVKH2cC5+DDqc55cZhYFyd8zE9weEXGONwwYMCBAwf++gPaL6Y/Z75vhUKhSElJ/RrXm18Dm80WFxevra3t42ERgeDEwcGhs7Nz48aN9fX1srKy0KYVgfhPgzJff+bNmzd5eXmysrLr1q37t9uC+K/CYrHevHl
TXV09b968cePG/dvNQSB+AijzIRAIBEKw6LdPuCAQCAQCwRWU+RAIBAIhWKDMh0AgEAjBAmU+BAKBQAgWKPMhEAgEQrBAmQ+BQCAQggXKfAgEAoEQLFDmQyAQCIRggTIfAoFAIAQLlPkQCAQCIVigzIdAIBAIwQJlPgQCgUAIFijzIRAIBEKwQJkPgUAgEIIFynwIBAKBECxQ5kMgEAiEYIEyHwKBQCAEC5T5EAgEAiFYoMyHQCAQCMECZT4EAoFACBYo8yEQCARCsECZD4FAIBCCBcp8CAQCgRAsUOZDIBAIhGCBMh8CgUAgBAuU+RAIBAIhWKDMh0AgEAjBAmU+BAKBQAgWKPMhEAgEQrBAmQ+BQCAQggXKfAgEAoEQLFDmQyAQCIRggTIfAoFAIAQLlPkQCAQCIVigzIdAIBAIwQJlPgQCgUAIFijzIRAIBEKwQJkPgUAgEIIFynwIBAKBECxQ5kMgEAiEYIEyHwKBQCAEC5T5EAgEAiFYoMyHQCAQCMECZT4EAoFACBYo8yEQCARCsECZD4FAIBCCBcp8CAQCgRAsUOZDIBAIhGCBMh8CgUAgBAuU+RAIBAIhWKDMh0AgEAjBAmU+BAKBQAgWKPMhEAgEQrBAmQ+BQCAQggXKfAgEAoEQLFDmQyAQCIRggTIfAoFAIAQLlPkQCAQCIVigzIdAIBAIwQJlPgQCgUAIFijzIRAIBEKwQJkPgUAgEIIFynwIBAKBECxQ5kMgEAiEYIEyHwKBQCAEC5T5EAgEAiFYoMyHQCAQCMECZT4EAoFACBYo8yEQCARCsECZD4FAIBCCBcp8CAQCgRAsUOZDIBAIhGCBMh8CgUAgBAuU+RAIBAIhWKDMh0AgEAjB4v8BmGoNiKO4Gz0AAAAASUVORK5CYII="
# Serialized page-annotation fixture: a single span covering character
# offsets 0-3696 on page 0. Box coordinates appear to be page-relative
# fractions (0-1) while metadata carries the absolute page size
# (presumably PDF points) -- TODO confirm against the consumer's Box
# convention.
PAGE_JSON = {
    "spans": [
        {
            "start": 0,
            "end": 3696,
            "box": {
                "left": 0.12100741176470588,
                "top": 0.08015236441805225,
                "width": 0.7625643173109246,
                "height": 0.8289201816627079,
                "page": 0,
            },
        }
    ],
    "id": 0,
    "metadata": {"width": 595.0, "height": 842.0, "user_unit": 1.0},
}
# Serialized fixtures for the first 10 tokens of the same page: each entry
# holds one character-offset span, a fractional bounding box on page 0, and
# font metadata (name and size). Ids are sequential 0-9; two entries
# (ids 5 and 9) use a smaller font size, presumably sub/superscript text --
# TODO confirm.
FIRST_10_TOKENS_JSON = [
    {
        "spans": [
            {
                "start": 0,
                "end": 5,
                "box": {
                    "left": 0.14541159663865547,
                    "top": 0.08015236441805225,
                    "width": 0.031124640759663848,
                    "height": 0.010648907363420378,
                    "page": 0,
                },
            }
        ],
        "id": 0,
        "metadata": {
            "fontname": "HXONRZ+NimbusRomNo9L-Regu",
            "size": 8.966379999999958,
        },
    },
    {
        "spans": [
            {
                "start": 6,
                "end": 10,
                "box": {
                    "left": 0.2218368002857143,
                    "top": 0.08015236441805225,
                    "width": 0.028109224561344556,
                    "height": 0.010648907363420378,
                    "page": 0,
                },
            }
        ],
        "id": 1,
        "metadata": {
            "fontname": "HXONRZ+NimbusRomNo9L-Regu",
            "size": 8.966379999999958,
        },
    },
    {
        "spans": [
            {
                "start": 11,
                "end": 18,
                "box": {
                    "left": 0.28294983802016804,
                    "top": 0.08015236441805225,
                    "width": 0.04515740219831938,
                    "height": 0.010648907363420378,
                    "page": 0,
                },
            }
        ],
        "id": 2,
        "metadata": {
            "fontname": "HXONRZ+NimbusRomNo9L-Regu",
            "size": 8.966379999999958,
        },
    },
    {
        "spans": [
            {
                "start": 19,
                "end": 23,
                "box": {
                    "left": 0.5239827089210084,
                    "top": 0.08015236441805225,
                    "width": 0.03749755185546227,
                    "height": 0.010648907363420378,
                    "page": 0,
                },
            }
        ],
        "id": 3,
        "metadata": {
            "fontname": "HXONRZ+NimbusRomNo9L-Regu",
            "size": 8.966379999999958,
        },
    },
    {
        "spans": [
            {
                "start": 24,
                "end": 25,
                "box": {
                    "left": 0.6157472036638656,
                    "top": 0.08015236441805225,
                    "width": 0.010051387327731112,
                    "height": 0.010648907363420378,
                    "page": 0,
                },
            }
        ],
        "id": 4,
        "metadata": {
            "fontname": "HXONRZ+NimbusRomNo9L-Regu",
            "size": 8.966379999999958,
        },
    },
    {
        "spans": [
            {
                "start": 26,
                "end": 29,
                "box": {
                    "left": 0.6266233613445378,
                    "top": 0.08181785724465564,
                    "width": 0.02369895794957974,
                    "height": 0.00851912114014249,
                    "page": 0,
                },
            }
        ],
        "id": 5,
        "metadata": {
            "fontname": "HXONRZ+NimbusRomNo9L-Regu",
            "size": 7.173099999999977,
        },
    },
    {
        "spans": [
            {
                "start": 30,
                "end": 31,
                "box": {
                    "left": 0.6508250420168067,
                    "top": 0.08015236441805225,
                    "width": 0.005018158890756309,
                    "height": 0.010648907363420378,
                    "page": 0,
                },
            }
        ],
        "id": 6,
        "metadata": {
            "fontname": "HXONRZ+NimbusRomNo9L-Regu",
            "size": 8.966379999999958,
        },
    },
    {
        "spans": [
            {
                "start": 31,
                "end": 35,
                "box": {
                    "left": 0.6558673121815126,
                    "top": 0.08015236441805225,
                    "width": 0.02927711439327727,
                    "height": 0.010648907363420378,
                    "page": 0,
                },
            }
        ],
        "id": 7,
        "metadata": {
            "fontname": "HXONRZ+NimbusRomNo9L-Regu",
            "size": 8.966379999999958,
        },
    },
    {
        "spans": [
            {
                "start": 36,
                "end": 37,
                "box": {
                    "left": 0.7629575354285715,
                    "top": 0.08015236441805225,
                    "width": 0.008378667697478945,
                    "height": 0.010648907363420378,
                    "page": 0,
                },
            }
        ],
        "id": 8,
        "metadata": {
            "fontname": "HXONRZ+NimbusRomNo9L-Regu",
            "size": 8.966379999999958,
        },
    },
    {
        "spans": [
            {
                "start": 38,
                "end": 40,
                "box": {
                    "left": 0.7722364705882353,
                    "top": 0.08181785724465564,
                    "width": 0.012888674302521032,
                    "height": 0.00851912114014249,
                    "page": 0,
                },
            }
        ],
        "id": 9,
        "metadata": {
            "fontname": "HXONRZ+NimbusRomNo9L-Regu",
            "size": 7.173099999999977,
        },
    },
]
# Serialized fixtures for the first 5 text rows: same span/box layout as the
# token fixtures but without font metadata. Note row 4 (id 4) spans
# characters 24-35, i.e. it merges several of the token fixtures above.
FIRST_5_ROWS_JSON = [
    {
        "spans": [
            {
                "start": 0,
                "end": 5,
                "box": {
                    "left": 0.14541159663865547,
                    "top": 0.08015236441805225,
                    "width": 0.03112464075966384,
                    "height": 0.010648907363420376,
                    "page": 0,
                },
            }
        ],
        "id": 0,
        "metadata": {},
    },
    {
        "spans": [
            {
                "start": 6,
                "end": 10,
                "box": {
                    "left": 0.2218368002857143,
                    "top": 0.08015236441805225,
                    "width": 0.02810922456134457,
                    "height": 0.010648907363420376,
                    "page": 0,
                },
            }
        ],
        "id": 1,
        "metadata": {},
    },
    {
        "spans": [
            {
                "start": 11,
                "end": 18,
                "box": {
                    "left": 0.28294983802016804,
                    "top": 0.08015236441805225,
                    "width": 0.045157402198319374,
                    "height": 0.010648907363420376,
                    "page": 0,
                },
            }
        ],
        "id": 2,
        "metadata": {},
    },
    {
        "spans": [
            {
                "start": 19,
                "end": 23,
                "box": {
                    "left": 0.5239827089210084,
                    "top": 0.08015236441805225,
                    "width": 0.03749755185546222,
                    "height": 0.010648907363420376,
                    "page": 0,
                },
            }
        ],
        "id": 3,
        "metadata": {},
    },
    {
        "spans": [
            {
                "start": 24,
                "end": 35,
                "box": {
                    "left": 0.6157472036638656,
                    "top": 0.08015236441805225,
                    "width": 0.06939722291092432,
                    "height": 0.010648907363420376,
                    "page": 0,
                },
            }
        ],
        "id": 4,
        "metadata": {},
    },
]
# Serialized fixtures for the first 3 layout blocks. Unlike tokens/rows,
# each block additionally carries a "box_group" with its own box list and a
# layout type ("Table"/"Text") in the group metadata. Block 1 has three
# spans (with small gaps at offsets 1184 and 1290 -- presumably line-break
# characters excluded from the spans; verify against the source text).
FIRST_3_BLOCKS_JSON = [
    {
        "spans": [
            {
                "start": 0,
                "end": 851,
                "box": {
                    "left": 0.14541159663865547,
                    "top": 0.08015236441805225,
                    "width": 0.7133684323462186,
                    "height": 0.2190099524940618,
                    "page": 0,
                },
            }
        ],
        "id": 0,
        "metadata": {},
        "box_group": {
            "boxes": [
                {
                    "left": 0.14228497673483456,
                    "top": 0.07860914035534348,
                    "width": 0.7309202049960609,
                    "height": 0.22434301670826529,
                    "page": 0,
                }
            ],
            "metadata": {"type": "Table"},
        },
    },
    {
        "spans": [
            {
                "start": 852,
                "end": 1183,
                "box": {
                    "left": 0.12100823529411764,
                    "top": 0.31637727296912105,
                    "width": 0.7625634937815128,
                    "height": 0.040477662327790986,
                    "page": 0,
                },
            },
            {
                "start": 1185,
                "end": 1289,
                "box": {
                    "left": 0.1266559638184874,
                    "top": 0.3591322037054633,
                    "width": 0.7513104815193276,
                    "height": 0.011832114014251716,
                    "page": 0,
                },
            },
            {
                "start": 1291,
                "end": 1461,
                "box": {
                    "left": 0.12100823529411764,
                    "top": 0.37338398517814736,
                    "width": 0.7624131321277309,
                    "height": 0.025941021377672124,
                    "page": 0,
                },
            },
        ],
        "id": 1,
        "metadata": {},
        "box_group": {
            "boxes": [
                {
                    "left": 0.12920637371159402,
                    "top": 0.31513023829516773,
                    "width": 0.7484953551733193,
                    "height": 0.08984719319468157,
                    "page": 0,
                }
            ],
            "metadata": {"type": "Text"},
        },
    },
    {
        "spans": [
            {
                "start": 1462,
                "end": 1588,
                "box": {
                    "left": 0.14803378151260504,
                    "top": 0.43017611738717343,
                    "width": 0.31311072265546214,
                    "height": 0.069223729216152,
                    "page": 0,
                },
            }
        ],
        "id": 2,
        "metadata": {},
        "box_group": {
            "boxes": [
                {
                    "left": 0.13913303663750656,
                    "top": 0.4264316875974243,
                    "width": 0.3255823984867384,
                    "height": 0.0700013076890959,
                    "page": 0,
                }
            ],
            "metadata": {"type": "Table"},
        },
    },
]
# Serialized VILA-style predictions for the first 10 segments: spans carry
# only character offsets (no boxes), and metadata holds the predicted layout
# type (Table / Caption / Paragraph / Section). Offsets are contiguous and
# non-overlapping, tiling characters 0-2642.
FIRST_10_VILA_JSONS = [
    {"spans": [{"start": 0, "end": 851}], "metadata": {"type": "Table"}},
    {"spans": [{"start": 852, "end": 1461}], "metadata": {"type": "Caption"}},
    {"spans": [{"start": 1462, "end": 1588}], "metadata": {"type": "Table"}},
    {"spans": [{"start": 1589, "end": 1679}], "metadata": {"type": "Caption"}},
    {"spans": [{"start": 1680, "end": 1803}], "metadata": {"type": "Paragraph"}},
    {"spans": [{"start": 1804, "end": 1831}], "metadata": {"type": "Section"}},
    {"spans": [{"start": 1832, "end": 2309}], "metadata": {"type": "Paragraph"}},
    {"spans": [{"start": 2310, "end": 2330}], "metadata": {"type": "Section"}},
    {"spans": [{"start": 2331, "end": 2604}], "metadata": {"type": "Paragraph"}},
    {"spans": [{"start": 2605, "end": 2642}], "metadata": {"type": "Section"}},
]
# Serialized word fixtures (ids 895-899) from deeper in the document. Only
# box-less spans here; two entries carry a "text" override in metadata
# ("in-domain", "sci-entific"), presumably for words whose surface form
# differs from the raw character range (e.g. hyphenation) -- TODO confirm.
SEGMENT_OF_WORD_JSONS = [
    {
        "spans": [
            {
                "start": 3370,
                "end": 3372,
            }
        ],
        "id": 895,
        "metadata": {},
    },
    {
        "spans": [
            {
                "start": 3373,
                "end": 3382,
            }
        ],
        "id": 896,
        "metadata": {"text": "in-domain"},
    },
    {
        "spans": [
            {
                "start": 3383,
                "end": 3394,
            }
        ],
        "id": 897,
        "metadata": {"text": "sci-entific"},
    },
    {
        "spans": [
            {
                "start": 3395,
                "end": 3405,
            }
        ],
        "id": 898,
        "metadata": {},
    },
    {
        "spans": [
            {
                "start": 3406,
                "end": 3408,
            }
        ],
        "id": 899,
        "metadata": {},
    },
]
| 249,118 | 509.489754 | 234,906 | py |
PRISim | PRISim-master/prisim/interferometry.py | from __future__ import division
import numpy as NP
import scipy.constants as FCNST
from scipy import interpolate, ndimage
import datetime as DT
import progressbar as PGB
import os, ast
import copy
import astropy
from astropy.io import fits, ascii
from astropy.coordinates import Galactic, SkyCoord, ICRS, FK5, AltAz, EarthLocation
from astropy import units
from astropy.time import Time
import warnings
import h5py
from distutils.version import LooseVersion
import psutil
import astroutils
from astroutils import geometry as GEOM
from astroutils import gridding_modules as GRD
from astroutils import constants as CNST
from astroutils import DSP_modules as DSP
from astroutils import catalog as SM
from astroutils import lookup_operations as LKP
from astroutils import nonmathops as NMO
import prisim
import baseline_delay_horizon as DLY
import primary_beams as PB
try:
import pyuvdata
from pyuvdata import UVData
from pyuvdata import utils as UVUtils
except ImportError:
uvdata_module_found = False
else:
uvdata_module_found = True
try:
from mwapy.pb import primary_beam as MWAPB
except ImportError:
mwa_tools_found = False
else:
mwa_tools_found = True
prisim_path = prisim.__path__[0]+'/'
################################################################################
def _astropy_columns(cols, tabtype='BinTableHDU'):

    """
    ----------------------------------------------------------------------------
    !!! FOR INTERNAL USE ONLY !!!
    This internal routine checks for Astropy version and produces the FITS
    columns based on the version. The keyword selecting ASCII vs. binary
    tables changed from tbtype= to ascii= in Astropy 0.4.2, hence the
    version dispatch below.

    Inputs:

    cols    [list of Astropy FITS columns] These are a list of Astropy FITS
            columns

    tabtype [string] specifies table type - 'BinTableHDU' (default) for binary
            tables and 'TableHDU' for ASCII tables

    Outputs:

    columns [Astropy FITS column data]

    Raises:

    ValueError if tabtype is invalid or the installed Astropy version is
    neither '0.4' nor >= '0.4.2' (previously this case fell through and
    surfaced as an opaque UnboundLocalError).
    ----------------------------------------------------------------------------
    """

    # NOTE: the old "try: cols / except NameError" guard was dead code --
    # a function parameter can never be unbound -- and has been removed.
    if tabtype not in ['BinTableHDU', 'TableHDU']:
        raise ValueError('tabtype specified is invalid.')
    use_ascii = (tabtype == 'TableHDU')
    if astropy.__version__ == '0.4':
        # Astropy 0.4.0 used the (long gone) tbtype keyword
        return fits.ColDefs(cols, tbtype=tabtype)
    if LooseVersion(astropy.__version__) >= LooseVersion('0.4.2'):
        return fits.ColDefs(cols, ascii=use_ascii)
    raise ValueError('Astropy version {0} is not supported by '
                     '_astropy_columns(); need 0.4 or >= 0.4.2'.format(astropy.__version__))
################################################################################
def thermalNoiseRMS(A_eff, df, dt, Tsys, nbl=1, nchan=1, ntimes=1,
                    flux_unit='Jy', eff_Q=1.0):

    """
    -------------------------------------------------------------------------
    Generates thermal noise RMS from instrument parameters for a complex-
    valued visibility measurement by an interferometer.
    [Based on equations 9-12 through 9-15 or section 5 in chapter 9 on
    Sensitivity in SIRA II wherein the equations are for real and imaginary
    parts separately.]

    A_eff       [scalar or numpy array] Effective area of the interferometer
                (in m^2). A scalar applies to all interferometers; otherwise
                must be of shape broadcastable to (nbl,nchan,ntimes), i.e.
                one of (1,1,1), (1,1,ntimes), (1,nchan,1), (nbl,1,1),
                (1,nchan,ntimes), (nbl,nchan,1), (nbl,1,ntimes), or
                (nbl,nchan,ntimes). Must be specified. No defaults.

    df          [scalar] Frequency resolution (in Hz). Must be specified.

    dt          [scalar] Time resolution (in seconds). Must be specified.

    Tsys        [scalar or numpy array] System temperature (in K). Same
                scalar/shape rules as A_eff. Must be specified. No defaults.

    nbl         [integer] Number of baseline vectors. Default=1

    nchan       [integer] Number of frequency channels. Default=1

    ntimes      [integer] Number of time stamps. Default=1

    flux_unit   [string] Units of thermal noise RMS to be returned. Accepted
                values are 'K' or 'Jy' (default)

    eff_Q       [scalar or numpy array] Efficiency of the interferometer(s),
                between 0 and 1. Same scalar/shape rules as A_eff.
                Default=1.0

    Output:

    Numpy array of thermal noise RMS (in units of K or Jy depending on
    flux_unit) of shape broadcastable to (nbl, nchan, ntimes) expected on a
    complex-valued visibility measurement from an interferometer. 1/sqrt(2)
    of this goes each into the real and imaginary parts.
    -------------------------------------------------------------------------
    """

    if not isinstance(df, (int,float)):
        raise TypeError('Input channel resolution must be a scalar')
    df = float(df)

    if not isinstance(dt, (int,float)):
        raise TypeError('Input time resolution must be a scalar')
    dt = float(dt)

    def _checked_count(value, name):
        # Validate a positive integer axis length (nbl, nchan or ntimes)
        if not isinstance(value, int):
            raise TypeError('Input {0} must be an integer'.format(name))
        if value <= 0:
            raise ValueError('Input {0} must be positive'.format(name))
        return value

    nbl = _checked_count(nbl, 'nbl')
    nchan = _checked_count(nchan, 'nchan')
    ntimes = _checked_count(ntimes, 'ntimes')

    # All shapes numpy can broadcast against (nbl, nchan, ntimes)
    acceptable_shapes = [(1,1,1), (1,nchan,1), (1,1,ntimes), (nbl,1,1),
                         (nbl,nchan,1), (nbl,1,ntimes), (1,nchan,ntimes),
                         (nbl,nchan,ntimes)]

    def _checked_broadcastable(value, name, desc):
        # Coerce a scalar/list/array input to a non-negative float array of
        # a shape broadcastable to (nbl, nchan, ntimes). desc is the
        # human-readable name used in the shape-mismatch message.
        # (dtype=float replaces the NP.float alias removed in NumPy 1.24.)
        if not isinstance(value, (int,float,list,NP.ndarray)):
            raise TypeError('Input {0} must be a scalar, float, list or numpy array'.format(name))
        if isinstance(value, (int,float)):
            arr = NP.asarray(value, dtype=float).reshape(1,1,1)
        else:
            arr = NP.asarray(value, dtype=float)
        if NP.any(arr < 0.0):
            raise ValueError('Value(s) in {0} cannot be negative'.format(name))
        if arr.shape not in acceptable_shapes:
            raise IndexError('{0} specified has incompatible dimensions'.format(desc))
        return arr

    Tsys = _checked_broadcastable(Tsys, 'Tsys', 'System temperature')
    A_eff = _checked_broadcastable(A_eff, 'A_eff', 'Effective area')
    # Bug fix: this message previously read 'Effective area' (copy-paste)
    eff_Q = _checked_broadcastable(eff_Q, 'eff_Q', 'Efficiency')

    if not isinstance(flux_unit, str):
        raise TypeError('Input flux_unit must be a string')
    if flux_unit.lower() not in ['k', 'jy']:
        raise ValueError('Input flux_unit must be set to K or Jy')

    if flux_unit.lower() == 'k':
        rms = Tsys/eff_Q/NP.sqrt(dt*df)
    else:
        # Convert K to Jy via 2 k_B Tsys / (A_eff eff_Q), per SIRA II ch. 9
        rms = 2.0 * FCNST.k / NP.sqrt(dt*df) * (Tsys/A_eff/eff_Q) / CNST.Jy

    return rms
################################################################################
def generateNoise(noiseRMS=None, A_eff=None, df=None, dt=None, Tsys=None, nbl=1,
                  nchan=1, ntimes=1, flux_unit='Jy', eff_Q=None):

    """
    -------------------------------------------------------------------------
    Generates thermal noise from instrument parameters for a complex-valued
    visibility measurement from an interferometer.
    [Based on equations 9-12 through 9-15 or section 5 in chapter 9 on
    Sensitivity in SIRA II wherein the equations are for real and imaginary
    parts separately.]

    noiseRMS    [NoneType or scalar or numpy array] If set to None (default),
                the rest of the parameters are used in determining the RMS of
                thermal noise via thermalNoiseRMS(). If specified as a scalar
                or numpy array, all other RMS-related parameters are ignored
                and this value is used instead. A numpy array must be of
                shape broadcastable to (nbl,nchan,ntimes), i.e. one of
                (1,1,1), (1,1,ntimes), (1,nchan,1), (nbl,1,1),
                (1,nchan,ntimes), (nbl,nchan,1), (nbl,1,ntimes), or
                (nbl,nchan,ntimes). It is assumed to be an RMS comprising of
                both real and imaginary parts; 1/sqrt(2) of it goes into
                each part.

    A_eff       [scalar or numpy array] Effective area (in m^2). Same
                scalar/shape rules as noiseRMS. Only used when noiseRMS is
                None.

    df          [scalar] Frequency resolution (in Hz). Only used when
                noiseRMS is None.

    dt          [scalar] Time resolution (in seconds). Only used when
                noiseRMS is None.

    Tsys        [scalar or numpy array] System temperature (in K). Same
                scalar/shape rules as noiseRMS. Only used when noiseRMS is
                None.

    nbl         [integer] Number of baseline vectors. Default=1

    nchan       [integer] Number of frequency channels. Default=1

    ntimes      [integer] Number of time stamps. Default=1

    flux_unit   [string] Units ('K' or 'Jy' (default)) of the computed RMS.
                Only used when noiseRMS is None; otherwise the output is in
                the same units as noiseRMS.

    eff_Q       [scalar or numpy array] Efficiency of the interferometer(s),
                between 0 and 1. Same scalar/shape rules as noiseRMS. Only
                used when noiseRMS is None.

    Output:

    Numpy array of complex-valued thermal noise of shape
    (nbl, nchan, ntimes), in units of noiseRMS (if specified) or flux_unit.
    -------------------------------------------------------------------------
    """

    if noiseRMS is None:
        noiseRMS = thermalNoiseRMS(A_eff, df, dt, Tsys, nbl=nbl, nchan=nchan, ntimes=ntimes, flux_unit=flux_unit, eff_Q=eff_Q)
    else:
        if not isinstance(noiseRMS, (int,float,list,NP.ndarray)):
            raise TypeError('Input noiseRMS must be a scalar, float, list or numpy array')
        if isinstance(noiseRMS, (int,float)):
            # dtype=float replaces the NP.float alias removed in NumPy 1.24
            noiseRMS = NP.asarray(noiseRMS, dtype=float).reshape(1,1,1)
        else:
            noiseRMS = NP.asarray(noiseRMS, dtype=float)
        if NP.any(noiseRMS < 0.0):
            raise ValueError('Value(s) in noiseRMS cannot be negative')
        # Any shape numpy can broadcast against (nbl, nchan, ntimes) is fine
        if noiseRMS.shape not in [(1,1,1), (1,nchan,1), (1,1,ntimes),
                                  (nbl,1,1), (nbl,nchan,1), (nbl,1,ntimes),
                                  (1,nchan,ntimes), (nbl,nchan,ntimes)]:
            raise IndexError('Noise RMS specified has incompatible dimensions')

    # sqrt(2.0) splits the total RMS equally between real and imaginary parts
    return noiseRMS / NP.sqrt(2.0) * (NP.random.randn(nbl,nchan,ntimes) + 1j * NP.random.randn(nbl,nchan,ntimes))
################################################################################
def read_gaintable(gainsfile, axes_order=None):
    """
    ---------------------------------------------------------------------------
    Read an instrument gain table from an HDF5 file and return it.

    Inputs:

    gainsfile   [string] Filename including full path of an HDF5 file
                containing instrument gains. It may contain the top-level
                groups 'antenna-based' and/or 'baseline-based', each with
                the following datasets:
                'ordering'  [list of strings] Three-element ordering of the
                            gain axes -- a permutation of 'label',
                            'frequency' and 'time'. Must be specified
                            (no defaults)
                'gains'     [scalar or numpy array] Complex instrument
                            gains of shape (nax1, nax2, nax3) as laid out
                            by 'ordering'. An axis of length 1 is
                            replicated by numpy broadcasting (e.g. shapes
                            (nax1,1,1), (1,1,1), (1,nax2,nax3) are all
                            acceptable). A scalar is replicated along all
                            three axes.
                'label'     [None or list/array] Antenna (or baseline)
                            labels matching the size of the 'label' axis;
                            may be None when that axis has length 1
                'frequency' [None or list/array] Frequency channels
                            matching the size of the 'frequency' axis;
                            may be None when that axis has length 1
                'time'      [None or list/array] Observation times (float,
                            in any consistent units such as seconds,
                            hours, days) matching the size of the 'time'
                            axis; may be None when that axis has length 1

    axes_order  [None or list or numpy array] Axes ordering in which the
                gain table is to be stored on return. If set to None,
                defaults to ['label', 'frequency', 'time']

    Output:

    gaintable   [None or dictionary] None when the file could not be read
                or contained no usable gain groups (all gains are then to
                be assumed unity). Otherwise a dictionary keyed by
                'antenna-based' and/or 'baseline-based', each value holding
                'ordering' (equal to axes_order), 'gains' (3D complex array
                transposed into axes_order) and the corresponding 'label',
                'frequency' and 'time' entries (None wherever the matching
                gain axis has length 1)
    ---------------------------------------------------------------------------
    """

    # Default/validate the requested output axes ordering
    if axes_order is None:
        axes_order = ['label', 'frequency', 'time']
    elif not isinstance(axes_order, (list, NP.ndarray)):
        raise TypeError('axes_order must be a list')
    else:
        if len(axes_order) != 3:
            raise ValueError('axes_order must be a three element list')
        for orderkey in ['label', 'frequency', 'time']:
            if orderkey not in axes_order:
                raise ValueError('axes_order does not contain key "{0}"'.format(orderkey))

    gaintable = {}
    try:
        with h5py.File(gainsfile, 'r') as fileobj:
            # Iterate over the top-level groups ('antenna-based', 'baseline-based')
            for gainkey in fileobj:
                try:
                    gaintable[gainkey] = {}
                    grp = fileobj[gainkey]
                    # NOTE(review): grp['gains'].value is the legacy h5py
                    # Dataset.value API (removed in h5py 3.x); grp['gains'][()]
                    # is the modern equivalent -- confirm the pinned h5py version
                    if isinstance(grp['gains'].value, (NP.float32, NP.float64, NP.complex64, NP.complex128)):
                        # Scalar gain: store as (1,1,1) so it broadcasts everywhere
                        gaintable[gainkey]['gains'] = NP.asarray(grp['gains'].value).reshape(1,1,1)
                    elif isinstance(grp['gains'].value, NP.ndarray):
                        if 'ordering' in grp:
                            ordering = list(grp['ordering'].value)
                        else:
                            raise KeyError('Axes ordering for gains not specified')

                        if len(ordering) != 3:
                            raise ValueError('Ordering must contain three elements')
                        elif ('time' not in ordering) or ('label' not in ordering) or ('frequency' not in ordering):
                            raise ValueError('Required elements not found in ordering of instrument gains')
                        else:
                            if grp['gains'].value.ndim == 3:
                                # Rearrange from the on-disk axes ordering to axes_order
                                transpose_order = NMO.find_list_in_list(ordering, axes_order)
                                gaintable[gainkey]['gains'] = NP.transpose(grp['gains'].value, axes=transpose_order)
                                for subkey in ['time', 'label', 'frequency']:
                                    gaintable[gainkey][subkey] = None
                                    # NOTE(review): grp[subkey] is dereferenced here
                                    # before the 'subkey not in grp' check below; a
                                    # missing subkey raises KeyError at this line and
                                    # is caught by the outer handler (the whole group
                                    # then silently defaults to unity gains)
                                    if isinstance(grp[subkey].value, NP.ndarray):
                                        # Axis metadata is only required when the
                                        # matching gains axis is non-degenerate
                                        if gaintable[gainkey]['gains'].shape[axes_order.index(subkey)] > 1:
                                            if subkey not in grp:
                                                raise KeyError('Key "{0}" not specified'.format(subkey))
                                            else:
                                                if not isinstance(grp[subkey].value, (list, NP.ndarray)):
                                                    raise TypeError('"{0} key must be specified as a list or numpy array'.format(subkey))
                                                gaintable[gainkey][subkey] = NP.asarray(grp[subkey].value).ravel()
                                                if gaintable[gainkey][subkey].size != gaintable[gainkey]['gains'].shape[axes_order.index(subkey)]:
                                                    raise ValueError('List of labels and the gains do not match in dimensions')
                                    else:
                                        raise TypeError('Value of key "{0}" in {1} gains must be a numpy array'.format(subkey, gainkey))
                            else:
                                raise ValueError('Gains array must be three-dimensional. Use fake dimension if there is no variation along any particular axis.')
                    else:
                        warnings.warn('Invalid data type specified for {0} instrument gains. Proceeding with defaults (unity gains)'.format(gainkey))
                    # Record the ordering the in-memory table now follows
                    gaintable[gainkey]['ordering'] = axes_order
                except KeyError:
                    # Missing group contents: best-effort fallback to unity gains
                    warnings.warn('No info found on {0} instrument gains. Proceeding with defaults (unity gains)'.format(gainkey))
    except IOError:
        # Unreadable/absent file: best-effort fallback to unity gains
        warnings.warn('Invalid file specified for instrument gains. Proceeding with defaults (unity gains)')
        gaintable = None
    if not gaintable:
        gaintable = None
    return gaintable
################################################################################
def extract_gains(gaintable, bl_labels, freq_index=None, time_index=None,
                  axes_order=None):
    """
    ---------------------------------------------------------------------------
    Extract complex instrument gains for given baselines from the gain table.

    Inputs:

    gaintable   [None or dictionary] Gain table as returned by
                read_gaintable(). If None, unity gains are returned. May
                contain keys 'antenna-based' and/or 'baseline-based', each
                a dictionary with:
                'ordering'  [list] axes ordering, a permutation of
                            'label', 'frequency', 'time'
                'gains'     [numpy array] complex gains of shape
                            (nant/nbl, nchan, nts); axes of length 1 are
                            broadcast
                'label'     [None or list/array] antenna (or baseline)
                            labels matching the 'label' axis; None when
                            that axis has length 1
                'frequency' [None or list/array] frequency channels
                            matching the 'frequency' axis; None when that
                            axis has length 1
                'time'      [None or list/array] observation times
                            matching the 'time' axis; None when that axis
                            has length 1

    bl_labels   [Numpy structured array] Antenna-pair labels under fields
                'A2' and 'A1' for second and first antenna respectively;
                the baseline vector is position of 'A2' minus position of
                'A1'

    freq_index  [None, int, list or numpy array] Indices along the
                frequency axis at which gains are to be extracted. None
                means all frequencies in the table.

    time_index  [None, int, list or numpy array] Indices along the time
                axis at which gains are to be extracted. None means all
                times in the table.

    axes_order  [None or list or numpy array] Axes ordering for the
                extracted gains; must contain 'label', 'frequency' and
                'time'. None means the same ordering as in the input
                gaintable.

    Outputs:

    [numpy array] Complex gains of shape nbl x nchan x nts (subject to
    axes_order) for the specified baselines, frequencies and times.
    ---------------------------------------------------------------------------
    """

    try:
        gaintable, bl_labels
    except NameError:
        raise NameError('Inputs gaintable and bl_labels must be specified')

    # Unity gain of shape (1,1,1); broadcasts against any selection below
    blgains = NP.asarray(1.0).reshape(1,1,1)
    if gaintable is not None:
        a1_labels = bl_labels['A1']
        a2_labels = bl_labels['A2']
        for gainkey in ['antenna-based', 'baseline-based']:
            if gainkey in gaintable:
                # Work internally in ('label','frequency','time') ordering
                temp_axes_order = ['label', 'frequency', 'time']
                inp_order = gaintable[gainkey]['ordering']
                temp_transpose_order = NMO.find_list_in_list(inp_order, temp_axes_order)
                if NP.all(inp_order == temp_axes_order):
                    gains = NP.copy(gaintable[gainkey]['gains'])
                else:
                    gains = NP.transpose(NP.copy(gaintable[gainkey]['gains']), axes=temp_transpose_order)

                # Normalize the frequency/time selections to index arrays
                if freq_index is None:
                    freq_index = NP.arange(gains.shape[1])
                elif isinstance(freq_index, (int,list,NP.ndarray)):
                    freq_index = NP.asarray(freq_index).ravel()
                if NP.any(freq_index >= gains.shape[1]):
                    raise IndexError('Input freq_index cannot exceed the frequency dimensions in the gain table')

                if time_index is None:
                    time_index = NP.arange(gains.shape[2])
                elif isinstance(time_index, (int,list,NP.ndarray)):
                    time_index = NP.asarray(time_index).ravel()
                if NP.any(time_index >= gains.shape[2]):
                    raise IndexError('Input time_index cannot exceed the time dimensions in the gain table')

                if gains.shape[0] == 1:
                    # One gain along the label axis applies to every baseline.
                    # NOTE(review): plain fancy indexing here (not NP.ix_) pairs
                    # freq_index with time_index elementwise unless their shapes
                    # broadcast -- presumably callers pass full ranges; confirm
                    blgains = blgains * gains[:,freq_index,time_index].reshape(1,freq_index.size,time_index.size)
                else:
                    labels = gaintable[gainkey]['label']
                    if gainkey == 'antenna-based':
                        # Antenna-based: g_bl = g(A2) * conj(g(A1)) per pair
                        ind1 = NMO.find_list_in_list(labels, a1_labels)
                        ind2 = NMO.find_list_in_list(labels, a2_labels)
                        if NP.sum(ind1.mask) > 0:
                            raise IndexError('Some antenna gains could not be found')
                        if NP.sum(ind2.mask) > 0:
                            raise IndexError('Some antenna gains could not be found')
                        blgains = blgains * gains[NP.ix_(ind2,freq_index,time_index)].reshape(ind2.size,freq_index.size,time_index.size) * gains[NP.ix_(ind1,freq_index,time_index)].conj().reshape(ind1.size,freq_index.size,time_index.size)
                    else:
                        # Baseline-based: a reversed label denotes the conjugate
                        # baseline, so append label-reversed / conjugated copies
                        # and search the combined table
                        labels_conj = [tuple(reversed(label)) for label in labels]
                        labels_conj = NP.asarray(labels_conj, dtype=labels.dtype)
                        labels_conj_appended = NP.concatenate((labels, labels_conj), axis=0)
                        gains_conj_appended = NP.concatenate((gains, gains.conj()), axis=0)
                        ind = NMO.find_list_in_list(labels_conj_appended, bl_labels)
                        selected_gains = gains_conj_appended[NP.ix_(ind.compressed(),freq_index,time_index)]
                        if ind.compressed().size == 1:
                            selected_gains = selected_gains.reshape(NP.sum(~ind.mask),freq_index.size,time_index.size)
                        # Apply gains only to the baselines found in the table;
                        # unmatched baselines keep their current (unity) gain
                        blgains[~ind.mask, ...] = blgains[~ind.mask, ...] * selected_gains

        # Transpose the result into the requested axes ordering.
        # NOTE(review): inp_order here carries the ordering of the LAST gain
        # group processed, while blgains was assembled in
        # ('label','frequency','time') order -- looks inconsistent when the
        # table ordering differs from that; confirm against callers
        if axes_order is None:
            axes_order = inp_order
        elif not isinstance(axes_order, (list, NP.ndarray)):
            raise TypeError('axes_order must be a list')
        else:
            if len(axes_order) != 3:
                raise ValueError('axes_order must be a three element list')
            for orderkey in ['label', 'frequency', 'time']:
                if orderkey not in axes_order:
                    raise ValueError('axes_order does not contain key "{0}"'.format(orderkey))

        transpose_order = NMO.find_list_in_list(inp_order, axes_order)
        blgains = NP.transpose(blgains, axes=transpose_order)
    return blgains
################################################################################
def hexagon_generator(spacing, n_total=None, n_side=None, orientation=None,
                      center=None):
    """
    ------------------------------------------------------------------------
    Generate a grid of antenna locations filling a regular hexagon.
    Primarily intended for HERA experiment.

    Inputs:

    spacing     [scalar] positive scalar specifying the spacing between
                antennas. Must be specified, no default.

    n_total     [scalar] positive integer specifying the total number of
                antennas to be placed in the hexagonal array. This value
                will be checked if it is valid for a regular hexagon. If
                n_total is specified, n_side must not be specified.
                Default = None.

    n_side      [scalar] positive integer specifying the number of antennas
                on the side of the hexagonal array. If n_side is specified,
                n_total should not be specified. Default = None

    orientation [scalar] counter-clockwise angle (in degrees) by which the
                principal axis of the hexagonal array is to be rotated.
                Default = None (means 0 degrees)

    center      [2-element list or numpy array] specifies the center of the
                array. Must be in the same units as spacing. The hexagonal
                array will be centered on this position.

    Outputs:

    Two element tuple with these elements in the following order:

    xy          [2-column numpy array] x- and y-locations. x is in the
                first column, y is in the second column. Number of rows
                equals n_total

    id          [list of strings] unique antenna identifiers, '0' through
                str(n_total - 1)

    Notes:

    If n_side is the number of antennas on the side of the hexagon, then
    n_total = 3*n_side**2 - 3*n_side + 1

    Raises NameError if spacing or both of n_total/n_side are missing,
    TypeError/ValueError on invalid argument types or values.
    ------------------------------------------------------------------------
    """

    try:
        spacing
    except NameError:
        raise NameError('No spacing provided.')

    if not isinstance(spacing, (int, float)):
        raise TypeError('spacing must be scalar value')
    if spacing <= 0:
        raise ValueError('spacing must be positive')

    if orientation is not None:
        if not isinstance(orientation, (int, float)):
            raise TypeError('orientation must be a scalar')

    if center is not None:
        if not isinstance(center, (list, NP.ndarray)):
            raise TypeError('center must be a list or numpy array')
        center = NP.asarray(center)
        if center.size != 2:
            raise ValueError('center should be a 2-element vector')
        center = center.reshape(1, -1)

    # Exactly one of n_total / n_side must be given
    if (n_total is None) and (n_side is None):
        raise NameError('n_total or n_side must be provided')
    elif (n_total is not None) and (n_side is not None):
        raise ValueError('Only one of n_total or n_side must be specified.')
    elif n_total is not None:
        if not isinstance(n_total, int):
            raise TypeError('n_total must be an integer')
        if n_total <= 0:
            raise ValueError('n_total must be positive')
    else:
        if not isinstance(n_side, int):
            raise TypeError('n_side must be an integer')
        if n_side <= 0:
            raise ValueError('n_side must be positive')

    if n_total is not None:
        # Solve 3*s^2 - 3*s + (1 - n_total) = 0 for the side length s
        sqroots = NP.roots([3.0, -3.0, 1.0 - n_total])
        valid_ind = NP.logical_and(sqroots.real >= 1, sqroots.imag == 0.0)
        if NP.any(valid_ind):
            # Take a true Python scalar (the original kept a 1-element array,
            # which breaks NP.arange below on modern numpy)
            sqroot = sqroots[valid_ind][0].real
        else:
            raise ValueError('No valid root found for the quadratic equation with the specified n_total')

        # int() replaces the removed NP.int alias (numpy >= 1.24)
        n_side = int(NP.round(sqroot))
        if 3*n_side**2 - 3*n_side + 1 != n_total:
            raise ValueError('n_total is not a valid number for a hexagonal array')
    else:
        n_total = 3*n_side**2 - 3*n_side + 1

    # Build rows of the hexagon above and below the central row.
    # float replaces the removed NP.float alias (numpy >= 1.24)
    xref = NP.arange(2*n_side - 1, dtype=float)
    xloc, yloc = [], []
    for i in range(1, n_side):
        x = xref[:-i] + i * NP.cos(NP.pi/3)      # one less antenna per row, displaced
        y = i * NP.sin(NP.pi/3) * NP.ones(2*n_side - 1 - i)

        xloc += x.tolist() * 2    # same x-locations for the top and bottom rows
        yloc += y.tolist()        # y-locations of the top row
        yloc += (-y).tolist()     # y-locations of the bottom row

    xloc += xref.tolist()                 # x-locations of the central row
    yloc += [0.0] * int(2*n_side - 1)     # y-locations of the central row

    if len(xloc) != len(yloc):
        raise ValueError('Sizes of x- and y-locations do not agree')

    # list() is required: Python 3 zip() is a lazy iterator with no len()
    xy = list(zip(xloc, yloc))
    if len(xy) != n_total:
        raise ValueError('Sizes of x- and y-locations do not agree with n_total')

    xy = NP.asarray(xy)
    xy = xy - NP.mean(xy, axis=0, keepdims=True)   # shift the center to origin

    if orientation is not None:                    # perform any rotation
        angle = NP.radians(orientation)
        rot_matrix = NP.asarray([[NP.cos(angle), -NP.sin(angle)],
                                 [NP.sin(angle),  NP.cos(angle)]])
        xy = NP.dot(xy, rot_matrix.T)

    xy *= spacing                                  # scale by the spacing
    if center is not None:                         # shift the center
        xy += center

    # Materialized list of ids (Python 3 map() would be a one-shot iterator)
    return (NP.asarray(xy), [str(i) for i in range(n_total)])
################################################################################
def rectangle_generator(spacing, n_side, orientation=None, center=None):
    """
    ------------------------------------------------------------------------
    Generate a grid of antenna locations filling a rectangular array.
    Primarily intended for HIRAX, CHIME and PAPER experiments

    Inputs:

    spacing     [scalar or 2-element list/numpy array] positive value(s)
                specifying the spacing between antennas along the x- and
                y-directions. A scalar applies to both directions. Must be
                specified, no default.

    n_side      [scalar or 2-element list/numpy array] positive integer(s)
                specifying the number of antennas on each side of the
                rectangular array. A scalar applies to both sides. Must be
                specified, no default.

    orientation [scalar] counter-clockwise angle (in degrees) by which the
                principal axis of the rectangular array is to be rotated.
                Default = None (means 0 degrees)

    center      [2-element list or numpy array] specifies the center of the
                array. Must be in the same units as spacing. The
                rectangular array will be centered on this position.

    Outputs:

    Two element tuple with these elements in the following order:

    xy          [2-column numpy array] x- and y-locations. x is in the
                first column, y is in the second column. Number of rows
                equals n_total = product of the two n_side values

    id          [list of strings] unique antenna identifiers, '0' through
                str(n_total - 1)
    ------------------------------------------------------------------------
    """

    try:
        spacing
    except NameError:
        raise NameError('No spacing provided.')

    if spacing is not None:
        if not isinstance(spacing, (int, float, list, NP.ndarray)):
            raise TypeError('spacing must be a scalar or list/numpy array')
        spacing = NP.asarray(spacing, dtype=float)
        if spacing.size < 2:
            spacing = NP.resize(spacing, (1, 2))   # scalar -> same spacing both ways
        # Bug fix: the original used NP.all(), which only rejected the input
        # when EVERY component was non-positive; any non-positive spacing is
        # invalid, so test with NP.any()
        if NP.any(spacing <= 0.0):
            raise ValueError('spacing must be positive')

    if orientation is not None:
        if not isinstance(orientation, (int, float)):
            raise TypeError('orientation must be a scalar')

    if center is not None:
        if not isinstance(center, (list, NP.ndarray)):
            raise TypeError('center must be a list or numpy array')
        center = NP.asarray(center)
        if center.size != 2:
            raise ValueError('center should be a 2-element vector')
        center = center.reshape(1, -1)

    if n_side is None:
        raise NameError('Atleast one value of n_side must be provided')
    if not isinstance(n_side, (int, float, list, NP.ndarray)):
        raise TypeError('n_side must be a scalar or list/numpy array')
    n_side = NP.asarray(n_side)
    if n_side.size < 2:
        n_side = NP.resize(n_side, (1, 2))         # scalar -> same count both ways
    # Same NP.all -> NP.any fix as for spacing
    if NP.any(n_side <= 0):
        raise ValueError('n_side must be positive')

    # int()/float() via ravel() replace the removed NP.asscalar (numpy >= 1.23).
    # The original's n_total = NP.prod(n_side, dtype=NP.uint8) was dead code
    # (immediately overwritten) and silently overflowed above 255 antennas.
    xn = int(n_side.ravel()[0])
    yn = int(n_side.ravel()[1])
    xs = float(spacing.ravel()[0])
    ys = float(spacing.ravel()[1])
    n_total = xn * yn

    # Evenly spaced, zero-mean coordinates along each axis, scaled by spacing
    x = NP.linspace(0, xn - 1, xn)
    x = (x - NP.mean(x)) * xs
    y = NP.linspace(0, yn - 1, yn)
    y = (y - NP.mean(y)) * ys

    xv, yv = NP.meshgrid(x, y)
    xy = NP.hstack((xv.reshape(-1, 1), yv.reshape(-1, 1)))
    if len(xy) != n_total:
        raise ValueError('Sizes of x- and y-locations do not agree with n_total')

    if orientation is not None:                    # perform any rotation
        angle = NP.radians(orientation)
        rot_matrix = NP.asarray([[NP.cos(angle), -NP.sin(angle)],
                                 [NP.sin(angle),  NP.cos(angle)]])
        xy = NP.dot(xy, rot_matrix.T)

    if center is not None:                         # shift the center
        xy += center

    # Materialized list of ids (Python 3 map() would be a one-shot iterator)
    return (NP.asarray(xy), [str(i) for i in range(n_total)])
################################################################################
def circular_antenna_array(antsize, minR, maxR=None):
    """
    ---------------------------------------------------------------------------
    Create antenna layout in circular ring(s) of minimum and maximum radius
    with antennas of a given size

    Inputs:

    antsize [scalar] Antenna size. Critical to determining the number of
            antenna elements that can be placed on a circle. No default.

    minR    [scalar] Minimum radius of the circular ring. Must be in the
            same units as antsize. No default. Clamped up to 0.5*antsize
            if specified smaller.

    maxR    [scalar] Maximum radius of circular ring. Must be >= minR.
            Default=None means maxR is set equal to minR.

    Outputs:

    Two element tuple with these elements in the following order:

    xy      [2-column numpy array] Antenna locations in the same units as
            antsize; number of rows equals the number of antenna locations
            generated, with x and y in the two columns.

    id      [list of strings] unique antenna identifiers, '0' through
            str(n_antennas - 1)
    ---------------------------------------------------------------------------
    """

    try:
        antsize, minR
    except NameError:
        raise NameError('antsize, and minR must be specified')

    if (antsize is None) or (minR is None):
        raise ValueError('antsize and minR cannot be NoneType')
    if not isinstance(antsize, (int, float)):
        raise TypeError('antsize must be a scalar')
    if antsize <= 0.0:
        raise ValueError('antsize must be positive')
    if not isinstance(minR, (int, float)):
        raise TypeError('minR must be a scalar')
    if minR <= 0.0:
        raise ValueError('minR must be positive')

    # Antennas of diameter antsize cannot sit on a ring tighter than their radius
    if minR < 0.5 * antsize:
        minR = 0.5 * antsize
    if maxR is None:
        maxR = minR
    if not isinstance(maxR, (int, float)):
        raise TypeError('maxR must be a scalar')
    elif maxR < minR:
        maxR = minR

    # Concentric rings spaced by one antenna size between minR and maxR
    if maxR - minR < antsize:
        radii = minR + NP.zeros(1)
    else:
        radii = minR + antsize * NP.arange((maxR - minR) / antsize)

    # Number of antennas per ring = circumference / antenna size.
    # astype(int) replaces the removed NP.int alias (numpy >= 1.24)
    nants = (2 * NP.pi * radii / antsize).astype(int)

    # Place antennas at uniform angles on each ring
    xpos, ypos = [], []
    for radius, nant in zip(radii, nants):
        angles = 2 * NP.pi * NP.arange(nant) / nant
        xpos += (radius * NP.cos(angles)).tolist()
        ypos += (radius * NP.sin(angles)).tolist()

    xy = NP.hstack((NP.asarray(xpos).reshape(-1, 1),
                    NP.asarray(ypos).reshape(-1, 1)))

    # Materialized list of ids (Python 3 map() would be a one-shot iterator)
    return (xy, [str(i) for i in range(int(NP.sum(nants)))])
################################################################################
def baseline_generator(antenna_locations, ant_label=None, ant_id=None,
auto=False, conjugate=False):
"""
---------------------------------------------------------------------------
Generate baseline from antenna locations.
Inputs:
antenna_locations: List of tuples containing antenna coordinates,
or list of instances of class Point containing
antenna coordinates, or Numpy array (Nx3) array
with each row specifying an antenna location.
Input keywords:
ant_label [list of strings] Unique string identifier for each
antenna. Default = None. If None provided,
antennas will be indexed by an integer starting
from 0 to N(ants)-1
ant_id [list of integers] Unique integer identifier for each
antenna. Default = None. If None provided,
antennas will be indexed by an integer starting
from 0 to N(ants)-1
auto: [Default=False] If True, compute zero spacings of
antennas with themselves.
conjugate: [Default=False] If True, compute conjugate
baselines.
Output:
baseline_locations: Baseline locations in the same data type as
antenna locations (list of tuples, list of
instances of class Point or Numpy array of size
Nb x 3 with each row specifying one baseline
vector)
antpair_labels [Numpy structured array tuples] Labels of
antennas in the pair used to produce the
baseline vector under fields 'A2' and 'A1' for
second and first antenna respectively. The
baseline vector is obtained by position of
antennas under 'A2' minus position of antennas
under 'A1'
antpair_ids [Numpy structured array tuples] IDs of antennas
in the pair used to produce the baseline vector
under fields 'A2' and 'A1' for second and first
antenna respectively. The baseline vector is
obtained by position of antennas under 'A2'
minus position of antennas under 'A1'
-------------------------------------------------------------------
"""
try:
antenna_locations
except NameError:
warnings.warn('No antenna locations supplied. Returning from baseline_generator()')
return None
inp_type = 'tbd'
if not isinstance(antenna_locations, NP.ndarray):
if isinstance(antenna_locations, list):
if isinstance(antenna_locations[0], GEOM.Point):
inp_type = 'loo' # list of objects
elif isinstance(antenna_locations[0], tuple):
inp_type = 'lot' # list of tuples
antenna_locations = [(tuple(loc) if len(loc) == 3 else (tuple([loc[0],0.0,0.0]) if len(loc) == 1 else (tuple([loc[0],loc[1],0.0]) if len(loc) == 2 else (tuple([loc[0],loc[1],loc[2]]))))) for loc in antenna_locations if len(loc) != 0] # Remove empty tuples and validate the data range and data type for antenna locations. Force it to have three components for every antenna location.
elif isinstance(antenna_locations, GEOM.Point):
if not auto:
warnings.warn('No non-zero spacings found since auto=False.')
return None
else:
return GEOM.Point()
elif isinstance(antenna_locations, tuple):
if not auto:
warnings.warn('No non-zero spacings found since auto=False.')
return None
else:
return (0.0,0.0,0.0)
else:
if not auto:
warnings.warn('No non-zero spacings found since auto=False.')
return None
else:
return (0.0,0.0,0.0)
else:
inp_type = 'npa' # A numpy array
if antenna_locations.shape[0] == 1:
if not auto:
warnings.warn('No non-zero spacings found since auto=False.')
return None
else:
return NP.zeros(1,3)
else:
if antenna_locations.shape[1] > 3:
antenna_locations = antenna_locations[:,:3]
elif antenna_locations.shape[1] < 3:
antenna_locations = NP.hstack((antenna_locations, NP.zeros((antenna_locations.shape[0],3-antenna_locations.shape[1]))))
if isinstance(antenna_locations, list):
num_ants = len(antenna_locations)
else:
num_ants = antenna_locations.shape[0]
if ant_label is not None:
if isinstance(ant_label, list):
if len(ant_label) != num_ants:
raise ValueError('Dimensions of ant_label and antenna_locations do not match.')
elif isinstance(ant_label, NP.ndarray):
if ant_label.size != num_ants:
raise ValueError('Dimensions of ant_label and antenna_locations do not match.')
ant_label = ant_label.tolist()
else:
ant_label = ['{0:0d}'.format(i) for i in xrange(num_ants)]
if ant_id is not None:
if isinstance(ant_id, list):
if len(ant_id) != num_ants:
raise ValueError('Dimensions of ant_id and antenna_locations do not match.')
elif isinstance(ant_id, NP.ndarray):
if ant_id.size != num_ants:
raise ValueError('Dimensions of ant_id and antenna_locations do not match.')
ant_id = ant_id.tolist()
else:
ant_id = range(num_ants)
if inp_type == 'loo': # List of objects
if auto:
baseline_locations = [antenna_locations[j]-antenna_locations[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
# antpair_labels = [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
antpair_labels = [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
antpair_ids = [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
else:
baseline_locations = [antenna_locations[j]-antenna_locations[i] for i in range(0,num_ants) for j in range(0,num_ants) if j > i]
# antpair_labels = [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
antpair_labels = [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
antpair_ids = [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
if conjugate:
baseline_locations += [antenna_locations[j]-antenna_locations[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
# antpair_labels += [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
antpair_labels += [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
antpair_ids += [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
elif inp_type == 'lot': # List of tuples
if auto:
baseline_locations = [tuple((antenna_locations[j][0]-antenna_locations[i][0], antenna_locations[j][1]-antenna_locations[i][1], antenna_locations[j][2]-antenna_locations[i][2])) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
# antpair_labels = [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
antpair_labels = [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
antpair_ids = [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
else:
baseline_locations = [tuple((antenna_locations[j][0]-antenna_locations[i][0], antenna_locations[j][1]-antenna_locations[i][1], antenna_locations[j][2]-antenna_locations[i][2])) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
# antpair_labels = [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
antpair_labels = [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
antpair_ids = [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
if conjugate:
baseline_locations += [tuple((antenna_locations[j][0]-antenna_locations[i][0], antenna_locations[j][1]-antenna_locations[i][1], antenna_locations[j][2]-antenna_locations[i][2])) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
# antpair_labels += [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
antpair_labels += [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
antpair_ids += [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
elif inp_type == 'npa': # Numpy array
if auto:
baseline_locations = [antenna_locations[j,:]-antenna_locations[i,:] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
# antpair_labels = [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
antpair_labels = [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
antpair_ids = [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j >= i]
else:
baseline_locations = [antenna_locations[j,:]-antenna_locations[i,:] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
# antpair_labels = [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
antpair_labels = [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
antpair_ids = [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j > i]
if conjugate:
baseline_locations += [antenna_locations[j,:]-antenna_locations[i,:] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
# antpair_labels += [ant_label[j]+'-'+ant_label[i] for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
antpair_labels += [(ant_label[j], ant_label[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
antpair_ids += [(ant_id[j], ant_id[i]) for i in xrange(0,num_ants) for j in xrange(0,num_ants) if j < i]
baseline_locations = NP.asarray(baseline_locations)
maxlen = max(len(albl) for albl in ant_label)
antpair_labels = NP.asarray(antpair_labels, dtype=[('A2', '|S{0:0d}'.format(maxlen)), ('A1', '|S{0:0d}'.format(maxlen))])
antpair_ids = NP.asarray(antpair_ids, dtype=[('A2', int), ('A1', int)])
return baseline_locations, antpair_labels, antpair_ids
#################################################################################
def uniq_baselines(baseline_locations, redundant=None):
    """
    ---------------------------------------------------------------------------
    Identify unique, redundant or non-redundant baselines from a given set of
    baseline locations.

    Inputs:

    baseline_locations [2- or 3-column numpy array] Each row of the array
                       specifies a baseline vector from which the required
                       set of baselines have to be identified

    redundant          [None or boolean] If set to None (default), all the
                       unique baselines including redundant and non-redundant
                       baselines are returned. If set to True, only redundant
                       baselines that occur more than once are returned. If set
                       to False, only non-redundant baselines that occur
                       exactly once are returned.

    Output:

    4-element tuple with the selected baselines, their unique indices in the
    input, their count and the indices of all occurences of each unique
    baseline. The first element of this tuple is a 3-column numpy array
    which is a subset of baseline_locations containing the requested type of
    baselines. The second element of the tuple contains the selected indices
    of the input array from which the first element in the tuple is determined
    relative to the input array. The third element of the tuple contains the
    count of these selected baselines. In case of redundant and unique
    baselines, the order of repeated baselines does not matter and any one of
    those baselines could be returned without preserving the order. The fourth
    element in the tuple contains a list of lists where each element in the
    top level list corresponds to a unique baseline and consists of indices
    of all occurrences of input baselines redundant with this unique baseline
    ---------------------------------------------------------------------------
    """

    # Input validation: the name must be bound and must be a numpy array
    try:
        baseline_locations
    except NameError:
        raise NameError('baseline_locations not provided')
    if not isinstance(baseline_locations, NP.ndarray):
        raise TypeError('baseline_locations must be a numpy array')
    if redundant is not None:
        if not isinstance(redundant, bool):
            raise TypeError('keyword "redundant" must be set to None or a boolean value')
    # Pad (with zeros) or trim the baseline vectors to exactly 3 columns
    blshape = baseline_locations.shape
    if blshape[1] > 3:
        baseline_locations = baseline_locations[:,:3]
    elif blshape[1] < 3:
        baseline_locations = NP.hstack((baseline_locations, NP.zeros((blshape[0],3-blshape[1]))))

    # Orientation angle (deg) in the East-North plane, folded into [0, 180)
    # so a baseline and its conjugate (negated) counterpart hash identically
    blo = NP.angle(baseline_locations[:,0] + 1j * baseline_locations[:,1], deg=True)
    blo[blo >= 180.0] -= 180.0
    blo[blo < 0.0] += 180.0
    bll = NP.sqrt(NP.sum(baseline_locations**2, axis=1))
    # Angle from zenith (z-axis) in degrees
    blza = NP.degrees(NP.arccos(baseline_locations[:,2] / bll))
    # Quantize each baseline into a 'length_zenithangle_orientation' string
    # (length to 0.01 m, angles scaled by 3600 and kept to 3 decimals) so that
    # redundancy can be detected by exact string equality
    blstr = ['{0[0]:.2f}_{0[1]:.3f}_{0[2]:.3f}'.format(lo) for lo in zip(bll,3.6e3*blza,3.6e3*blo)]
    uniq_blstr, ind, invind = NP.unique(blstr, return_index=True, return_inverse=True) ## if numpy.__version__ < 1.9.0
    # uniq_blstr, ind, invind, frequency = NP.unique(blstr, return_index=True, return_inverse=True, return_counts=True) ## if numpy.__version__ >= 1.9.0
    # O(n^2) occurrence counting kept for numpy < 1.9.0 compatibility (see
    # commented return_counts alternative above); order matches uniq_blstr
    count_blstr = [(ubstr,blstr.count(ubstr)) for ubstr in uniq_blstr] ## if numpy.__version__ < 1.9.0
    if redundant is None:
        # Keep every unique baseline (redundant or not)
        retind = NP.copy(ind)
        counts = [tup[1] for tup in count_blstr]
        counts = NP.asarray(counts)
    else:
        if not redundant:
            ## if numpy.__version__ < 1.9.0
            # Keep only baselines that occur exactly once
            non_redn_ind = [i for i,tup in enumerate(count_blstr) if tup[1] == 1]
            retind = ind[NP.asarray(non_redn_ind)]
            counts = NP.ones(retind.size)
        else:
            ## if numpy.__version__ < 1.9.0
            # Keep only baselines that occur more than once
            redn_ind_counts = [(i,tup[1]) for i,tup in enumerate(count_blstr) if tup[1] > 1]
            redn_ind, counts = zip(*redn_ind_counts)
            retind = ind[NP.asarray(redn_ind)]
            counts = NP.asarray(counts)

    # For each selected unique baseline, gather indices of all input baselines
    # that map onto it (per the docstring contract of the fourth return value)
    allinds_where_found = NMO.find_all_occurrences_list1_in_list2(invind[retind], invind)

    return (baseline_locations[retind,:], retind, counts, allinds_where_found)
#################################################################################
def getBaselineInfo(inpdict):
    """
    ---------------------------------------------------------------------------
    Generate full baseline info from a given layout and return information
    about redundancy and the mapping between unique and redundant baselines

    Input:

    inpdict     [dictionary] It contains the following keys and values:
                'array'     [dictionary] It contains the following keys and
                            values:
                            'redundant' [boolean] If this key is present, it
                                        says whether the array could be
                                        redundant (true) or not (false). If key
                                        is absent, this value is assumed to be
                                        true. When it is set to true, it
                                        basically checks for redundancy
                                        otherwise not. It is not meant to say
                                        if the array is actually redundant or
                                        not but only used for redundancy check
                                        to happen or not
                            'layout'    [string] Preset array layouts mutually
                                        exclusive to antenna file. Only one of
                                        these must be specified. Accepted
                                        values are 'MWA-I-128T'
                                        (MWA Phase I 128-tile),
                                        'MWA-II-Hex-LB' (MWA Phase II Hex
                                        and Long Baselines),
                                        'MWA-II-compact' (MWA Phase II
                                        compact=core + 2Hex baselines),
                                        'MWA-II-LB' (MWA Phase II Long
                                        Baselines), 'HERA-7', 'HERA-19',
                                        'HERA-37', 'HERA-61', 'HERA-91',
                                        'HERA-127', 'HERA-169', 'HERA-217',
                                        'HERA-271', 'HERA-331', 'PAPER-64',
                                        'PAPER-112', 'HIRAX-1024', 'CHIME',
                                        'GMRT', 'CIRC', or None (if layout file
                                        is specified).
                            'file'      [string] File containing antenna
                                        locations parsed according to info in
                                        parser (see below). If preset layout is
                                        specified, this must be set to None.
                            'filepathtype'
                                        [string] Accepted values are 'default'
                                        (if layout file can be found in prisim
                                        path, namely,
                                        prisim/data/array_layouts folder)
                                        and 'custom'. If set to 'default', only
                                        filename should be specified in file
                                        and it will be searched in the default
                                        array_layouts folder
                                        prisim/data/array_layouts.
                                        If set to 'custom' then the full path
                                        to the file must be specified.
                            'parser'    [dictionary] Will be used for parsing
                                        the file if file is specified for array
                                        layout. It contains the following keys
                                        and values:
                                        'comment'   [string] Character used to
                                                    denote commented lines to
                                                    be ignored.
                                                    Default=None ('#')
                                        'delimiter' [string] Delimiter string.
                                                    Accepted values are
                                                    whitespace (default or
                                                    None), ',' and '|'
                                        'data_start'
                                                    [integer] Line index for
                                                    the start of data not
                                                    counting comment or blank
                                                    lines. A line with only
                                                    whitespace is considered
                                                    blank. It is required. No
                                                    defaults. Indexing starts
                                                    from 0
                                        'data_end'  [integer] Line index for
                                                    the end of data not
                                                    counting comment or blank
                                                    lines. This value can be
                                                    negative to count from the
                                                    end. Default is None (all
                                                    the way to end of file).
                                                    Indexing starts from 0.
                                        'header_start'
                                                    [integer] Line index for
                                                    the header line not
                                                    counting comment or blank
                                                    lines. A line with only
                                                    whitespace is considered
                                                    blank. Must be provided. No
                                                    defaults
                                        'label'     [string] String in the
                                                    header containing antenna
                                                    labels. If set to None
                                                    (default), antenna labels
                                                    will be automatically
                                                    assigned. e.g. of some
                                                    accepted values are None,
                                                    'label', 'id', 'antid',
                                                    etc. This must be found in
                                                    the header
                                        'east'      [string] String specifying
                                                    East coordinates in the
                                                    header and data. Must be
                                                    provided. No defaults.
                                        'north'     [string] String specifying
                                                    North coordinates in the
                                                    header and data. Must be
                                                    provided. No defaults.
                                        'up'        [string] String specifying
                                                    elevation coordinates in
                                                    the header and data. Must
                                                    be provided. No defaults.
                            'minR'      [string] Minimum radius of circular
                                        ring. Applies only when layout = 'CIRC'
                            'maxR'      [string] Maximum radius of circular
                                        ring. Applies only when layout = 'CIRC'
                            'rms_tgtplane'
                                        [float] Perturbation of antenna
                                        positions (in m) in tangent plane.
                                        Default=0.0
                            'rms_elevation'
                                        [float] Perturbation of antenna
                                        positions (in m) in perpendicular to
                                        tangent plane. Default=0.0
                            'seed'      [integer] Random number seed for
                                        antenna position perturbations.
                                        Default=None means no fixed seed
                'baseline'  [dictionary] Parameters specifying baseline
                            selection criteria. It consists of the following
                            keys and values:
                            'min'       [float] Minimum baseline in distance
                                        units (m). Default=None (0.0)
                            'max'       [float] Maximum baseline in distance
                                        units (m). Default=None (max baseline)
                            'direction' [string] Baseline vector directions to
                                        select. Default=None (all directions).
                                        Other accepted values are 'E' (east)
                                        'SE' (south-east), 'NE' (north-east),
                                        and 'N' (north). Multiple values from
                                        this accepted list can be specified
                                        as a list of strings.
                                        e.g., ['N', 'E'], ['NE', 'SE', 'E'],
                                        ['SE', 'E', 'NE', 'N'] which is
                                        equivalent to None, etc.
                'skyparm'   [dictionary] Sky model specification. It contains
                            the following keys and values:
                            'model'     [string] Sky model. Accepted values
                                        are 'csm' (NVSS+SUMSS point sources),
                                        'dsm' (diffuse emission), 'asm' (both
                                        point sources and diffuse emission),
                                        'sumss' (SUMSS catalog), nvss (NVSS
                                        catalog), 'mss' (Molonglo Sky Survey),
                                        'gleam' (GLEAM catalog), 'custom'
                                        (user-defined catalog), 'usm' (uniform
                                        sky model), 'mwacs' (MWACS catalog),
                                        'HI_monopole' (global EoR), HI_cube (HI
                                        cube from external simulations), and
                                        'HI_fluctuations' (HI fluctuations with
                                        the global mean signal removed). If set
                                        'HI_monopole' or 'monopole' the
                                        orientation of the baseline vector does
                                        not matter and only unique baseline
                                        lengths will be selected if value under
                                        'redundant' key is set to True.

    Output:

    Dictionary containing the following keys and values.
    'bl'        [numpy array] Baseline vectors (unique ones or all depending on
                value in key 'redundant'). It is of shape nbl x 3 and will
                consist of unique baselines if value under key 'redundant' was
                set to True. Otherwise, redundancy will not be checked and all
                baselines will be returned.
    'label'     [numpy recarray] A unique label of each of the baselines.
                Shape is nbl where each element is a recarray under fields 'A1'
                (first antenna label) and 'A2' (second antenna label)
    'id'        [numpy recarray] A unique identifier of each of the baselines.
                Shape is nbl where each element is a recarray under fields 'A1'
                (first antenna id) and 'A2' (second antenna id)
    'redundancy'
                [boolean] If the array was originally found to be made of
                unique baselines (False) or redundant baselines were found
                (True). Even if set to False, the baselines may still be
                redundant because redundancy may never have been checked if
                value under key 'redundant' was set to False
    'groups'
                [dictionary] Contains the grouping of unique baselines and the
                redundant baselines as numpy recarray under each unique
                baseline category/flavor. It contains as keys the labels (tuple
                of A1, A2) of unique baselines and the value under each of
                these keys is a list of baseline labels that are redundant
                under that category
    'reversemap'
                [dictionary] Contains the baseline category for each baseline.
                The keys are baseline labels as tuple and the value under each
                key is the label of the unique baseline category that it falls
                under.
    'layout_info'
                [dictionary] Contains the antenna layout information with the
                following keys and values:
                'positions' [numpy array] Antenna locations with shape nant x 3
                'labels'    [numpy array of strings] Antenna labels of size
                            nant
                'ids'       [numpy array of strings] Antenna IDs of size nant
                'coords'    [string] Coordinate system in which antenna
                            locations are specified. Currently only returns
                            'ENU' for East-North-Up coordinate system
    ---------------------------------------------------------------------------
    """

    # --- Validate top-level input and extract configuration ---
    try:
        inpdict
    except NameError:
        raise NameError('Input inpdict must be specified')
    if not isinstance(inpdict, dict):
        raise TypeError('Input inpdict must be a dictionary')
    if 'array' in inpdict:
        if 'redundant' in inpdict['array']:
            array_is_redundant = inpdict['array']['redundant']
        else:
            # Redundancy checking defaults to on when the key is absent
            array_is_redundant = True
    else:
        raise KeyError('Key "array" not found in input inpdict')
    sky_str = inpdict['skyparm']['model']
    use_HI_monopole = False
    if sky_str == 'HI_monopole':
        # For a monopole sky only baseline lengths matter (see docstring);
        # triggers the unique-length subselection further below
        use_HI_monopole = True
    antenna_file = inpdict['array']['file']
    array_layout = inpdict['array']['layout']
    minR = inpdict['array']['minR']
    maxR = inpdict['array']['maxR']
    antpos_rms_tgtplane = inpdict['array']['rms_tgtplane']
    antpos_rms_elevation = inpdict['array']['rms_elevation']
    antpos_rms_seed = inpdict['array']['seed']
    if antpos_rms_seed is None:
        # No seed supplied: draw one so the perturbations are still reproducible
        # if the seed is logged elsewhere
        antpos_rms_seed = NP.random.randint(1, high=100000)
    elif isinstance(antpos_rms_seed, (int,float)):
        antpos_rms_seed = int(NP.abs(antpos_rms_seed))
    else:
        raise ValueError('Random number seed must be a positive integer')
    minbl = inpdict['baseline']['min']
    maxbl = inpdict['baseline']['max']
    bldirection = inpdict['baseline']['direction']

    # Exactly one of file or preset layout must be specified
    if (antenna_file is None) and (array_layout is None):
        raise ValueError('One of antenna array file or layout must be specified')
    if (antenna_file is not None) and (array_layout is not None):
        raise ValueError('Only one of antenna array file or layout must be specified')

    # --- Load antenna positions: either from a parsed layout file... ---
    if antenna_file is not None:
        if not isinstance(antenna_file, str):
            raise TypeError('Filename containing antenna array elements must be a string')
        if inpdict['array']['filepathtype'] == 'default':
            antenna_file = prisim_path+'data/array_layouts/'+antenna_file
        # Validate and default the parser options before reading the table
        antfile_parser = inpdict['array']['parser']
        if 'comment' in antfile_parser:
            comment = antfile_parser['comment']
            if comment is None:
                comment = '#'
            elif not isinstance(comment, str):
                raise TypeError('Comment expression must be a string')
        else:
            comment = '#'
        if 'delimiter' in antfile_parser:
            delimiter = antfile_parser['delimiter']
            if delimiter is not None:
                if not isinstance(delimiter, str):
                    raise TypeError('Delimiter expression must be a string')
            else:
                delimiter = ' '
        else:
            delimiter = ' '
        if 'data_start' in antfile_parser:
            data_start = antfile_parser['data_start']
            if not isinstance(data_start, int):
                raise TypeError('data_start parameter must be an integer')
        else:
            raise KeyError('data_start parameter not provided')
        if 'data_end' in antfile_parser:
            data_end = antfile_parser['data_end']
            if data_end is not None:
                if not isinstance(data_end, int):
                    raise TypeError('data_end parameter must be an integer')
        else:
            data_end = None
        if 'header_start' in antfile_parser:
            header_start = antfile_parser['header_start']
            if not isinstance(header_start, int):
                raise TypeError('header_start parameter must be an integer')
        else:
            raise KeyError('header_start parameter not provided')
        if 'label' not in antfile_parser:
            antfile_parser['label'] = None
        elif antfile_parser['label'] is not None:
            antfile_parser['label'] = str(antfile_parser['label'])
        if 'east' not in antfile_parser:
            raise KeyError('Keyword for "east" coordinates not provided')
        else:
            if not isinstance(antfile_parser['east'], str):
                raise TypeError('Keyword for "east" coordinates must be a string')
        if 'north' not in antfile_parser:
            raise KeyError('Keyword for "north" coordinates not provided')
        else:
            if not isinstance(antfile_parser['north'], str):
                raise TypeError('Keyword for "north" coordinates must be a string')
        if 'up' not in antfile_parser:
            raise KeyError('Keyword for "up" coordinates not provided')
        else:
            if not isinstance(antfile_parser['up'], str):
                raise TypeError('Keyword for "up" coordinates must be a string')

        # Read the antenna table (astropy.io.ascii) with the validated options
        try:
            ant_info = ascii.read(antenna_file, comment=comment, delimiter=delimiter, header_start=header_start, data_start=data_start, data_end=data_end, guess=False)
        except IOError:
            raise IOError('Could not open file containing antenna locations.')
        if (antfile_parser['east'] not in ant_info.colnames) or (antfile_parser['north'] not in ant_info.colnames) or (antfile_parser['up'] not in ant_info.colnames):
            raise KeyError('One of east, north, up coordinates incompatible with the table in antenna_file')
        if antfile_parser['label'] is not None:
            ant_label = ant_info[antfile_parser['label']].data.astype('str')
        else:
            # Auto-assign labels '0', '1', ... when no label column is given
            ant_label = NP.arange(len(ant_info)).astype('str')
        east = ant_info[antfile_parser['east']].data
        north = ant_info[antfile_parser['north']].data
        elev = ant_info[antfile_parser['up']].data
        # NOTE(review): NP.float was removed in NumPy >= 1.20; this code targets
        # an older NumPy (and Python 2, given xrange below) — confirm pinning
        if (east.dtype != NP.float) or (north.dtype != NP.float) or (elev.dtype != NP.float):
            raise TypeError('Antenna locations must be of floating point type')
        ant_locs = NP.hstack((east.reshape(-1,1), north.reshape(-1,1), elev.reshape(-1,1)))
    # --- ...or from a preset layout ---
    else:
        if array_layout not in ['MWA-I-128T', 'MWA-II-Hex-LB', 'MWA-II-compact', 'MWA-II-LB', 'HERA-7', 'HERA-19', 'HERA-37', 'HERA-61', 'HERA-91', 'HERA-127', 'HERA-169', 'HERA-217', 'HERA-271', 'HERA-331', 'PAPER-64', 'PAPER-112', 'HIRAX-1024', 'CHIME', 'GMRT', 'CIRC']:
            raise ValueError('Invalid array layout specified')

        if array_layout in ['MWA-I-128T', 'MWA-II-Hex-LB', 'MWA-II-compact', 'MWA-II-LB']:
            # MWA layouts ship as coordinate files inside the prisim package
            comment = '#'
            delimiter = ' '
            header_start = 0
            data_start = 2
            data_end = None
            antfile = array_layout + '_tile_coordinates.txt'
            ant_info = ascii.read(prisim_path+'data/array_layouts/'+antfile, comment=comment, delimiter=delimiter, header_start=header_start, data_start=data_start, data_end=data_end, guess=False)
            ant_label = ant_info['Tile'].data.astype('str')
            east = ant_info['East'].data
            north = ant_info['North'].data
            elev = ant_info['Height'].data
            ant_locs = NP.hstack((east.reshape(-1,1), north.reshape(-1,1), elev.reshape(-1,1)))
        # HERA layouts: hexagonal grids with 14.6 m element spacing
        elif array_layout == 'HERA-7':
            ant_locs, ant_label = hexagon_generator(14.6, n_total=7)
        elif array_layout == 'HERA-19':
            ant_locs, ant_label = hexagon_generator(14.6, n_total=19)
        elif array_layout == 'HERA-37':
            ant_locs, ant_label = hexagon_generator(14.6, n_total=37)
        elif array_layout == 'HERA-61':
            ant_locs, ant_label = hexagon_generator(14.6, n_total=61)
        elif array_layout == 'HERA-91':
            ant_locs, ant_label = hexagon_generator(14.6, n_total=91)
        elif array_layout == 'HERA-127':
            ant_locs, ant_label = hexagon_generator(14.6, n_total=127)
        elif array_layout == 'HERA-169':
            ant_locs, ant_label = hexagon_generator(14.6, n_total=169)
        elif array_layout == 'HERA-217':
            ant_locs, ant_label = hexagon_generator(14.6, n_total=217)
        elif array_layout == 'HERA-271':
            ant_locs, ant_label = hexagon_generator(14.6, n_total=271)
        elif array_layout == 'HERA-331':
            ant_locs, ant_label = hexagon_generator(14.6, n_total=331)
        # PAPER / HIRAX / CHIME layouts: rectangular grids
        elif array_layout == 'PAPER-64':
            ant_locs, ant_label = rectangle_generator([30.0, 4.0], [8, 8])
        elif array_layout == 'PAPER-112':
            ant_locs, ant_label = rectangle_generator([15.0, 4.0], [16, 7])
        elif array_layout == 'HIRAX-1024':
            ant_locs, ant_label = rectangle_generator(7.0, n_side=32)
        elif array_layout == 'CHIME':
            ant_locs, ant_label = rectangle_generator([20.0, 0.3], [5, 256])
        elif array_layout == 'GMRT':
            comment = '#'
            delimiter = ' '
            header_start = 0
            data_start = 2
            data_end = None
            antfile = 'GMRT_antenna_coordinates.txt'
            ant_info = ascii.read(prisim_path+'data/array_layouts/'+antfile, comment=comment, delimiter=delimiter, header_start=header_start, data_start=data_start, data_end=data_end, guess=False)
            ant_label = ant_info['Station'].data.astype('str')
            east = ant_info['east'].data
            north = ant_info['north'].data
            elev = ant_info['up'].data
            ant_locs = NP.hstack((east.reshape(-1,1), north.reshape(-1,1), elev.reshape(-1,1)))
        elif array_layout == 'CIRC':
            # NOTE(review): element_size is not defined anywhere in this
            # function's scope — presumably it should be read from inpdict.
            # As written this branch raises NameError unless element_size is a
            # module-level name; confirm.
            ant_locs, ant_label = circular_antenna_array(element_size, minR, maxR=maxR)
        ant_label = NP.asarray(ant_label)
    # Pad 2D (E,N) layouts with a zero elevation column
    if ant_locs.shape[1] == 2:
        ant_locs = NP.hstack((ant_locs, NP.zeros(ant_label.size).reshape(-1,1)))

    # --- Apply seeded Gaussian position perturbations (tangent plane + up) ---
    antpos_rstate = NP.random.RandomState(antpos_rms_seed)
    # rms_tgtplane is the total in-plane RMS, so split equally between E and N
    deast = antpos_rms_tgtplane/NP.sqrt(2.0) * antpos_rstate.randn(ant_label.size)
    dnorth = antpos_rms_tgtplane/NP.sqrt(2.0) * antpos_rstate.randn(ant_label.size)
    dup = antpos_rms_elevation * antpos_rstate.randn(ant_label.size)
    denu = NP.hstack((deast.reshape(-1,1), dnorth.reshape(-1,1), dup.reshape(-1,1)))
    ant_locs = ant_locs + denu
    ant_locs_orig = NP.copy(ant_locs)
    ant_label_orig = NP.copy(ant_label)
    ant_id = NP.arange(ant_label.size, dtype=int)
    ant_id_orig = NP.copy(ant_id)
    layout_info = {'positions': ant_locs_orig, 'labels': ant_label_orig, 'ids': ant_id_orig, 'coords': 'ENU'}

    # --- Generate all baselines (no autocorrelations, no conjugates) ---
    bl_orig, bl_label_orig, bl_id_orig = baseline_generator(ant_locs_orig, ant_label=ant_label_orig, ant_id=ant_id_orig, auto=False, conjugate=False)

    # Fold every baseline into the canonical orientation range (-67.5, 112.5]
    # degrees by negating vectors (and swapping their antenna labels/ids)
    blo = NP.angle(bl_orig[:,0] + 1j * bl_orig[:,1], deg=True)
    neg_blo_ind = (blo < -67.5) | (blo > 112.5)
    bl_orig[neg_blo_ind,:] = -1.0 * bl_orig[neg_blo_ind,:]
    blo = NP.angle(bl_orig[:,0] + 1j * bl_orig[:,1], deg=True)
    maxlen = max(max(len(albl[0]), len(albl[1])) for albl in bl_label_orig)
    bl_label_orig = [tuple(reversed(bl_label_orig[i])) if neg_blo_ind[i] else bl_label_orig[i] for i in xrange(bl_label_orig.size)]
    bl_label_orig = NP.asarray(bl_label_orig, dtype=[('A2', '|S{0:0d}'.format(maxlen)), ('A1', '|S{0:0d}'.format(maxlen))])
    bl_id_orig = [tuple(reversed(bl_id_orig[i])) if neg_blo_ind[i] else bl_id_orig[i] for i in xrange(bl_id_orig.size)]
    bl_id_orig = NP.asarray(bl_id_orig, dtype=[('A2', int), ('A1', int)])

    # Stable sort of all baselines by length (mergesort keeps equal-length
    # baselines in their original relative order)
    bl_length_orig = NP.sqrt(NP.sum(bl_orig**2, axis=1))
    sortind_orig = NP.argsort(bl_length_orig, kind='mergesort')
    bl_orig = bl_orig[sortind_orig,:]
    blo = blo[sortind_orig]
    bl_label_orig = bl_label_orig[sortind_orig]
    bl_id_orig = bl_id_orig[sortind_orig]
    bl_length_orig = bl_length_orig[sortind_orig]
    bl = NP.copy(bl_orig)
    bl_label = NP.copy(bl_label_orig)
    bl_id = NP.copy(bl_id_orig)
    bl_orientation = NP.copy(blo)

    # --- Optionally collapse to unique baselines via uniq_baselines ---
    if array_is_redundant:
        bl, select_bl_ind, bl_count, allinds = uniq_baselines(bl)
    else:
        # Redundancy not checked: every baseline is its own group of one
        select_bl_ind = NP.arange(bl.shape[0])
        bl_count = NP.ones(bl.shape[0], dtype=int)
        allinds = select_bl_ind.reshape(-1,1).tolist()
    bl_label = bl_label[select_bl_ind]
    bl_id = bl_id[select_bl_ind]
    bl_orientation = bl_orientation[select_bl_ind]
    if NP.any(bl_count > 1):
        redundancy = True
    else:
        redundancy = False

    # Re-sort the (possibly reduced) set by length, carrying all bookkeeping
    bl_length = NP.sqrt(NP.sum(bl**2, axis=1))
    sortind = NP.argsort(bl_length, kind='mergesort')
    bl = bl[sortind,:]
    bl_label = bl_label[sortind]
    bl_id = bl_id[sortind]
    bl_length = bl_length[sortind]
    bl_orientation = bl_orientation[sortind]
    bl_count = bl_count[sortind]
    select_bl_ind = select_bl_ind[sortind]
    allinds = [allinds[i] for i in sortind]

    # --- Apply length and direction selection criteria ---
    if minbl is None:
        minbl = 0.0
    elif not isinstance(minbl, (int,float)):
        raise TypeError('Minimum baseline length must be a scalar')
    elif minbl < 0.0:
        minbl = 0.0
    if maxbl is None:
        maxbl = bl_length.max()
    elif not isinstance(maxbl, (int,float)):
        raise TypeError('Maximum baseline length must be a scalar')
    elif maxbl < minbl:
        maxbl = bl_length.max()
    # NOTE(review): min_blo/max_blo are unused below; the direction bins
    # hard-code the same -67.5/112.5 limits
    min_blo = -67.5
    max_blo = 112.5
    # NOTE(review): NP.bool was removed in NumPy >= 1.20 (see NP.float above)
    subselect_bl_ind = NP.zeros(bl_length.size, dtype=NP.bool)
    if bldirection is not None:
        if isinstance(bldirection, str):
            if bldirection not in ['SE', 'E', 'NE', 'N']:
                raise ValueError('Invalid baseline direction criterion specified')
            else:
                bldirection = [bldirection]
        if isinstance(bldirection, list):
            # Each direction keyword selects a 45-degree orientation bin
            for direction in bldirection:
                if direction in ['SE', 'E', 'NE', 'N']:
                    if direction == 'SE':
                        oind = (bl_orientation >= -67.5) & (bl_orientation < -22.5)
                        subselect_bl_ind[oind] = True
                    elif direction == 'E':
                        oind = (bl_orientation >= -22.5) & (bl_orientation < 22.5)
                        subselect_bl_ind[oind] = True
                    elif direction == 'NE':
                        oind = (bl_orientation >= 22.5) & (bl_orientation < 67.5)
                        subselect_bl_ind[oind] = True
                    else:
                        oind = (bl_orientation >= 67.5) & (bl_orientation < 112.5)
                        subselect_bl_ind[oind] = True
        else:
            raise TypeError('Baseline direction criterion must specified as string or list of strings')
    else:
        subselect_bl_ind = NP.ones(bl_length.size, dtype=NP.bool)
    subselect_bl_ind = subselect_bl_ind & (bl_length >= minbl) & (bl_length <= maxbl)
    bl_label = bl_label[subselect_bl_ind]
    bl_id = bl_id[subselect_bl_ind]
    bl = bl[subselect_bl_ind,:]
    bl_length = bl_length[subselect_bl_ind]
    bl_orientation = bl_orientation[subselect_bl_ind]
    bl_count = bl_count[subselect_bl_ind]
    select_bl_ind = select_bl_ind[subselect_bl_ind]
    allinds = [allinds[i] for i in range(subselect_bl_ind.size) if subselect_bl_ind[i]]

    # --- For a monopole sky, keep only one baseline per unique length ---
    if use_HI_monopole:
        # NOTE(review): map() + .count() requires Python 2 list semantics;
        # under Python 3 map returns an iterator without .count()
        bllstr = map(str, bl_length)
        uniq_bllstr, ind_uniq_bll = NP.unique(bllstr, return_index=True)
        count_uniq_bll = [bllstr.count(ubll) for ubll in uniq_bllstr]
        count_uniq_bll = NP.asarray(count_uniq_bll)

        bl = bl[ind_uniq_bll,:]
        bl_label = bl_label[ind_uniq_bll]
        bl_id = bl_id[ind_uniq_bll]
        bl_orientation = bl_orientation[ind_uniq_bll]
        bl_length = bl_length[ind_uniq_bll]
        bl_count = bl_count[ind_uniq_bll]
        select_bl_ind = select_bl_ind[ind_uniq_bll]
        allinds = [allinds[i] for i in ind_uniq_bll]

        sortind = NP.argsort(bl_length, kind='mergesort')
        bl = bl[sortind,:]
        bl_label = bl_label[sortind]
        bl_id = bl_id[sortind]
        bl_length = bl_length[sortind]
        bl_orientation = bl_orientation[sortind]
        count_uniq_bll = count_uniq_bll[sortind]
        bl_count = bl_count[sortind]
        select_bl_ind = select_bl_ind[sortind]
        allinds = [allinds[i] for i in sortind]

    # --- Build the group map (unique label -> redundant labels) and its
    # reverse map (each label -> its unique group label) ---
    blgroups = {}
    blgroups_reversemap = {}
    for labelind, label in enumerate(bl_label_orig[select_bl_ind]):
        if bl_count[labelind] > 0:
            blgroups[tuple(label)] = bl_label_orig[NP.asarray(allinds[labelind])]
            for lbl in bl_label_orig[NP.asarray(allinds[labelind])]:
                # blgroups_reversemap[tuple(lbl)] = tuple(label)
                blgroups_reversemap[tuple(lbl)] = NP.asarray([label], dtype=bl_label.dtype)

    if array_is_redundant:
        if bl_label_orig.size == bl_label.size:
            # Redundancy check was requested but collapsed nothing
            warnings.warn('No redundant baselines found. Proceeding...')

    outdict = {'bl': bl, 'id': bl_id, 'label': bl_label, 'groups': blgroups, 'reversemap': blgroups_reversemap, 'redundancy': redundancy, 'layout_info': layout_info}

    return outdict
#################################################################################
def getBaselineGroupKeys(inp_labels, blgroups_reversemap):
    """
    ---------------------------------------------------------------------------
    Find redundant baseline group keys of groups that contain the input
    baseline labels

    Inputs:

    inp_labels
            [list] List where each element in the list is a two-element tuple
            that corresponds to a baseline / antenna pair label.
            e.g. [('1', '2'), ('3', '0'), ('2', '2'), ...]

    blgroups_reversemap
            [dictionary] Contains the baseline category for each baseline.
            The keys are baseline labels as tuple and the value under each
            key is the label of the unique baseline category that it falls
            under. That label could be a two-element Numpy RecArray or a tuple.
            Each element in this two-element tuple must be an antenna label
            specified as a string. e.g. {('9','8'): ('2','3'),
            ('12','11'): ('2','3'), ('1','4'): ('6','7'),...} or {('9','8'):
            array[('2','3')], ('12','11'): array[('2','3')],
            ('1','4'): array[('6','7')],...}

    Output:

    Tuple containing two values. The first value is a list of all baseline
    group keys corresponding to the input keys. If any input keys were not
    found in blgroups_reversemap, those corresponding position in this list
    will be filled with None to indicate the label was not found. The second
    value in the tuple indicates if the ordering of the input label had to be
    flipped in order to find the baseline group key. Positions where an input
    label was found as is will contain False, but if it had to be flipped will
    contain True. If the input label was not found, it will be filled with
    None.

    Example:

    blkeys, flipped = getBaselineGroupKeys(inp_labels, blgroups_reversemap)
    blkeys --> [('2','3'), ('11','16'), None, ('5','1'),...]
    flipped --> [False, True, None, False],...)
    ---------------------------------------------------------------------------
    """

    try:
        inp_labels, blgroups_reversemap
    except NameError:
        raise NameError('Inputs inp_label and blgroups_reversemap must be provided')
    if not isinstance(blgroups_reversemap, dict):
        raise TypeError('Input blgroups_reversemap must be a dictionary')
    if not isinstance(inp_labels, list):
        # Accept a single label by promoting it to a one-element list
        inp_labels = [inp_labels]

    def _group_key(value):
        # Normalize a reversemap entry to a plain tuple group key: entries may
        # be a one-element structured numpy array (take its first record) or
        # already a tuple
        if isinstance(value, NP.ndarray):
            return tuple(value[0])
        elif isinstance(value, tuple):
            return value
        else:
            raise TypeError('Invalid type found in blgroups_reversemap')

    blgrpkeys = []
    flip_order = []
    for lbl in inp_labels:
        # Membership tested directly on the dict (O(1)) rather than on
        # .keys(), which materializes a list on Python 2
        if lbl in blgroups_reversemap:
            blgrpkeys += [_group_key(blgroups_reversemap[lbl])]
            flip_order += [False]
        elif lbl[::-1] in blgroups_reversemap:
            # Found only under the reversed antenna order
            blgrpkeys += [_group_key(blgroups_reversemap[lbl[::-1]])]
            flip_order += [True]
        else:
            # Label unknown in either orientation
            blgrpkeys += [None]
            flip_order += [None]

    return (blgrpkeys, flip_order)
#################################################################################
def getBaselinesInGroups(inp_labels, blgroups_reversemap, blgroups):
    """
    ---------------------------------------------------------------------------
    Find all redundant baseline labels in groups that contain the given input
    baseline labels
    Inputs:
    inp_labels
                [list] List where each element in the list is a two-element tuple
                that corresponds to a baseline / antenna pair label.
                e.g. [('1', '2'), ('3', '0'), ('2', '2'), ...]
    blgroups_reversemap
                [dictionary] Contains the baseline category for each baseline.
                The keys are baseline labels as tuple and the value under each
                key is the label of the unique baseline category that it falls
                under. That label could be a two-element Numpy RecArray or a
                tuple. Each element in this two-element tuple must be an antenna
                label specified as a string.
                e.g. {('9','8'): ('2','3'), ('12','11'): ('2','3'),
                ('1','4'): ('6','7'),...} or {('9','8'): array[('2','3')],
                ('12','11'): array[('2','3')], ('1','4'): array[('6','7')],...}
    blgroups
                [dictionary] Contains the grouping of unique baselines and the
                redundant baselines as numpy recarray under each unique baseline
                category/flavor. It contains as keys the labels (tuple of A1, A2)
                of unique baselines and the value under each of these keys is a
                list of baseline labels that are redundant under that category
    Output:
    Tuple with two elements where the first element is a list of numpy
    RecArrays where each RecArray corresponds to the entry in inp_labels and is
    an array of two-element records corresponding to the baseline labels in
    that redundant group. If the input baseline is not found, the corresponding
    element in the list is None to indicate the baseline label was not found.
    The second value in the tuple indicates if the ordering of the input label
    had to be flipped in order to find the baseline group key. Positions where
    an input label was found as is will contain False, but if it had to be
    flipped will contain True. If the input label was not found, it will
    contain a None entry.
    Example:
    list_blgrps, flipped = getBaselinesInGroups(inplabels, bl_revmap, blgrps)
    list_blgrps --> [array([('2','3'), ('11','16')]), None,
                    array([('5','1')]), ...],
    flipped --> [False, True, None, ...]
    ---------------------------------------------------------------------------
    """
    if not isinstance(blgroups, dict):
        raise TypeError('Input blgroups must be a dictionary')
    # Resolve each input label to its unique-baseline group key (possibly
    # flipped); unresolved labels come back as None
    blkeys, flip_order = getBaselineGroupKeys(inp_labels, blgroups_reversemap)
    # Look up the redundant-group members for each resolved key; None keys
    # propagate as None entries so output aligns element-wise with the input
    blgrps = [blgroups[blkey] if blkey is not None else None for blkey in blkeys]
    return (blgrps, flip_order)
#################################################################################
def antenna_power(skymodel, telescope_info, pointing_info, freq_scale=None):
    """
    ---------------------------------------------------------------------------
    Generate antenna power received from sky when a sky model, telescope and
    pointing parameters are provided.
    Inputs:
    skymodel [instance of class SkyModel] Sky model specified as an instance
             of class SkyModel
    telescope_info
             [dictionary] dictionary that specifies the type of element,
             element size and orientation. It consists of the following keys
             and values:
             'latitude'    [float] latitude of the telescope site (in degrees).
                           If this key is not present, the latitude of MWA
                           (-26.701 degrees) will be assumed.
             'id'          [string] If set, will ignore the other keys and use
                           telescope details for known telescopes. Accepted
                           values are 'mwa', 'vla', 'gmrt', 'ugmrt', 'hera',
                           'paper', 'hirax' and 'chime'
             'shape'       [string] Shape of antenna element. Accepted values
                           are 'dipole', 'delta', and 'dish'. Will be ignored
                           if key 'id' is set. 'delta' denotes a delta
                           function for the antenna element which has an
                           isotropic radiation pattern. 'delta' is the default
                           when keys 'id' and 'shape' are not set.
             'size'        [scalar] Diameter of the telescope dish (in meters)
                           if the key 'shape' is set to 'dish' or length of
                           the dipole if key 'shape' is set to 'dipole'. Will
                           be ignored if key 'shape' is set to 'delta'. Will
                           be ignored if key 'id' is set and a preset value
                           used for the diameter or dipole.
             'orientation' [list or numpy array] If key 'shape' is set to
                           dipole, it refers to the orientation of the dipole
                           element unit vector whose magnitude is specified by
                           length. If key 'shape' is set to 'dish', it refers
                           to the position on the sky to which the dish is
                           pointed. For a dipole, this unit vector must be
                           provided in the local ENU coordinate system aligned
                           with the direction cosines coordinate system or in
                           the Alt-Az coordinate system. This will be
                           used only when key 'shape' is set to 'dipole'.
                           This could be a 2-element vector (transverse
                           direction cosines) where the third (line-of-sight)
                           component is determined, or a 3-element vector
                           specifying all three direction cosines or a two-
                           element coordinate in Alt-Az system. If not provided
                           it defaults to an eastward pointing dipole. If key
                           'shape' is set to 'dish', the orientation refers
                           to the pointing center of the dish on the sky. It
                           can be provided in Alt-Az system as a two-element
                           vector or in the direction cosine coordinate
                           system as a two- or three-element vector. If not
                           set in the case of a dish element, it defaults to
                           zenith. This is not to be confused with the key
                           'pointing_center' in dictionary 'pointing_info'
                           which refers to the beamformed pointing center of
                           the array. The coordinate system is specified by
                           the key 'ocoords'
             'ocoords'     [scalar string] specifies the coordinate system
                           for key 'orientation'. Accepted values are 'altaz'
                           and 'dircos'.
             'element_locs'
                           [2- or 3-column array] Element locations that
                           constitute the tile. Each row specifies
                           location of one element in the tile. The
                           locations must be specified in local ENU
                           coordinate system. First column specifies along
                           local east, second along local north and the
                           third along local up. If only two columns are
                           specified, the third column is assumed to be
                           zeros. If 'elements_locs' is not provided, it
                           assumed to be a one-element system and not a
                           phased array as far as determination of primary
                           beam is concerned.
             'groundplane' [scalar] height of telescope element above the
                           ground plane (in meteres). Default = None will
                           denote no ground plane effects.
             'ground_modify'
                           [dictionary] contains specifications to modify
                           the analytically computed ground plane pattern. If
                           absent, the ground plane computed will not be
                           modified. If set, it may contain the following
                           keys:
                           'scale' [scalar] positive value to scale the
                                   modifying factor with. If not set, the
                                   scale factor to the modification is unity.
                           'max'   [scalar] positive value to clip the
                                   modified and scaled values to. If not set,
                                   there is no upper limit
    pointing_info
             [dictionary] Contains information about the pointing. It carries
             the following keys and values:
             'lst'    [numpy array] LST values (in degrees) for each pointing
             'pointing_coords'
                      [string scalar] Coordinate system in which the
                      pointing_center is specified. Accepted values are
                      'radec', 'hadec', 'altaz' or 'dircos'. Must be specified
                      if pointing_center is specified
             'pointing_center'
                      [numpy array] coordinates of pointing center (in the
                      coordinate system specified under key 'pointing_coords').
                      Mx2 array when value under key 'pointing_coords' is set
                      to 'radec', 'hadec' or 'altaz', or Mx3 array when the
                      value in 'pointing_coords' is set to 'dircos'. Number of
                      rows M should be equal to number of pointings and LST.
                      If only one row (M=1) is provided the same pointing
                      center in the given coordinate system will apply to all
                      pointings.
    freq_scale
             [string scalar] Units of frequency. Accepted values are 'Hz',
             'kHz', 'MHz' or 'GHz'. If None provided, default is set to 'GHz'
    Output:
    2-dimensional numpy array containing the antenna power. The rows denote
    the different pointings and columns denote the frequency spectrum obtained
    from the frequencies specified in the sky model.
    Notes:
    For each pointing, the visible sky spectrum is weighted by the primary
    beam power pattern and the beam-weighted average over all sky locations
    above the horizon is computed, namely sum(pb * spectrum) / sum(pb), as a
    function of pointings and frequency.
    ---------------------------------------------------------------------------
    """
    # Validate presence and types of the three mandatory inputs
    try:
        skymodel, telescope_info, pointing_info
    except NameError:
        raise NameError('Sky model, telescope and pointing information must be provided')
    if not isinstance(skymodel, SM.SkyModel):
        raise TypeError('Input parameter skymodel must be an instance of class SkyModel')
    if not isinstance(telescope_info, dict):
        raise TypeError('Input parameter telescope_info must be a dictionary')
    if not isinstance(pointing_info, dict):
        raise TypeError('Input parameter pointing_info must be a dictionary')
    if 'latitude' in telescope_info:
        latitude = telescope_info['latitude']
    else:
        latitude = -26.701  # default: latitude of the MWA site (degrees)
    n_src = skymodel.location.shape[0]
    nchan = skymodel.frequency.size
    if 'lst' not in pointing_info:
        raise KeyError('Key "lst" not provided in input parameter pointing_info')
    else:
        lst = NP.asarray(pointing_info['lst'])
        n_lst = lst.size
    if 'pointing_center' not in pointing_info:
        # Default pointing: zenith, in Alt-Az coordinates, for every LST
        pointing_center = NP.repeat(NP.asarray([90.0, 270.0]).reshape(1,-1), n_lst, axis=0)
        pointing_coords = 'altaz'
    else:
        if 'pointing_coords' not in pointing_info:
            # Fixed error message: it previously named the wrong key ("pointing_info")
            raise KeyError('key "pointing_coords" not found in input parameter pointing_info')
        pointing_coords = pointing_info['pointing_coords']
        if not isinstance(pointing_info['pointing_center'], NP.ndarray):
            raise TypeError('Value in key "pointing_center" in input parameter pointing_info must be a numpy array')
        pointing_center = pointing_info['pointing_center']
        if len(pointing_center.shape) > 2:
            raise ValueError('Value under key "pointing_center" in input parameter pointing_info cannot exceed two dimensions')
        if len(pointing_center.shape) < 2:
            pointing_center = pointing_center.reshape(1,-1)
        # Column-count validation per coordinate system. Note: the original
        # if/elif chain wrongly raised on a valid 3-column dircos input since
        # the elif (shape[1] != 2) also fired for dircos arrays
        if pointing_coords == 'dircos':
            if pointing_center.shape[1] != 3:
                raise ValueError('Value under key "pointing_center" in input parameter pointing_info must be a 3-column array for direction cosine coordinate system')
        elif pointing_center.shape[1] != 2:
            raise ValueError('Value under key "pointing_center" in input parameter pointing_info must be a 2-column array for RA-Dec, HA-Dec and Alt-Az coordinate systems')
        n_pointings = pointing_center.shape[0]
        if (n_pointings != n_lst) and (n_pointings != 1):
            raise ValueError('Number of pointing centers and number of LST must match')
        if n_pointings < n_lst:
            # Broadcast a single pointing center to all LSTs
            pointing_center = NP.repeat(pointing_center, n_lst, axis=0)
    n_snaps = lst.size
    # Convert the pointing centers to Alt-Az
    if pointing_coords == 'dircos':
        pointings_altaz = GEOM.dircos2altaz(pointing_center, units='degrees')
    elif pointing_coords == 'hadec':
        pointings_altaz = GEOM.hadec2altaz(pointing_center, latitude, units='degrees')
    elif pointing_coords == 'radec':
        pointings_altaz = GEOM.hadec2altaz(NP.hstack(((lst-pointing_center[:,0]).reshape(-1,1), pointing_center[:,1].reshape(-1,1))), latitude, units='degrees')
    else:
        pointings_altaz = NP.copy(pointing_center)
    # Convert the sky model locations to Alt-Az, one set per snapshot
    if skymodel.coords == 'radec':
        lst_temp = NP.hstack((lst.reshape(-1,1),NP.zeros(n_snaps).reshape(-1,1))) # Prepare fake LST for numpy broadcasting
        lst_temp = lst_temp.T
        lst_temp = lst_temp[NP.newaxis,:,:]
        sky_hadec = lst_temp - skymodel.location[:,:,NP.newaxis] # Reverses sign of declination
        sky_hadec[:,1,:] *= -1 # Correct for the reversal of sign in the declination
        sky_hadec = NP.concatenate(NP.split(sky_hadec, n_snaps, axis=2), axis=0)
        sky_hadec = NP.squeeze(sky_hadec, axis=2)
        sky_altaz = GEOM.hadec2altaz(sky_hadec, latitude, units='degrees')
    elif skymodel.coords == 'hadec':
        sky_altaz = GEOM.hadec2altaz(skymodel.location, latitude, units='degrees')
    elif skymodel.coords == 'dircos':
        sky_altaz = GEOM.dircos2altaz(skymodel.location, units='degrees')
    else:
        sky_altaz = NP.copy(skymodel.location)
    sky_altaz = NP.split(sky_altaz, range(0,sky_altaz.shape[0],n_src)[1:], axis=0) # Split sky_altaz into a list of arrays
    retval = []
    progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(), PGB.ETA()], maxval=len(sky_altaz)).start()
    for i in xrange(len(sky_altaz)):
        pinfo = {}
        pinfo['pointing_center'] = pointings_altaz[i,:]
        pinfo['pointing_coords'] = 'altaz'
        # if 'element_locs' in telescope_info:
        #     pinfo['element_locs'] = telescope_info['element_locs']
        # Only sources above the horizon (alt >= 0) contribute to the power
        upper_hemisphere_ind = sky_altaz[i][:,0] >= 0.0
        upper_skymodel = skymodel.subset(indices=NP.where(upper_hemisphere_ind)[0])
        pb = PB.primary_beam_generator(sky_altaz[i][upper_hemisphere_ind,:], skymodel.frequency, telescope_info, freq_scale=freq_scale, skyunits='altaz', pointing_info=pinfo)
        spectrum = upper_skymodel.generate_spectrum(interp_method='pchip')
        # Beam-weighted average of the visible sky spectrum over sky locations
        retval += [NP.sum(pb*spectrum, axis=0) / NP.sum(pb, axis=0)]
        progress.update(i+1)
    progress.finish()
    return NP.asarray(retval)
#################################################################################
class GainInfo(object):
"""
----------------------------------------------------------------------------
Class to manage instrument gains
Attributes:
gaintable [None or dictionary] If set to None, all antenna- and
baseline-based gains will be set to unity. If returned as
dictionary, it contains the loaded gains. It contains the
following keys and values:
'antenna-based' [None or dictionary] Contains antenna-
based instrument gain information. If
set to None, all antenna-based gains are
set to unity. If returned as dictionary,
it has the following keys and values:
'ordering' [list or numpy array] Three
element list of strings
indicating the ordering of
axes - 'time', 'label',
and 'frequency' as specified
in input axes_order
'gains' [scalar or numpy array]
Complex antenna-based
instrument gains. Must be
of shape (nant, nchan, nts)
If there is no variations in
gains along an axis, then
the corresponding nax may be
set to 1 and the gains will
be replicated along that
axis using numpy array
broadcasting. For example,
shapes (nant,1,1), (1,1,1),
(1,nchan,nts) are
acceptable. If specified as
a scalar, it will be
replicated along all three
axes, namely, 'label',
'frequency' and 'time'.
'label' [None or list or numpy
array] List or antenna
labels that correspond to
nant along the 'label' axis.
If nant=1, this may be set
to None, else it will be
specified and will match the
nant.
'frequency' [None or list or numpy array]
Frequency channels that
correspond to the nax along
the 'frequency' axis. If the
nchan=1 along the 'frequency'
axis, this may be set to None,
else it must be specified and
must match the nchan
'time' [None or list or numpy array]
Observation times that
correspond to the nax along
the 'time' axis. If the
ntimes=1 along the 'time'
axis, this may be set to None,
else it must be specified and
must match the ntimes. It must
be a float and can be in
seconds, hours, days, etc.
'baseline-based' [None or dictionary] Contains baseline-
based instrument gain information. If
set to None, all baseline-based gains
are set to unity. If returned as
dictionary, it has the following keys
and values:
'ordering' [list or numpy array] Three
element list of strings
indicating the ordering of
axes - 'time', 'label',
and 'frequency' as
specified in input
axes_order
'gains' [scalar or numpy array]
Complex baseline-based
instrument gains. Must be
of shape (nbl, nchan, nts)
If there is no variations in
gains along an axis, then
the corresponding nax may be
set to 1 and the gains will
be replicated along that
axis using numpy array
broadcasting. For example,
shapes (nant,1,1), (1,1,1),
(1,nchan,nts) are
acceptable. If specified as
a scalar, it will be
replicated along all three
axes, namely, 'label',
'frequency' and 'time'.
'label' [None or list or numpy
array] List or baseline
labels that correspond to
nbl along the 'label' axis.
If nbl=1 along the 'label'
axis this may be set to
None, else it will be
specified and will match nbl
'frequency' [None or list or numpy array]
Frequency channels that
correspond to the nax along
the 'frequency' axis. If the
nchan=1 along the 'frequency'
axis, this may be set to None,
else it must be specified and
must match the nchan
'time' [None or list or numpy array]
Observation times that
correspond to the nax along
the 'time' axis. If the
ntimes=1 along the 'time'
axis, this may be set to None,
else it must be specified and
must match the ntimes. It must
be a float and can be in
seconds, hours, days, etc.
interpfuncs [dictionary] Determined in member function interpolator().
Contains interpolation information under two keys, namely,
'antenna-based' and 'baseline-based'. Under each of these keys
is another dictionary with the following keys and values:
'dims' [numpy array of strings] Contains the axes labels
of the interpolated axes for antenna or baseline
labels. It could contain a single element ['time'],
of ['frequency'] indicating 1D splines along that
axis or contain two elements 'time' and 'frequency'
indicating 2D splines. 1D splines will have been
obtained with scipy.interpolate.interp1d while
2D splines obtained with scipy.interpolate.interp2d
'interp' [numpy recArray] Holds the interpolation functions
(instances of scipy.interpolate.interp1d or
scipy.interpolate.interp2d depending on the value
under 'dims' key) for each antenna or baseline
label. It is of size nbl. Each entry in this
numpy recArray has two fields, 'real' for
interpolation of real part and 'imag' for the
imaginary part. If it is a one element recarray,
then it applies to all antennas and baselines
Member function interpolate_gains() uses this attribute to
return interpolated gains
splinefuncs [dictionary] Determined in member function splinator().
Contains spline information under two keys, namely,
'antenna-based' and 'baseline-based'. Under each of these keys
is another dictionary with the following keys and values:
'dims' [numpy array of strings] Contains the axes labels
of the interpolated axes for antenna or baseline
labels. It could contain a single element ['time'],
of ['frequency'] indicating 1D splines along that
axis or contain two elements 'time' and 'frequency'
indicating 2D splines. 1D splines will have been
obtained with scipy.interpolate.UnivariateSpline
while 2D splines obtained with
scipy.interpolate.RectBivariateSpline
'interp' [numpy recArray] Holds the spline functions
(instances of scipy.interpolate.UnivariateSpline or
scipy.interpolate.RectBivariateSpline depending on
the value under 'dims' key) for each antenna or
baseline label. It is of size nbl. Each entry in
this numpy recArray has two fields, 'real' for
interpolation of real part and 'imag' for the
imaginary part. If it is a one element recarray,
then it applies to all antennas and baselines.
Member function spline_gains() uses this attribute to return
spline-interpolated gains
Member functions:
__init__() Initialize an instance of class GainInfo from a file
read_gaintable()
Read gain table from file in HDF5 format and return and/or
store as attribute
eval_gains()
Extract complex instrument gains for given baselines from the
gain table
interpolator()
Sets up interpolation functions and stores them in the
attribute interpfuncs. Better alternative is to use splinator()
splinator() Sets up spline functions and stores them in the attribute
splinefuncs. Better alternative to interpolator()
interpolate_gains()
Interpolate at the specified baselines for the given
frequencies and times using attribute interpfuncs. Better
alternative is to use spline_gains()
spline_gains()
Evaluate spline at the specified baselines for the given
frequencies and times using attribute splinefuncs. Better
alternative to interpolate_gains()
nearest_gains()
Extract complex instrument gains for given baselines from the
gain table determined by nearest neighbor logic
write_gaintable()
Write gain table with specified axes ordering to external file
in HDF5 format
-----------------------------------------------------------------------------
"""
def __init__(self, init_file=None, axes_order=None):
"""
------------------------------------------------------------------------
Initialize an instance of class GainInfo from a file
Attributes initialized are:
gaintable, interpfuncs, splinefuncs
Read docstring of class GainInfo for details on these attributes
Keyword Inputs:
gainsfile [string] Filename including the full path that contains the
instrument gains. It must be in HDF5 format. It must contain
the following structure:
'antenna-based' [dictionary] Contains antenna-based
instrument gain information. It has the
following keys and values:
'ordering' [list or numpy array] Three
element list of strings
indicating the ordering of
axes - 'time', 'label',
and 'frequency'. Must be
specified (no defaults)
'gains' [scalar or numpy array]
Complex antenna-based
instrument gains. Must be
of shape (nax1, nax2, nax3)
where ax1, ax2 and ax3 are
specified by the axes
ordering under key
'ordering'. If there is no
variations in gains along an
axis, then the corresponding
nax may be set to 1 and the
gains will be replicated
along that axis using numpy
array broadcasting. For
example, shapes (nax1,1,1),
(1,1,1), (1,nax2,nax3) are
acceptable. If specified as
a scalar, it will be
replicated along all three
axes, namely, 'label',
'frequency' and 'time'.
'label' [None or list or numpy array]
List or antenna labels that
correspond to the nax along
the 'label' axis. If the
nax=1 along the 'label' axis,
this may be set to None, else
it must be specified and must
match the nax.
'frequency' [None or list or numpy
array] Frequency channels
that correspond to the
nax along the 'frequency'
axis. If the nax=1 along the
'frequency' axis, this may
be set to None, else it must
be specified and must match
the nax.
'time' [None or list or numpy
array] Observation times
that correspond to the nax
along the 'time' axis. If
the nax=1 along the 'time'
axis, this may be set to
None, else it must be
specified and must match the
nax. It must be a float and
can be in seconds, hours,
days, etc.
'baseline-based' [dictionary] Contains baseline-based
instrument gain information. It has the
following keys and values:
'ordering' [list or numpy array] Three
element list of strings
indicating the ordering of
axes - 'time', 'label',
and 'frequency'. Must be
specified (no defaults)
'gains' [scalar or numpy array]
Complex baseline-based
instrument gains. Must be
of shape (nax1, nax2, nax3)
where ax1, ax2 and ax3 are
specified by the axes
ordering under key
'ordering'. If there is no
variations in gains along an
axis, then the corresponding
nax may be set to 1 and the
gains will be replicated
along that axis using numpy
array broadcasting. For
example, shapes (nax1,1,1),
(1,1,1), (1,nax2,nax3) are
acceptable. If specified as
a scalar, it will be
replicated along all three
axes, namely, 'label',
'frequency' and 'time'.
'label' [None or list or numpy
array] List of baseline
labels that correspond to
the nax along the 'label'
axis. If the nax=1 along the
'label' axis this may be set
to None, else it must be
specified and must match the
nax.
'frequency' [None or list or numpy
array] Frequency channels
that correspond to the
nax along the 'frequency'
axis. If the nax=1 along the
'frequency' axis, this may
be set to None, else it must
be specified and must match
the nax.
'time' [None or list or numpy
array] Observation times
that correspond to the nax
along the 'time' axis. If
the nax=1 along the 'time'
axis, this may be set to
None, else it must be
specified and must match the
nax. It must be a float and
can be in seconds, hours,
days, etc.
axes_order [None or list or numpy array] The gaintable which is read is
stored in this axes ordering. If set to None, it will store
in this order ['label', 'frequency', 'time']
------------------------------------------------------------------------
"""
self.gaintable = None
self.interpfuncs = {key: None for key in ['antenna-based', 'baseline-based']}
self.splinefuncs = {key: None for key in ['antenna-based', 'baseline-based']}
if init_file is not None:
self.gaintable = self.read_gaintable(init_file, axes_order=axes_order, action='return')
self.interpolator()
self.splinator(smoothness=None)
#############################################################################
def read_gaintable(self, gainsfile, axes_order=None, action='return'):
"""
------------------------------------------------------------------------
Read gain table from file in HDF5 format and return and/or store as
attribute
Input:
gainsfile [string] Filename including the full path that contains the
instrument gains. It must be in HDF5 format. It must contain
the following structure:
'antenna-based' [dictionary] Contains antenna-based
instrument gain information. It has the
following keys and values:
'ordering' [list or numpy array] Three
element list of strings
indicating the ordering of
axes - 'time', 'label',
and 'frequency'. Must be
specified (no defaults)
'gains' [scalar or numpy array]
Complex antenna-based
instrument gains. Must be
of shape (nax1, nax2, nax3)
where ax1, ax2 and ax3 are
specified by the axes
ordering under key
'ordering'. If there is no
variations in gains along an
axis, then the corresponding
nax may be set to 1 and the
gains will be replicated
along that axis using numpy
array broadcasting. For
example, shapes (nax1,1,1),
(1,1,1), (1,nax2,nax3) are
acceptable. If specified as
a scalar, it will be
replicated along all three
axes, namely, 'label',
'frequency' and 'time'.
'label' [None or list or numpy
array] List or antenna
labels that correspond to
the nax along the 'label'
axis. If the nax=1 along the
'label' axis, this may be
set to None, else it must be
specified and must match the
nax.
'frequency' [None or list or numpy
array] Frequency channels
that correspond to the nax
along the 'frequency' axis.
If the nax=1 along the
'frequency' axis, this may
be set to None, else it must
be specified and must match
the nax.
'time' [None or list or numpy
array] Observation times
that correspond to the nax
along the 'time' axis. If
the nax=1 along the 'time'
axis, this may be set to
None, else it must be
specified and must match the
nax. It must be a float and
can be in seconds, hours,
days, etc.
'baseline-based' [dictionary] Contains baseline-based
instrument gain information. It has the
following keys and values:
'ordering' [list or numpy array] Three
element list of strings
indicating the ordering of
axes - 'time', 'label',
and 'frequency'. Must be
specified (no defaults)
'gains' [scalar or numpy array]
Complex baseline-based
instrument gains. Must be
of shape (nax1, nax2, nax3)
where ax1, ax2 and ax3 are
specified by the axes
ordering under key
'ordering'. If there is no
variations in gains along an
axis, then the corresponding
nax may be set to 1 and the
gains will be replicated
along that axis using numpy
array broadcasting. For
example, shapes (nax1,1,1),
(1,1,1), (1,nax2,nax3) are
acceptable. If specified as
a scalar, it will be
replicated along all three
axes, namely, 'label',
'frequency' and 'time'.
'label' [None or list or numpy
array] List of baseline
labels that correspond to
the nax along the 'label'
axis. If the nax=1 along the
'label' axis this may be set
to None, else it must be
specified and must match the
nax.
'frequency' [None or list or numpy
array] Frequency channels
that correspond to the nax
along the 'frequency' axis.
If the nax=1 along the
'frequency' axis, this may
be set to None, else it must
be specified and must match
the nax.
'time' [None or list or numpy
array] Observation times
that correspond to the nax
along the 'time' axis. If
the nax=1 along the 'time'
axis, this may be set to
None, else it must be
specified and must match the
nax. It must be a float and
can be in seconds, hours,
days, etc.
axes_order [None or list or numpy array] The gaintable which is read is
stored in this axes ordering. If set to None, it will store
in this order ['label', 'frequency', 'time']
action [string] If set to 'store' (default), the gain table will
be stored as attribute in addition to being returned. If set
to 'return' the gain table will be returned.
Output:
gaintable [None or dictionary] If set to None, all antenna- and
baseline-based gains will be set to unity. If returned as
dictionary, it contains the loaded gains. It contains the
following keys and values:
'antenna-based' [None or dictionary] Contains antenna-
based instrument gain information. If
set to None, all antenna-based gains are
set to unity. If returned as dictionary,
it has the following keys and values:
'ordering' [list or numpy array] Three
element list of strings
indicating the ordering of
axes - 'time', 'label',
and 'frequency' as specified
in input axes_order
'gains' [scalar or numpy array]
Complex antenna-based
instrument gains. Must be
of shape (nant, nchan, nts)
If there is no variations in
gains along an axis, then
the corresponding nax may be
set to 1 and the gains will
be replicated along that
axis using numpy array
broadcasting. For example,
shapes (nant,1,1), (1,1,1),
(1,nchan,nts) are
acceptable. If specified as
a scalar, it will be
replicated along all three
axes, namely, 'label',
'frequency' and 'time'.
'label' [None or list or numpy
array] List or antenna
labels that correspond to
nant along the 'label' axis.
If nant=1, this may be set
to None, else it will be
specified and will match the
nant.
'frequency' [None or list or numpy
array] Frequency channels
that correspond to the nax
along the 'frequency' axis.
If the nchan=1 along the
'frequency' axis, this may
be set to None, else it must
be specified and must match
the nchan.
'time' [None or list or numpy
array] Observation times
that correspond to the nax
along the 'time' axis. If
the ntimes=1 along the
'time' axis, this may be set
to None, else it must be
specified and must match the
ntimes. It will be a float
and in same units as given
in input
'baseline-based' [None or dictionary] Contains baseline-
based instrument gain information. If
set to None, all baseline-based gains
are set to unity. If returned as
dictionary, it has the following keys
and values:
'ordering' [list or numpy array] Three
element list of strings
indicating the ordering of
axes - 'time', 'label',
and 'frequency' as
specified in input
axes_order
'gains' [scalar or numpy array]
Complex baseline-based
instrument gains. Must be
of shape (nbl, nchan, nts)
If there is no variations in
gains along an axis, then
the corresponding nax may be
set to 1 and the gains will
be replicated along that
axis using numpy array
broadcasting. For example,
shapes (nant,1,1), (1,1,1),
(1,nchan,nts) are
acceptable. If specified as
a scalar, it will be
replicated along all three
axes, namely, 'label',
'frequency' and 'time'.
'label' [None or list or numpy
array] List or baseline
labels that correspond to
nbl along the 'label' axis.
If nbl=1 along the 'label'
axis this may be set to
None, else it will be
specified and will match nbl
'frequency' [None or list or numpy
array] Frequency channels
that correspond to the nax
along the 'frequency' axis.
If the nchan=1 along the
'frequency' axis, this may
be set to None, else it must
be specified and must match
the nchan.
'time' [None or list or numpy
array] Observation times
that correspond to the nax
along the 'time' axis. If
the ntimes=1 along the
'time' axis, this may be set
to None, else it must be
specified and must match the
ntimes. It will be a float
and in same units as given
in input
------------------------------------------------------------------------
"""
if not isinstance(action, str):
return TypeError('Input parameter action must be a string')
action = action.lower()
if action not in ['store', 'return']:
raise ValueError('Invalid value specified for input parameter action')
gaintable = read_gaintable(gainsfile, axes_order=axes_order)
if action == 'store':
self.gaintable = gaintable
return gaintable
#############################################################################
    def interpolator(self, kind='linear'):
        """
        ------------------------------------------------------------------------
        Sets up interpolation functions and stores them in the attribute
        interpfuncs. Better alternative is to use splinator()

        Inputs:

        kind        [string] Type of interpolation. Accepted values are
                    'linear' (default), 'cubic' or 'quintic'. See documentation
                    of scipy.interpolate.interp1d and scipy.interpolate.interp2d
                    for details
        ------------------------------------------------------------------------
        """
        kind = kind.lower()
        if kind not in ['linear', 'cubic', 'quintic']:
            raise ValueError('Specified kind of interpolation invalid')
        if self.gaintable is not None:
            # One set of interpolating functions per gain category key in
            # the gain table (e.g. 'antenna-based', 'baseline-based')
            for gainkey in self.gaintable:
                if self.gaintable[gainkey] is not None:
                    self.interpfuncs[gainkey] = None
                    if self.gaintable[gainkey]['gains'] is not None:
                        if isinstance(self.gaintable[gainkey]['gains'], NP.ndarray):
                            if self.gaintable[gainkey]['gains'].ndim != 3:
                                raise ValueError('Gains must be a 3D numpy array')
                            # if self.gaintable[gainkey]['gains'].size > 1:
                            # Only build interpolators when the gains actually
                            # vary along frequency and/or time (axis size > 1)
                            if (self.gaintable[gainkey]['gains'].shape[self.gaintable[gainkey]['ordering'].index('frequency')] > 1) or (self.gaintable[gainkey]['gains'].shape[self.gaintable[gainkey]['ordering'].index('time')] > 1):
                                # Bring gains to canonical (label, frequency, time) order
                                temp_axes_order = ['label', 'frequency', 'time']
                                inp_order = self.gaintable[gainkey]['ordering']
                                temp_transpose_order = NMO.find_list_in_list(inp_order, temp_axes_order)
                                if NP.all(inp_order == temp_axes_order):
                                    gains = NP.copy(self.gaintable[gainkey]['gains'])
                                else:
                                    gains = NP.transpose(NP.copy(self.gaintable[gainkey]['gains']), axes=temp_transpose_order)
                                # dims collects the axes ('frequency' and/or 'time')
                                # along which the gains actually vary
                                dims = []
                                for ax in NP.arange(1,3):
                                    if gains.shape[ax] > 1:
                                        dims += [temp_axes_order[ax]]
                                dims = NP.asarray(dims)
                                interpf = []
                                # scipy interpolators do not accept complex data,
                                # so keep separate (real, imag) functions per label
                                for labelind in xrange(gains.shape[0]):
                                    if dims.size == 1:
                                        # Variation along a single axis: 1D interpolation
                                        interpf_real = interpolate.interp1d(self.gaintable[gainkey][dims[0]], gains[labelind,:,:].real.ravel(), kind=kind, bounds_error=True)
                                        interpf_imag = interpolate.interp1d(self.gaintable[gainkey][dims[0]], gains[labelind,:,:].imag.ravel(), kind=kind, bounds_error=True)
                                    else:
                                        # Variation along both axes: 2D interpolation with
                                        # x=time, y=frequency, z of shape (nchan, nts)
                                        interpf_real = interpolate.interp2d(self.gaintable[gainkey]['time'], self.gaintable[gainkey]['frequency'], gains[labelind,:,:].real, kind=kind, bounds_error=True)
                                        interpf_imag = interpolate.interp2d(self.gaintable[gainkey]['time'], self.gaintable[gainkey]['frequency'], gains[labelind,:,:].imag, kind=kind, bounds_error=True)
                                    interpf += [(copy.copy(interpf_real), copy.copy(interpf_imag))]
                                # Structured object array of callables indexed by label,
                                # with fields 'real'/'imag'; 'dims' records varying axes
                                self.interpfuncs[gainkey] = {'interp': NP.asarray(interpf, dtype=[('real', NP.object), ('imag', NP.object)]), 'dims': dims}
############################################################################
    def splinator(self, smoothness=None):
        """
        -----------------------------------------------------------------------
        Sets up spline functions and stores them in the attribute splinefuncs.
        Better alternative to interpolator()

        Inputs:

        smoothness [integer or float] Smoothness of spline interpolation. Must
                   be positive. If set to None (default), it will set equal to
                   the number of samples using which the spline functions are
                   estimated. Read documentation of
                   scipy.interpolate.UnivariateSpline and
                   scipy.interpolate.RectBivariateSpline for more details
        -----------------------------------------------------------------------
        """
        if smoothness is not None:
            if not isinstance(smoothness, (int,float)):
                raise TypeError('Input smoothness must be a scalar')
            if smoothness <= 0.0:
                raise ValueError('Input smoothness must be a positive number')
        if self.gaintable is not None:
            # One set of spline functions per gain category key in the table
            for gainkey in self.gaintable:
                if self.gaintable[gainkey] is not None:
                    self.splinefuncs[gainkey] = None
                    if self.gaintable[gainkey]['gains'] is not None:
                        if isinstance(self.gaintable[gainkey]['gains'], NP.ndarray):
                            if self.gaintable[gainkey]['gains'].ndim != 3:
                                raise ValueError('Gains must be a 3D numpy array')
                            # if self.gaintable[gainkey]['gains'].size > 1:
                            # Only build splines when the gains actually vary
                            # along frequency and/or time (axis size > 1)
                            if (self.gaintable[gainkey]['gains'].shape[self.gaintable[gainkey]['ordering'].index('frequency')] > 1) or (self.gaintable[gainkey]['gains'].shape[self.gaintable[gainkey]['ordering'].index('time')] > 1):
                                # Bring gains to canonical (label, frequency, time) order
                                temp_axes_order = ['label', 'frequency', 'time']
                                inp_order = self.gaintable[gainkey]['ordering']
                                temp_transpose_order = NMO.find_list_in_list(inp_order, temp_axes_order)
                                if NP.all(inp_order == temp_axes_order):
                                    gains = NP.copy(self.gaintable[gainkey]['gains'])
                                else:
                                    gains = NP.transpose(NP.copy(self.gaintable[gainkey]['gains']), axes=temp_transpose_order)
                                # dims collects the axes ('frequency' and/or 'time')
                                # along which the gains actually vary
                                dims = []
                                for ax in NP.arange(1,3):
                                    if gains.shape[ax] > 1:
                                        dims += [temp_axes_order[ax]]
                                dims = NP.asarray(dims)
                                interpf = []
                                # Splines do not accept complex data, so keep
                                # separate (real, imag) functions per label
                                for labelind in xrange(gains.shape[0]):
                                    if dims.size == 1:
                                        if smoothness is None:
                                            # NOTE(review): once defaulted here, this value
                                            # persists for all later labels and gain keys
                                            smoothness = self.gaintable[gainkey][dims[0]].size
                                        interpf_real = interpolate.UnivariateSpline(self.gaintable[gainkey][dims[0]], gains[labelind,:,:].real.ravel(), s=smoothness, ext='raise')
                                        interpf_imag = interpolate.UnivariateSpline(self.gaintable[gainkey][dims[0]], gains[labelind,:,:].imag.ravel(), s=smoothness, ext='raise')
                                    else:
                                        if smoothness is None:
                                            # Default smoothness = number of samples (nchan*nts)
                                            smoothness = gains.shape[1]*gains.shape[2]
                                        # RectBivariateSpline expects z of shape (nx, ny) with
                                        # x=time, y=frequency, hence the transpose of (nchan, nts)
                                        interpf_real = interpolate.RectBivariateSpline(self.gaintable[gainkey]['time'], self.gaintable[gainkey]['frequency'], gains[labelind,:,:].real.T, bbox=[self.gaintable[gainkey]['time'].min(), self.gaintable[gainkey]['time'].max(), self.gaintable[gainkey]['frequency'].min(), self.gaintable[gainkey]['frequency'].max()], s=smoothness)
                                        interpf_imag = interpolate.RectBivariateSpline(self.gaintable[gainkey]['time'], self.gaintable[gainkey]['frequency'], gains[labelind,:,:].imag.T, bbox=[self.gaintable[gainkey]['time'].min(), self.gaintable[gainkey]['time'].max(), self.gaintable[gainkey]['frequency'].min(), self.gaintable[gainkey]['frequency'].max()], s=smoothness)
                                    interpf += [(copy.copy(interpf_real), copy.copy(interpf_imag))]
                                # Structured object array of callables indexed by label,
                                # with fields 'real'/'imag'; 'dims' records varying axes
                                self.splinefuncs[gainkey] = {'interp': NP.asarray(interpf, dtype=[('real', NP.object), ('imag', NP.object)]), 'dims': dims}
#############################################################################
    def interpolate_gains(self, bl_labels, freqs=None, times=None,
                          axes_order=None):
        """
        ------------------------------------------------------------------------
        Interpolate at the specified baselines for the given frequencies and
        times using attribute interpfuncs (set up by interpolator()). Better
        alternative is to use spline_gains()

        Inputs:

        bl_labels   [Numpy structured array tuples] Labels of antennas in the
                    pair used to produce the baseline vector under fields 'A2'
                    and 'A1' for second and first antenna respectively. The
                    baseline vector is obtained by position of antennas under
                    'A2' minus position of antennas under 'A1'. The array is of
                    size nbl

        freqs       [None or numpy array] Array of frequencies at which the
                    gains are to be interpolated using the attribute
                    interpfuncs. If set to None (default), all frequencies in
                    the gaintable are assumed. The specified frequencies must
                    always lie within the range which was used in creating the
                    interpolation functions, otherwise an exception will be
                    raised. The array is of size nchan

        times       [None or numpy array] Array of times at which the gains
                    are to be interpolated using the attribute interpfuncs. If
                    set to None (default), all times in the gaintable are
                    assumed. The specified times must always lie within the
                    range which was used in creating the interpolation
                    functions, otherwise an exception will be raised. The
                    array is of size nts

        axes_order  [None or list or numpy array] Axes ordering for extracted
                    gains. It must contain the three elements 'label',
                    'frequency', and 'time'. If set to None, it will be
                    returned in the same order as in the input gaintable.

        Outputs:

        [numpy array] Complex gains of shape nbl x nchan x nts for the
        specified baselines, frequencies and times.
        ------------------------------------------------------------------------
        """
        try:
            bl_labels
        except NameError:
            raise NameError('Input bl_labels must be specified')
        # Multiplicative identity of shape (1,1,1); broadcasting accumulates
        # the antenna-based and baseline-based contributions into it
        blgains = NP.asarray(1.0).reshape(1,1,1)
        if self.gaintable is not None:
            a1_labels = bl_labels['A1']
            a2_labels = bl_labels['A2']
            # Antenna-based gains enter a baseline as conj(g1)*g2; baseline-based
            # gains are applied directly
            for key in ['antenna-based', 'baseline-based']:
                if self.interpfuncs[key] is not None:
                    labels = self.gaintable[key]['label']
                    # NOTE(review): when freqs/times are None they are filled in
                    # from the first gain category that provides them, and those
                    # same arrays are reused for the next category iteration
                    if freqs is None:
                        if self.gaintable[key]['frequency'] is not None:
                            freqs = self.gaintable[key]['frequency']
                    elif isinstance(freqs, (int,list,NP.ndarray)):
                        freqs = NP.asarray(freqs).ravel()
                    else:
                        raise TypeError('Input freqs must be a scalar, list or numpy array')
                    if times is None:
                        if self.gaintable[key]['time'] is not None:
                            times = self.gaintable[key]['time']
                    elif isinstance(times, (int,list,NP.ndarray)):
                        times = NP.asarray(times).ravel()
                    else:
                        raise TypeError('Input times must be a scalar, list or numpy array')
                    # Classify requested frequencies as in-bounds / out-of-bounds
                    # relative to the tabulated range; any out-of-bounds is fatal
                    if self.gaintable[key]['frequency'] is not None:
                        ib_freq_index = NP.logical_and(freqs <= NP.amax(self.gaintable[key]['frequency']), freqs >= NP.amin(self.gaintable[key]['frequency']))
                        oobl_freq_index = freqs < NP.amin(self.gaintable[key]['frequency'])
                        oobr_freq_index = freqs > NP.amax(self.gaintable[key]['frequency'])
                        oob_freq_index = NP.logical_not(ib_freq_index)
                        if NP.any(oob_freq_index):
                            raise ValueError('One or more of the frequencies outside interpolation range')
                    else:
                        if freqs is not None:
                            ib_freq_index = NP.ones(freqs.size, dtype=NP.bool)
                            oob_freq_index = NP.zeros(freqs.size, dtype=NP.bool)
                            oobl_freq_index = NP.zeros(freqs.size, dtype=NP.bool)
                            oobr_freq_index = NP.zeros(freqs.size, dtype=NP.bool)
                        else:
                            ib_freq_index = None
                            oob_freq_index = None
                    # Same in-bounds classification for requested times
                    if self.gaintable[key]['time'] is not None:
                        ib_time_index = NP.logical_and(times <= NP.amax(self.gaintable[key]['time']), times >= NP.amin(self.gaintable[key]['time']))
                        oobl_time_index = times < NP.amin(self.gaintable[key]['time'])
                        oobr_time_index = times > NP.amax(self.gaintable[key]['time'])
                        oob_time_index = NP.logical_not(ib_time_index)
                        if NP.any(oob_time_index):
                            raise ValueError('One or more of the times outside interpolation range')
                    else:
                        if times is not None:
                            ib_time_index = NP.ones(times.size, dtype=NP.bool)
                            oob_time_index = NP.zeros(times.size, dtype=NP.bool)
                            oobl_time_index = NP.zeros(times.size, dtype=NP.bool)
                            oobr_time_index = NP.zeros(times.size, dtype=NP.bool)
                        else:
                            ib_time_index = None
                            oob_time_index = None
                    if isinstance(self.interpfuncs[key], dict):
                        if 'dims' not in self.interpfuncs[key]:
                            raise KeyError('Key "dims" not found in attribute interpfuncs[{0}]'.format(key))
                        if not isinstance(self.interpfuncs[key]['dims'], NP.ndarray):
                            raise TypeError('Key "dims" in attribute interpfuncs[{0}] must contain a numpy array'.format(key))
                        # Determine output grid sizes and the 1D/2D interpolator
                        # inputs based on which axes the gains vary along
                        if self.interpfuncs[key]['dims'].size == 1:
                            if self.interpfuncs[key]['dims'][0] == 'time':
                                ntimes = ib_time_index.size
                                if freqs is None:
                                    nchan = 1
                                else:
                                    nchan = ib_freq_index.size
                                inp = times[ib_time_index]
                            else:
                                nchan = ib_freq_index.size
                                if times is None:
                                    ntimes = 1
                                else:
                                    ntimes = ib_time_index.size
                                inp = freqs[ib_freq_index]
                        else:
                            inp_times = times[ib_time_index]
                            inp_freqs = freqs[ib_freq_index]
                            ntimes = ib_time_index.size
                            nchan = ib_freq_index.size
                    if key == 'antenna-based':
                        # Map each baseline's antennas onto rows of the gain table
                        ind1 = NMO.find_list_in_list(labels, a1_labels)
                        ind2 = NMO.find_list_in_list(labels, a2_labels)
                        if NP.sum(ind1.mask) > 0:
                            raise IndexError('Some antenna gains could not be found')
                        if NP.sum(ind2.mask) > 0:
                            raise IndexError('Some antenna gains could not be found')
                        g1_conj = None
                        g2 = None
                        # Evaluate per-baseline conj(g1) and g2 by calling the
                        # stored real/imag interpolators and stacking along labels
                        for i in xrange(ind1.size):
                            if self.interpfuncs[key]['dims'].size == 1:
                                if g1_conj is None:
                                    g1_conj = (self.interpfuncs[key]['interp']['real'][ind1[i]](inp) - 1j * self.interpfuncs[key]['interp']['imag'][ind1[i]](inp)).reshape(1,nchan,ntimes)
                                    g2 = (self.interpfuncs[key]['interp']['real'][ind2[i]](inp) + 1j * self.interpfuncs[key]['interp']['imag'][ind2[i]](inp)).reshape(1,nchan,ntimes)
                                else:
                                    g1_conj = NP.concatenate((g1_conj, (self.interpfuncs[key]['interp']['real'][ind1[i]](inp) - 1j * self.interpfuncs[key]['interp']['imag'][ind1[i]](inp)).reshape(1,nchan,ntimes)), axis=0)
                                    g2 = NP.concatenate((g2, (self.interpfuncs[key]['interp']['real'][ind2[i]](inp) + 1j * self.interpfuncs[key]['interp']['imag'][ind2[i]](inp)).reshape(1,nchan,ntimes)), axis=0)
                            else:
                                if g1_conj is None:
                                    g1_conj = (self.interpfuncs[key]['interp']['real'][ind1[i]](inp_times,inp_freqs) - 1j * self.interpfuncs[key]['interp']['imag'][ind1[i]](inp_times,inp_freqs)).reshape(1,nchan,ntimes)
                                    g2 = (self.interpfuncs[key]['interp']['real'][ind2[i]](inp_times,inp_freqs) + 1j * self.interpfuncs[key]['interp']['imag'][ind2[i]](inp_times,inp_freqs)).reshape(1,nchan,ntimes)
                                else:
                                    g1_conj = NP.concatenate((g1_conj, (self.interpfuncs[key]['interp']['real'][ind1[i]](inp_times,inp_freqs) - 1j * self.interpfuncs[key]['interp']['imag'][ind1[i]](inp_times,inp_freqs)).reshape(1,nchan,ntimes)), axis=0)
                                    g2 = NP.concatenate((g2, (self.interpfuncs[key]['interp']['real'][ind2[i]](inp_times,inp_freqs) + 1j * self.interpfuncs[key]['interp']['imag'][ind2[i]](inp_times,inp_freqs)).reshape(1,nchan,ntimes)), axis=0)
                        blgains = blgains * g1_conj * g2 * NP.ones((1,nchan,ntimes), dtype=NP.complex)
                    else:
                        # Baseline-based: look up each baseline label directly; a
                        # reversed label contributes the conjugate gain; a missing
                        # label contributes unity
                        g12 = None
                        for labelind,label in enumerate(bl_labels):
                            if label in labels:
                                ind = NP.where(self.gaintable[key]['label'] == label)[0]
                                if self.interpfuncs[key]['dims'].size == 1:
                                    if g12 is None:
                                        g12 = (self.interpfuncs[key]['interp']['real'][ind[0]](inp) + 1j * self.interpfuncs[key]['interp']['imag'][ind[0]](inp)).reshape(1,nchan,ntimes)
                                    else:
                                        g12 = NP.concatenate((g12, (self.interpfuncs[key]['interp']['real'][ind[0]](inp) + 1j * self.interpfuncs[key]['interp']['imag'][ind[0]](inp)).reshape(1,nchan,ntimes)), axis=0)
                                else:
                                    if g12 is None:
                                        g12 = (self.interpfuncs[key]['interp']['real'][ind[0]](inp_times,inp_freqs) + 1j * self.interpfuncs[key]['interp']['imag'][ind[0]](inp_times,inp_freqs)).reshape(1,nchan,ntimes)
                                    else:
                                        g12 = NP.concatenate((g12, (self.interpfuncs[key]['interp']['real'][ind[0]](inp_times,inp_freqs) + 1j * self.interpfuncs[key]['interp']['imag'][ind[0]](inp_times,inp_freqs)).reshape(1,nchan,ntimes)), axis=0)
                            elif NP.asarray([tuple(reversed(label))], dtype=bl_labels.dtype)[0] in labels:
                                ind = NP.where(labels == NP.asarray([tuple(reversed(label))], dtype=bl_labels.dtype)[0])[0]
                                if self.interpfuncs[key]['dims'].size == 1:
                                    if g12 is None:
                                        g12 = (self.interpfuncs[key]['interp']['real'][ind[0]](inp) - 1j * self.interpfuncs[key]['interp']['imag'][ind[0]](inp)).reshape(1,nchan,ntimes)
                                    else:
                                        g12 = NP.concatenate((g12, (self.interpfuncs[key]['interp']['real'][ind[0]](inp) - 1j * self.interpfuncs[key]['interp']['imag'][ind[0]](inp)).reshape(1,nchan,ntimes)), axis=0)
                                else:
                                    if g12 is None:
                                        g12 = (self.interpfuncs[key]['interp']['real'][ind[0]](inp_times,inp_freqs) - 1j * self.interpfuncs[key]['interp']['imag'][ind[0]](inp_times,inp_freqs)).reshape(1,nchan,ntimes)
                                    else:
                                        g12 = NP.concatenate((g12, (self.interpfuncs[key]['interp']['real'][ind[0]](inp_times,inp_freqs) - 1j * self.interpfuncs[key]['interp']['imag'][ind[0]](inp_times,inp_freqs)).reshape(1,nchan,ntimes)), axis=0)
                            else:
                                if g12 is None:
                                    g12 = NP.ones((1,nchan,ntimes), dtype=NP.complex)
                                else:
                                    g12 = NP.concatenate((g12, NP.ones((1,nchan,ntimes), dtype=NP.complex)), axis=0)
                        blgains = blgains * g12 * NP.ones((1,nchan,ntimes), dtype=NP.complex)
        # Reorder axes of the accumulated gains to the requested ordering
        interp_axes_order = ['label', 'frequency', 'time']
        if axes_order is None:
            axes_order = self.gaintable['antenna-based']['ordering']
        elif not isinstance(axes_order, (list, NP.ndarray)):
            raise TypeError('axes_order must be a list')
        else:
            if len(axes_order) != 3:
                raise ValueError('axes_order must be a three element list')
            for orderkey in ['label', 'frequency', 'time']:
                if orderkey not in axes_order:
                    raise ValueError('axes_order does not contain key "{0}"'.format(orderkey))
        transpose_order = NMO.find_list_in_list(interp_axes_order, axes_order)
        blgains = NP.transpose(blgains, axes=transpose_order)
        return blgains
#############################################################################
    def spline_gains(self, bl_labels, freqs=None, times=None, axes_order=None):
        """
        ------------------------------------------------------------------------
        Evaluate spline at the specified baselines for the given frequencies and
        times using attribute splinefuncs (set up by splinator()). Better
        alternative to interpolate_gains()

        Inputs:

        bl_labels   [Numpy structured array tuples] Labels of antennas in the
                    pair used to produce the baseline vector under fields 'A2'
                    and 'A1' for second and first antenna respectively. The
                    baseline vector is obtained by position of antennas under
                    'A2' minus position of antennas under 'A1'. The array is of
                    size nbl

        freqs       [None or numpy array] Array of frequencies at which the
                    gains are to be interpolated using the attribute
                    splinefuncs. If set to None (default), all frequencies in
                    the gaintable are assumed. The specified frequencies must
                    always lie within the range which was used in creating the
                    interpolation functions, otherwise an exception will be
                    raised. The array is of size nchan

        times       [None or numpy array] Array of times at which the gains
                    are to be interpolated using the attribute splinefuncs. If
                    set to None (default), all times in the gaintable are
                    assumed. The specified times must always lie within the
                    range which was used in creating the interpolation
                    functions, otherwise an exception will be raised. The array
                    is of size nts

        axes_order  [None or list or numpy array] Axes ordering for extracted
                    gains. It must contain the three elements 'label',
                    'frequency', and 'time'. If set to None, it will be
                    returned in the same order as in the input gaintable.

        Outputs:

        [numpy array] Complex gains of shape nbl x nchan x nts for the specified
        baselines, frequencies and times.
        ---------------------------------------------------------------------------
        """
        try:
            bl_labels
        except NameError:
            raise NameError('Input bl_labels must be specified')
        # Multiplicative identity of shape (1,1,1); broadcasting accumulates
        # the antenna-based and baseline-based contributions into it
        blgains = NP.asarray(1.0).reshape(1,1,1)
        if self.gaintable is not None:
            a1_labels = bl_labels['A1']
            a2_labels = bl_labels['A2']
            # Antenna-based gains enter a baseline as conj(g1)*g2; baseline-based
            # gains are applied directly
            for key in ['antenna-based', 'baseline-based']:
                if self.splinefuncs[key] is not None:
                    labels = self.gaintable[key]['label']
                    # NOTE(review): when freqs/times are None they are filled in
                    # from the first gain category that provides them, and those
                    # same arrays are reused for the next category iteration
                    if freqs is None:
                        if self.gaintable[key]['frequency'] is not None:
                            freqs = self.gaintable[key]['frequency']
                    elif isinstance(freqs, (int,list,NP.ndarray)):
                        freqs = NP.asarray(freqs).ravel()
                    else:
                        raise TypeError('Input freqs must be a scalar, list or numpy array')
                    if times is None:
                        if self.gaintable[key]['time'] is not None:
                            times = self.gaintable[key]['time']
                    elif isinstance(times, (int,list,NP.ndarray)):
                        times = NP.asarray(times).ravel()
                    else:
                        raise TypeError('Input times must be a scalar, list or numpy array')
                    # Classify requested frequencies as in-bounds / out-of-bounds
                    # relative to the tabulated range; any out-of-bounds is fatal
                    if self.gaintable[key]['frequency'] is not None:
                        ib_freq_index = NP.logical_and(freqs <= NP.amax(self.gaintable[key]['frequency']), freqs >= NP.amin(self.gaintable[key]['frequency']))
                        oobl_freq_index = freqs < NP.amin(self.gaintable[key]['frequency'])
                        oobr_freq_index = freqs > NP.amax(self.gaintable[key]['frequency'])
                        oob_freq_index = NP.logical_not(ib_freq_index)
                        if NP.any(oob_freq_index):
                            raise IndexError('One or more of the frequencies outside interpolation range')
                    else:
                        if freqs is not None:
                            ib_freq_index = NP.ones(freqs.size, dtype=NP.bool)
                            oob_freq_index = NP.zeros(freqs.size, dtype=NP.bool)
                            oobl_freq_index = NP.zeros(freqs.size, dtype=NP.bool)
                            oobr_freq_index = NP.zeros(freqs.size, dtype=NP.bool)
                        else:
                            ib_freq_index = None
                            oob_freq_index = None
                    # Same in-bounds classification for requested times
                    if self.gaintable[key]['time'] is not None:
                        ib_time_index = NP.logical_and(times <= NP.amax(self.gaintable[key]['time']), times >= NP.amin(self.gaintable[key]['time']))
                        oobl_time_index = times < NP.amin(self.gaintable[key]['time'])
                        oobr_time_index = times > NP.amax(self.gaintable[key]['time'])
                        oob_time_index = NP.logical_not(ib_time_index)
                        if NP.any(oob_time_index):
                            raise IndexError('One or more of the times outside interpolation range')
                    else:
                        if times is not None:
                            ib_time_index = NP.ones(times.size, dtype=NP.bool)
                            oob_time_index = NP.zeros(times.size, dtype=NP.bool)
                            oobl_time_index = NP.zeros(times.size, dtype=NP.bool)
                            oobr_time_index = NP.zeros(times.size, dtype=NP.bool)
                        else:
                            ib_time_index = None
                            oob_time_index = None
                    if isinstance(self.splinefuncs[key], dict):
                        if 'dims' not in self.splinefuncs[key]:
                            raise KeyError('Key "dims" not found in attribute splinefuncs[{0}]'.format(key))
                        if not isinstance(self.splinefuncs[key]['dims'], NP.ndarray):
                            raise TypeError('Key "dims" in attribute splinefuncs[{0}] must contain a numpy array'.format(key))
                        # Determine output grid sizes and the spline evaluation
                        # inputs based on which axes the gains vary along
                        if self.splinefuncs[key]['dims'].size == 1:
                            if self.splinefuncs[key]['dims'][0] == 'time':
                                ntimes = ib_time_index.size
                                if freqs is None:
                                    nchan = 1
                                else:
                                    nchan = ib_freq_index.size
                                inp = times[ib_time_index]
                            else:
                                nchan = ib_freq_index.size
                                if times is None:
                                    ntimes = 1
                                else:
                                    ntimes = ib_time_index.size
                                inp = freqs[ib_freq_index]
                        else:
                            inp_times = times[ib_time_index]
                            inp_freqs = freqs[ib_freq_index]
                            ntimes = ib_time_index.size
                            nchan = ib_freq_index.size
                            # Flattened (time, frequency) grids for pointwise
                            # evaluation via RectBivariateSpline.ev()
                            tgrid, fgrid = NP.meshgrid(inp_times, inp_freqs)
                            tvec = tgrid.ravel()
                            fvec = fgrid.ravel()
                    if key == 'antenna-based':
                        # Map each baseline's antennas onto rows of the gain table
                        ind1 = NMO.find_list_in_list(labels, a1_labels)
                        ind2 = NMO.find_list_in_list(labels, a2_labels)
                        if NP.sum(ind1.mask) > 0:
                            raise IndexError('Some antenna gains could not be found')
                        if NP.sum(ind2.mask) > 0:
                            raise IndexError('Some antenna gains could not be found')
                        g1_conj = None
                        g2 = None
                        # Evaluate per-baseline conj(g1) and g2 by calling the
                        # stored real/imag spline functions and stacking along labels
                        for i in xrange(ind1.size):
                            if self.splinefuncs[key]['dims'].size == 1:
                                if g1_conj is None:
                                    g1_conj = (self.splinefuncs[key]['interp']['real'][ind1[i]](inp) - 1j * self.splinefuncs[key]['interp']['imag'][ind1[i]](inp)).reshape(1,nchan,ntimes)
                                    g2 = (self.splinefuncs[key]['interp']['real'][ind2[i]](inp) + 1j * self.splinefuncs[key]['interp']['imag'][ind2[i]](inp)).reshape(1,nchan,ntimes)
                                else:
                                    g1_conj = NP.concatenate((g1_conj, (self.splinefuncs[key]['interp']['real'][ind1[i]](inp) - 1j * self.splinefuncs[key]['interp']['imag'][ind1[i]](inp)).reshape(1,nchan,ntimes)), axis=0)
                                    g2 = NP.concatenate((g2, (self.splinefuncs[key]['interp']['real'][ind2[i]](inp) + 1j * self.splinefuncs[key]['interp']['imag'][ind2[i]](inp)).reshape(1,nchan,ntimes)), axis=0)
                            else:
                                if g1_conj is None:
                                    g1_conj = (self.splinefuncs[key]['interp']['real'][ind1[i]].ev(tvec,fvec) - 1j * self.splinefuncs[key]['interp']['imag'][ind1[i]].ev(tvec,fvec)).reshape(1,nchan,ntimes)
                                    g2 = (self.splinefuncs[key]['interp']['real'][ind2[i]].ev(tvec,fvec) + 1j * self.splinefuncs[key]['interp']['imag'][ind2[i]].ev(tvec,fvec)).reshape(1,nchan,ntimes)
                                else:
                                    g1_conj = NP.concatenate((g1_conj, (self.splinefuncs[key]['interp']['real'][ind1[i]].ev(tvec,fvec) - 1j * self.splinefuncs[key]['interp']['imag'][ind1[i]].ev(tvec,fvec)).reshape(1,nchan,ntimes)), axis=0)
                                    g2 = NP.concatenate((g2, (self.splinefuncs[key]['interp']['real'][ind2[i]].ev(tvec,fvec) + 1j * self.splinefuncs[key]['interp']['imag'][ind2[i]].ev(tvec,fvec)).reshape(1,nchan,ntimes)), axis=0)
                        blgains = blgains * g1_conj * g2 * NP.ones((1,nchan,ntimes), dtype=NP.complex)
                    else:
                        # Baseline-based: look up each baseline label directly; a
                        # reversed label contributes the conjugate gain; a missing
                        # label contributes unity
                        g12 = None
                        for labelind,label in enumerate(bl_labels):
                            if label in labels:
                                ind = NP.where(self.gaintable[key]['label'] == label)[0]
                                if self.splinefuncs[key]['dims'].size == 1:
                                    if g12 is None:
                                        g12 = (self.splinefuncs[key]['interp']['real'][ind[0]](inp) + 1j * self.splinefuncs[key]['interp']['imag'][ind[0]](inp)).reshape(1,nchan,ntimes)
                                    else:
                                        g12 = NP.concatenate((g12, (self.splinefuncs[key]['interp']['real'][ind[0]](inp) + 1j * self.splinefuncs[key]['interp']['imag'][ind[0]](inp)).reshape(1,nchan,ntimes)), axis=0)
                                else:
                                    if g12 is None:
                                        g12 = (self.splinefuncs[key]['interp']['real'][ind[0]].ev(tvec,fvec) + 1j * self.splinefuncs[key]['interp']['imag'][ind[0]].ev(tvec,fvec)).reshape(1,nchan,ntimes)
                                    else:
                                        g12 = NP.concatenate((g12, (self.splinefuncs[key]['interp']['real'][ind[0]].ev(tvec,fvec) + 1j * self.splinefuncs[key]['interp']['imag'][ind[0]].ev(tvec,fvec)).reshape(1,nchan,ntimes)), axis=0)
                            elif NP.asarray([tuple(reversed(label))], dtype=bl_labels.dtype)[0] in labels:
                                ind = NP.where(labels == NP.asarray([tuple(reversed(label))], dtype=bl_labels.dtype)[0])[0]
                                if self.splinefuncs[key]['dims'].size == 1:
                                    if g12 is None:
                                        g12 = (self.splinefuncs[key]['interp']['real'][ind[0]](inp) - 1j * self.splinefuncs[key]['interp']['imag'][ind[0]](inp)).reshape(1,nchan,ntimes)
                                    else:
                                        g12 = NP.concatenate((g12, (self.splinefuncs[key]['interp']['real'][ind[0]](inp) - 1j * self.splinefuncs[key]['interp']['imag'][ind[0]](inp)).reshape(1,nchan,ntimes)), axis=0)
                                else:
                                    if g12 is None:
                                        g12 = (self.splinefuncs[key]['interp']['real'][ind[0]].ev(tvec,fvec) - 1j * self.splinefuncs[key]['interp']['imag'][ind[0]].ev(tvec,fvec)).reshape(1,nchan,ntimes)
                                    else:
                                        g12 = NP.concatenate((g12, (self.splinefuncs[key]['interp']['real'][ind[0]].ev(tvec,fvec) - 1j * self.splinefuncs[key]['interp']['imag'][ind[0]].ev(tvec,fvec)).reshape(1,nchan,ntimes)), axis=0)
                            else:
                                if g12 is None:
                                    g12 = NP.ones((1,nchan,ntimes), dtype=NP.complex)
                                else:
                                    g12 = NP.concatenate((g12, NP.ones((1,nchan,ntimes), dtype=NP.complex)), axis=0)
                        blgains = blgains * g12 * NP.ones((1,nchan,ntimes), dtype=NP.complex)
        # Reorder axes of the accumulated gains to the requested ordering
        interp_axes_order = ['label', 'frequency', 'time']
        if axes_order is None:
            axes_order = self.gaintable['antenna-based']['ordering']
        elif not isinstance(axes_order, (list, NP.ndarray)):
            raise TypeError('axes_order must be a list')
        else:
            if len(axes_order) != 3:
                raise ValueError('axes_order must be a three element list')
            for orderkey in ['label', 'frequency', 'time']:
                if orderkey not in axes_order:
                    raise ValueError('axes_order does not contain key "{0}"'.format(orderkey))
        transpose_order = NMO.find_list_in_list(interp_axes_order, axes_order)
        blgains = NP.transpose(blgains, axes=transpose_order)
        return blgains
#############################################################################
    def nearest_gains(self, bl_labels, freqs=None, times=None, axes_order=None):
        """
        ------------------------------------------------------------------------
        Extract complex instrument gains for given baselines from the gain table
        determined by nearest neighbor logic

        Inputs:

        bl_labels   [Numpy structured array tuples] Labels of antennas in the
                    pair used to produce the baseline vector under fields 'A2'
                    and 'A1' for second and first antenna respectively. The
                    baseline vector is obtained by position of antennas under
                    'A2' minus position of antennas under 'A1'

        freqs       [None or numpy array] Array of frequencies at which the
                    nearest tabulated gains are to be selected. If set to None
                    (default), all frequencies in the gaintable are assumed.
                    The array is of size nchan

        times       [None or numpy array] Array of times at which the nearest
                    tabulated gains are to be selected. If set to None
                    (default), all times in the gaintable are assumed. The
                    array is of size nts

        axes_order  [None or list or numpy array] Axes ordering for extracted
                    gains. It must contain the three elements 'label',
                    'frequency', and 'time'. If set to None, it will be
                    returned in the same order as in the input gaintable.

        Outputs:

        [numpy array] Complex gains of shape nbl x nchan x nts for the specified
        baselines, frequencies and times.
        ------------------------------------------------------------------------
        """
        try:
            bl_labels
        except NameError:
            raise NameError('Input bl_labels must be specified')
        # Multiplicative identity of shape (1,1,1); broadcasting accumulates
        # the antenna-based and baseline-based contributions into it
        blgains = NP.asarray(1.0).reshape(1,1,1)
        if self.gaintable is not None:
            a1_labels = bl_labels['A1']
            a2_labels = bl_labels['A2']
            for gainkey in ['antenna-based', 'baseline-based']:
                if gainkey in self.gaintable:
                    # Bring gains to canonical (label, frequency, time) order
                    temp_axes_order = ['label', 'frequency', 'time']
                    inp_order = self.gaintable[gainkey]['ordering']
                    temp_transpose_order = NMO.find_list_in_list(inp_order, temp_axes_order)
                    if NP.all(inp_order == temp_axes_order):
                        gains = NP.copy(self.gaintable[gainkey]['gains'])
                    else:
                        gains = NP.transpose(NP.copy(self.gaintable[gainkey]['gains']), axes=temp_transpose_order)
                    # Find nearest tabulated frequency channel for each requested
                    # frequency (out-of-bounds requests are dropped)
                    # NOTE(review): if both freqs and the table's 'frequency' are
                    # None, refind_freqs is never assigned before the test below
                    # (NameError) — confirm callers always supply one of them
                    freqs_to_search = copy.copy(freqs)
                    if freqs_to_search is None:
                        freqs_to_search = copy.copy(self.gaintable[gainkey]['frequency'])
                    if freqs_to_search is not None:
                        if self.gaintable[gainkey]['frequency'] is not None:
                            inpind, refind_freqs, distNN= LKP.find_1NN(self.gaintable[gainkey]['frequency'].reshape(-1,1), freqs_to_search.reshape(-1,1), remove_oob=True)
                        else:
                            refind_freqs = None
                    if refind_freqs is None:
                        refind_freqs = NP.arange(gains.shape[1])
                    # Same nearest-neighbor selection along the time axis
                    times_to_search = copy.copy(times)
                    if times_to_search is None:
                        times_to_search = copy.copy(self.gaintable[gainkey]['time'])
                    if times_to_search is not None:
                        if self.gaintable[gainkey]['time'] is not None:
                            inpind, refind_times, distNN = LKP.find_1NN(self.gaintable[gainkey]['time'].reshape(-1,1), times_to_search.reshape(-1,1), remove_oob=True)
                        else:
                            refind_times = None
                    if refind_times is None:
                        refind_times = NP.arange(gains.shape[2])
                    if gains.shape[0] == 1:
                        # Single label: gains apply to all baselines via broadcasting
                        blgains = blgains * gains[:,refind_freqs,refind_times].reshape(1,refind_freqs.size,refind_times.size)
                    else:
                        labels = self.gaintable[gainkey]['label']
                        if gainkey == 'antenna-based':
                            # Antenna-based gains enter a baseline as g2 * conj(g1)
                            ind1 = NMO.find_list_in_list(labels, a1_labels)
                            ind2 = NMO.find_list_in_list(labels, a2_labels)
                            if NP.sum(ind1.mask) > 0:
                                raise IndexError('Some antenna gains could not be found')
                            if NP.sum(ind2.mask) > 0:
                                raise IndexError('Some antenna gains could not be found')
                            blgains = blgains * gains[NP.ix_(ind2,refind_freqs,refind_times)].reshape(ind2.size,refind_freqs.size,refind_times.size) * gains[NP.ix_(ind1,refind_freqs,refind_times)].conj().reshape(ind1.size,refind_freqs.size,refind_times.size)
                        else:
                            # Baseline-based: append conjugated gains under reversed
                            # labels so a baseline can match in either orientation;
                            # unmatched baselines keep unity gain
                            labels_conj = [tuple(reversed(label)) for label in labels]
                            labels_conj = NP.asarray(labels_conj, dtype=labels.dtype)
                            labels_conj_appended = NP.concatenate((labels, labels_conj), axis=0)
                            gains_conj_appended = NP.concatenate((gains, gains.conj()), axis=0)
                            ind = NMO.find_list_in_list(labels_conj_appended, bl_labels)
                            selected_gains = gains_conj_appended[NP.ix_(ind.compressed(),refind_freqs,refind_times)]
                            if ind.compressed().size == 1:
                                selected_gains = selected_gains.reshape(NP.sum(~ind.mask),refind_freqs.size,refind_times.size)
                            blgains[~ind.mask, ...] = blgains[~ind.mask, ...] * selected_gains
        # Reorder axes of the accumulated gains to the requested ordering
        if axes_order is None:
            axes_order = inp_order
        elif not isinstance(axes_order, (list, NP.ndarray)):
            raise TypeError('axes_order must be a list')
        else:
            if len(axes_order) != 3:
                raise ValueError('axes_order must be a three element list')
            for orderkey in ['label', 'frequency', 'time']:
                if orderkey not in axes_order:
                    raise ValueError('axes_order does not contain key "{0}"'.format(orderkey))
        transpose_order = NMO.find_list_in_list(inp_order, axes_order)
        blgains = NP.transpose(blgains, axes=transpose_order)
        return blgains
#############################################################################
def eval_gains(self, bl_labels, freq_index=None, time_index=None,
axes_order=None):
"""
------------------------------------------------------------------------
Extract complex instrument gains for given baselines from the gain table
Inputs:
bl_labels [Numpy structured array tuples] Labels of antennas in the
pair used to produce the baseline vector under fields 'A2'
and 'A1' for second and first antenna respectively. The
baseline vector is obtained by position of antennas under
'A2' minus position of antennas under 'A1'
freq_index [None, int, list or numpy array] Index (scalar) or indices
(list or numpy array) along the frequency axis at which
gains are to be extracted. If set to None, gains at all
frequencies in the gain table will be extracted.
time_index [None, int, list or numpy array] Index (scalar) or indices
(list or numpy array) along the time axis at which gains
are to be extracted. If set to None, gains at all timesin
the gain table will be extracted.
axes_order [None or list or numpy array] Axes ordering for extracted
gains. It must contain the three elements 'label',
'frequency', and 'time'. If set to None, it will be
returned in the same order as in the input gaintable.
Outputs:
[numpy array] Complex gains of shape nbl x nchan x nts for the specified
baselines, frequencies and times.
------------------------------------------------------------------------
"""
return extract_gains(self.gaintable, bl_labels, freq_index=None,
time_index=None, axes_order=None)
#############################################################################
def write_gaintable(self, outfile, axes_order=None, compress=True,
compress_fmt='gzip', compress_opts=9):
"""
------------------------------------------------------------------------
Write gain table with specified axes ordering to external file in HDF5
format
Inputs:
outfile [string] Filename including full path into which the gain
table will be written
axes_order [None or list or numpy array] The axes ordering of gain
table that will be written to external file specified in
outfile. If set to None, it will store in the same order
as in the attribute gaintable
compress [boolean] Specifies if the gain table is written in
compressed format. The compression format and compression
parameters are specified in compress_fmt and compress_opts
respectively
compress_fmt
[string] Accepted values are 'gzip' (default) or 'lzf'. See
h5py module documentation for comparison of these
compression formats
compress_opts
[integer] Applies only if compress_fmt is set to 'gzip'. It
must be an integer in the range 0 to 9. Default=9 implies
maximum compression
------------------------------------------------------------------------
"""
try:
outfile
except NameError:
raise NameError('outfile not specified')
if axes_order is not None:
if not isinstance(axes_order, (list, NP.ndarray)):
raise TypeError('axes_order must be a list')
else:
if len(axes_order) != 3:
raise ValueError('axes_order must be a three element list')
for orderkey in ['label', 'frequency', 'time']:
if orderkey not in axes_order:
raise ValueError('axes_order does not contain key "{0}"'.format(orderkey))
if not isinstance(compress, bool):
raise TypeError('Input parameter compress must be boolean')
if compress:
if not isinstance(compress_fmt, str):
raise TypeError('Input parameter compress_fmt must be a string')
compress_fmt = compress_fmt.lower()
if compress_fmt not in ['gzip', 'lzf']:
raise ValueError('Input parameter compress_fmt invalid')
if compress_fmt == 'gzip':
if not isinstance(compress_opts, int):
raise TypeError('Input parameter compress_opts must be an integer')
compress_opts = NP.clip(compress_opts, 0, 9)
with h5py.File(outfile, 'w') as fileobj:
for gainkey in self.gaintable:
if self.gaintable[gainkey] is not None:
if axes_order is not None:
transpose_order = NMO.find_list_in_list(self.gaintable[gainkey]['ordering'], axes_order)
else:
axes_order = self.gaintable[gainkey]['ordering']
if NP.all(self.gaintable[gainkey]['ordering'] == axes_order):
gains = NP.copy(self.gaintable[gainkey]['gains'])
else:
gains = NP.transpose(NP.copy(self.gaintable[gainkey]['gains']), axes=transpose_order)
grp = fileobj.create_group(gainkey)
for subkey in self.gaintable[gainkey]:
if subkey == 'gains':
if compress:
chunkshape = []
for ind,axis in enumerate(axes_order):
if axis == 'frequency':
chunkshape += [gains.shape[ind]]
else:
chunkshape += [1]
chunkshape = tuple(chunkshape)
if compress_fmt == 'gzip':
dset = grp.create_dataset(subkey, data=gains, chunks=chunkshape, compression=compress_fmt, compression_opts=compress_opts)
else:
dset = grp.create_dataset(subkey, data=gains, chunks=chunkshape, compression=compress_fmt)
else:
grp.create_dataset(subkey, data=gains, chunks=chunkshape)
elif subkey == 'ordering':
dset = grp.create_dataset(subkey, data=axes_order)
else:
if isinstance(self.gaintable[gainkey][subkey], NP.ndarray):
dset = grp.create_dataset(subkey, data=self.gaintable[gainkey][subkey])
#################################################################################
class ROI_parameters(object):
"""
----------------------------------------------------------------------------
Class to manage information on the regions of interest for different
snapshots in an observation.
Attributes:
skymodel [instance of class SkyModel] The common sky model for all the
observing instances from which the ROI is determined based on
a subset corresponding to each snapshot observation.
freq [numpy vector] Frequency channels (with units specified by the
attribute freq_scale)
freq_scale [string] string specifying the units of frequency. Accepted
values are 'GHz', 'MHz' and 'Hz'. Default = 'GHz'
telescope [dictionary] Contains information about the telescope parameters
using which the primary beams in the regions of interest are
determined. It specifies the type of element, element size and
orientation. It consists of the following keys and information:
'id' [string] If set, will ignore the other keys and use
telescope details for known telescopes. Accepted
values are 'mwa', 'vla', 'gmrt', 'ugmrt', 'hera',
'paper', 'hirax', 'chime' and 'mwa_tools'. If using
'mwa_tools', the MWA_Tools and mwapb modules must
be installed and imported.
'shape' [string] Shape of antenna element. Accepted values
are 'dipole', 'delta', and 'dish'. Will be ignored
if key 'id' is set. 'delta' denotes a delta
function for the antenna element which has an
isotropic radiation pattern. 'delta' is the default
when keys 'id' and 'shape' are not set.
'size' [scalar] Diameter of the telescope dish (in meters)
if the key 'shape' is set to 'dish' or length of
the dipole if key 'shape' is set to 'dipole'. Will
be ignored if key 'shape' is set to 'delta'. Will
be ignored if key 'id' is set and a preset value
used for the diameter or dipole.
'orientation' [list or numpy array] If key 'shape' is set to
dipole, it refers to the orientation of the dipole
element unit vector whose magnitude is specified by
length. If key 'shape' is set to 'dish', it refers
to the position on the sky to which the dish is
pointed. For a dipole, this unit vector must be
provided in the local ENU coordinate system aligned
with the direction cosines coordinate system or in
the Alt-Az coordinate system. This will be
used only when key 'shape' is set to 'dipole'.
This could be a 2-element vector (transverse
direction cosines) where the third (line-of-sight)
component is determined, or a 3-element vector
specifying all three direction cosines or a two-
element coordinate in Alt-Az system. If not provided
it defaults to an eastward pointing dipole. If key
'shape' is set to 'dish', the orientation refers
to the pointing center of the dish on the sky. It
can be provided in Alt-Az system as a two-element
vector or in the direction cosine coordinate
system as a two- or three-element vector. If not
set in the case of a dish element, it defaults to
zenith. This is not to be confused with the key
'pointing_center' in dictionary 'pointing_info'
which refers to the beamformed pointing center of
the array. The coordinate system is specified by
the key 'ocoords'
'ocoords' [scalar string] specifies the coordinate system
for key 'orientation'. Accepted values are 'altaz'
and 'dircos'.
'element_locs'
[2- or 3-column array] Element locations that
constitute the tile. Each row specifies
location of one element in the tile. The
locations must be specified in local ENU
coordinate system. First column specifies along
local east, second along local north and the
third along local up. If only two columns are
specified, the third column is assumed to be
zeros. If 'elements_locs' is not provided, it
assumed to be a one-element system and not a
phased array as far as determination of primary
beam is concerned.
'groundplane' [scalar] height of telescope element above the
ground plane (in meteres). Default = None will
denote no ground plane effects.
'ground_modify'
[dictionary] contains specifications to modify
the analytically computed ground plane pattern. If
absent, the ground plane computed will not be
modified. If set, it may contain the following
keys:
'scale' [scalar] positive value to scale the
modifying factor with. If not set, the
scale factor to the modification is unity.
'max' [scalar] positive value to clip the
modified and scaled values to. If not set,
there is no upper limit
'latitude' [scalar] specifies latitude of the telescope site
(in degrees). Default = None (advisable to specify
a real value)
                 'longitude'   [scalar] specifies longitude of the telescope site
                               (in degrees). Default = 0 (GMT)
'altitude' [scalar] Specifies altitude of the telescope site
(in m) above the surface of the Earth. Default=0m
'pol' [string] specifies polarization when using
MWA_Tools for primary beam computation. Value of
key 'id' in attribute dictionary telescope must be
set to 'mwa_tools'. 'X' or 'x' denotes
X-polarization. Y-polarization is specified by 'Y'
or 'y'. If polarization is not specified when 'id'
of telescope is set to 'mwa_tools', it defaults
to X-polarization.
info [dictionary] contains information about the region of interest.
It consists of the following keys and information:
'radius' [list of scalars] list of angular radii (in degrees),
one entry for each snapshot observation which defines
the region of interest.
'center' [list of numpy vectors] list of centers of regions of
interest. For each snapshot, there is one element in
the list each of which is a center of corresponding
region of interest. Each numpy vector could be made of
two elements (Alt-Az) or three elements (direction
cosines).
'ind' [list of numpy vectors] list of vectors of indices
that define the region of interest as a subset of the
sky model. Each element of the list is a numpy vector
of indices indexing into the sky model corresponding
to each snapshot.
'pbeam' [list of numpy arrays] list of array of primary beam
values in the region of interest. The size of each
element in the list corresponding to each snapshot is
n_roi x nchan where n_roi is the number of pixels in
region of interest.
pinfo [list of dictionaries] Each dictionary element in the list
corresponds to a specific snapshot. It contains information
relating to the pointing center. The pointing center can be
specified either via element delay compensation or by directly
specifying the pointing center in a certain coordinate system.
Default = None (pointing centered at zenith). Each dictionary
element may consist of the following keys and information:
'gains' [numpy array] Complex element gains. Must be of
size equal to the number of elements as
specified by the number of rows in
'element_locs'. If set to None (default), all
element gains are assumed to be unity.
'delays' [numpy array] Delays (in seconds) to be applied
to the tile elements. Size should be equal to
number of tile elements (number of rows in
antpos). Default = None will set all element
delays to zero phasing them to zenith.
'pointing_center' [numpy array] This will apply in the absence of
key 'delays'. This can be specified as a row
vector. Should have two-columns if using Alt-Az
coordinates, or two or three columns if using
direction cosines. There is no default. The
coordinate system must be specified in
'pointing_coords' if 'pointing_center' is to be
used.
'pointing_coords' [string scalar] Coordinate system in which the
pointing_center is specified. Accepted values
are 'altaz' or 'dircos'. Must be provided if
'pointing_center' is to be used. No default.
'delayerr' [int, float] RMS jitter in delays used in the
beamformer. Random jitters are drawn from a
normal distribution with this rms. Must be
a non-negative scalar. If not provided, it
defaults to 0 (no jitter).
Member functions:
__init__() Initializes an instance of class ROI_parameters using default
values or using a specified initialization file
append_settings()
Determines and appends ROI (regions of interest) parameter
information for each snapshot observation using the input
parameters provided. Optionally also computes the primary beam
values in the region of interest using the telescope parameters.
save() Saves the information about the regions of interest to a FITS
file on disk
-----------------------------------------------------------------------------
"""
def __init__(self, init_file=None):
"""
-------------------------------------------------------------------------
Initializes an instance of class ROI_parameters using default values or
using a specified initialization file
Class attribute initialized are:
skymodel, freq, freq_scale, telescope, info, and pinfo
Read docstring of class ROI_parameters for details on these attributes.
Keyword input(s):
init_file [string] Location of the initialization file from which an
instance of class ROI_parameters will be created. File
format must be compatible with the one saved to disk by
member function save()
-------------------------------------------------------------------------
"""
argument_init = False
init_file_success = False
if init_file is not None:
try:
hdulist = fits.open(init_file)
except IOError:
argument_init = True
warnings.warn('\tinit_file provided but could not open the initialization file. Attempting to initialize with input parameters...')
if not argument_init:
n_obs = hdulist[0].header['n_obs']
extnames = [hdulist[i].header['EXTNAME'] for i in xrange(1,len(hdulist))]
self.info = {}
self.info['radius'] = []
self.info['center'] = []
self.info['ind'] = []
self.info['pbeam'] = []
self.telescope = {}
if 'id' in hdulist[0].header:
self.telescope['id'] = hdulist[0].header['telescope']
if 'latitude' in hdulist[0].header:
self.telescope['latitude'] = hdulist[0].header['latitude']
else:
self.telescope['latitude'] = None
if 'longitude' in hdulist[0].header:
self.telescope['longitude'] = hdulist[0].header['longitude']
else:
self.telescope['longitude'] = 0.0
if 'altitude' in hdulist[0].header:
self.telescope['altitude'] = hdulist[0].header['altitude']
else:
self.telescope['altitude'] = 0.0
try:
self.telescope['shape'] = hdulist[0].header['element_shape']
except KeyError:
raise KeyError('Antenna element shape not found in the init_file header')
try:
self.telescope['size'] = hdulist[0].header['element_size']
except KeyError:
raise KeyError('Antenna element size not found in the init_file header')
try:
self.telescope['ocoords'] = hdulist[0].header['element_ocoords']
except KeyError:
raise KeyError('Antenna element orientation coordinate system not found in the init_file header')
if 'ANTENNA ELEMENT ORIENTATION' in extnames:
self.telescope['orientation'] = hdulist['ANTENNA ELEMENT ORIENTATION'].data.reshape(1,-1)
else:
raise KeyError('Extension named "orientation" not found in init_file.')
if 'ANTENNA ELEMENT LOCATIONS' in extnames:
self.telescope['element_locs'] = hdulist['ANTENNA ELEMENT LOCATIONS'].data
if 'ground_plane' in hdulist[0].header:
self.telescope['groundplane'] = hdulist[0].header['ground_plane']
if 'ground_modify_scale' in hdulist[0].header:
if 'ground_modify' not in self.telescope:
self.telescope['ground_modify'] = {}
self.telescope['ground_modify']['scale'] = hdulist[0].header['ground_modify_scale']
if 'ground_modify_max' in hdulist[0].header:
if 'ground_modify' not in self.telescope:
self.telescope['ground_modify'] = {}
self.telescope['ground_modify']['max'] = hdulist[0].header['ground_modify_max']
else:
self.telescope['groundplane'] = None
if 'FREQ' in extnames:
self.freq = hdulist['FREQ'].data
else:
raise KeyError('Extension named "FREQ" not found in init_file.')
self.info['ind'] = [hdulist['IND_{0:0d}'.format(i)].data for i in range(n_obs)]
self.info['pbeam'] = [hdulist['PB_{0:0d}'.format(i)].data for i in range(n_obs)]
self.pinfo = []
if 'ANTENNA ELEMENT LOCATIONS' in extnames:
for i in range(n_obs):
self.pinfo += [{}]
# try:
# self.pinfo[-1]['delays'] = hdulist['DELAYS_{0:0d}'.format(i)].data
# except KeyError:
# raise KeyError('Extension DELAYS_{0:0d} for phased array beamforming not found in init_file'.format(i))
if 'DELAYS_{0:0d}'.format(i) in extnames:
self.pinfo[-1]['delays'] = hdulist['DELAYS_{0:0d}'.format(i)].data
if 'DELAYERR' in hdulist['DELAYS_{0:0d}'.format(i)].header:
delayerr = hdulist['DELAYS_{0:0d}'.format(i)].header['delayerr']
if delayerr <= 0.0:
self.pinfo[-1]['delayerr'] = None
else:
self.pinfo[-1]['delayerr'] = delayerr
len_pinfo = len(self.pinfo)
if len_pinfo > 0:
if len_pinfo != n_obs:
raise ValueError('Inconsistency in number of pointings in header and number of phased array delay settings')
for i in range(n_obs):
if 'POINTING_CENTER_{0:0d}'.format(i) in extnames:
if len_pinfo == 0:
self.pinfo += [{}]
self.pinfo[i]['pointing_center'] = hdulist['POINTING_CENTER_{0:0d}'.format(i)].data
try:
self.pinfo[i]['pointing_coords'] = hdulist['POINTING_CENTER_{0:0d}'.format(i)].header['pointing_coords']
except KeyError:
raise KeyError('Header of extension POINTING_CENTER_{0:0d} not found to contain key "pointing_coords" in init_file'.format(i))
len_pinfo = len(self.pinfo)
if len_pinfo > 0:
if len_pinfo != n_obs:
raise ValueError('Inconsistency in number of pointings in header and number of pointing centers')
hdulist.close()
init_file_success = True
return
else:
argument_init = True
if (not argument_init) and (not init_file_success):
raise ValueError('Initialization failed with the use of init_file.')
self.skymodel = None
self.telescope = None
self.info = {}
self.info['radius'] = []
self.info['ind'] = []
self.info['pbeam'] = []
self.info['center'] = []
self.info['center_coords'] = None
self.pinfo = []
self.freq = None
#############################################################################
def append_settings(self, skymodel, freq, pinfo=None, lst=None,
time_jd=None, roi_info=None, telescope=None,
freq_scale='GHz'):
"""
------------------------------------------------------------------------
Determines and appends ROI (regions of interest) parameter information
for each snapshot observation using the input parameters provided.
Optionally also computes the primary beam values in the region of
interest using the telescope parameters.
Inputs:
skymodel
[instance of class SkyModel] The common sky model for all the
observing instances from which the ROI is determined based on
a subset corresponding to each snapshot observation. If set
to None, the corresponding entries are all set to empty values
freq [numpy vector] Frequency channels (with units specified by the
attribute freq_scale)
pinfo [list of dictionaries] Each dictionary element in the list
corresponds to a specific snapshot. It contains information
relating to the pointing center. The pointing center can be
specified either via element delay compensation or by directly
specifying the pointing center in a certain coordinate system.
Default = None (pointing centered at zenith). Each dictionary
element may consist of the following keys and information:
'gains' [numpy array] Complex element gains. Must be
of size equal to the number of elements as
specified by the number of rows in
'element_locs'. If set to None (default), all
element gains are assumed to be unity.
'delays' [numpy array] Delays (in seconds) to be
applied to the tile elements. Size should be
equal to number of tile elements (number of
rows in antpos). Default = None will set all
element delays to zero phasing them to zenith
'pointing_center' [numpy array] This will apply in the absence
of key 'delays'. This can be specified as a
row vector. Should have two-columns if using
Alt-Az coordinates, or two or three columns
if using direction cosines. There is no
default. The coordinate system must be
specified in 'pointing_coords' if
'pointing_center' is to be used.
'pointing_coords' [string scalar] Coordinate system in which
the pointing_center is specified. Accepted
values are 'altaz' or 'dircos'. Must be
provided if 'pointing_center' is to be used.
No default.
'delayerr' [int, float] RMS jitter in delays used in
the beamformer. Random jitters are drawn
from a normal distribution with this rms.
Must be a non-negative scalar. If not
provided, it defaults to 0 (no jitter).
lst [scalar] LST in degrees. Will be used in determination of sky
coordinates inside ROI if not provided. Default=None.
time_jd [scalar] Time of the snapshot in JD. Will be used in
determination of sky coordinates inside ROI if not provided.
Default=None.
telescope
[dictionary] Contains information about the telescope parameters
using which the primary beams in the regions of interest are
determined. It specifies the type of element, element size and
orientation. It consists of the following keys and information:
'id' [string] If set, will ignore the other keys and
use telescope details for known telescopes.
Accepted values are 'mwa', 'vla', 'gmrt', 'ugmrt',
'hera', 'paper', 'hirax', 'chime' and 'mwa_tools'. If
using 'mwa_tools', the MWA_Tools and mwapb modules
must be installed and imported.
'shape' [string] Shape of antenna element. Accepted values
are 'dipole', 'delta', and 'dish'. Will be ignored
if key 'id' is set. 'delta' denotes a delta
function for the antenna element which has an
isotropic radiation pattern. 'delta' is the
default when keys 'id' and 'shape' are not set.
'size' [scalar] Diameter of the telescope dish (in
meters) if the key 'shape' is set to 'dish' or
length of the dipole if key 'shape' is set to
'dipole'. Will be ignored if key 'shape' is set to
'delta'. Will be ignored if key 'id' is set and a
preset value used for the diameter or dipole.
'orientation' [list or numpy array] If key 'shape' is set to
dipole, it refers to the orientation of the dipole
element unit vector whose magnitude is specified
by length. If key 'shape' is set to 'dish', it
refers to the position on the sky to which the
dish is pointed. For a dipole, this unit vector
must be provided in the local ENU coordinate
system aligned with the direction cosines
coordinate system or in the Alt-Az coordinate
system. This will be used only when key 'shape'
is set to 'dipole'. This could be a 2-element
vector (transverse direction cosines) where the
third (line-of-sight) component is determined,
or a 3-element vector specifying all three
direction cosines or a two-element coordinate in
Alt-Az system. If not provided it defaults to an
eastward pointing dipole. If key
'shape' is set to 'dish', the orientation refers
to the pointing center of the dish on the sky. It
can be provided in Alt-Az system as a two-element
vector or in the direction cosine coordinate
system as a two- or three-element vector. If not
set in the case of a dish element, it defaults to
zenith. This is not to be confused with the key
'pointing_center' in dictionary 'pointing_info'
which refers to the beamformed pointing center of
the array. The coordinate system is specified by
the key 'ocoords'
'ocoords' [scalar string] specifies the coordinate system
for key 'orientation'. Accepted values are 'altaz'
and 'dircos'.
'element_locs'
[2- or 3-column array] Element locations that
constitute the tile. Each row specifies
location of one element in the tile. The
locations must be specified in local ENU
coordinate system. First column specifies along
local east, second along local north and the
third along local up. If only two columns are
specified, the third column is assumed to be
zeros. If 'elements_locs' is not provided, it
assumed to be a one-element system and not a
phased array as far as determination of primary
beam is concerned.
'groundplane' [scalar] height of telescope element above the
ground plane (in meteres). Default = None will
denote no ground plane effects.
'ground_modify'
[dictionary] contains specifications to modify
the analytically computed ground plane pattern. If
absent, the ground plane computed will not be
modified. If set, it may contain the following
keys:
'scale' [scalar] positive value to scale the
modifying factor with. If not set, the
scale factor to the modification is unity.
'max' [scalar] positive value to clip the
modified and scaled values to. If not set,
there is no upper limit
'latitude' [scalar] specifies latitude of the telescope site
(in degrees). Default = None, otherwise should
equal the value specified during initialization
of the instance
'longitude' [scalar] specifies longitude of the telescope site
(in degrees). Default = None, otherwise should
equal the value specified during initialization
of the instance
'altitude' [scalar] specifies altitude of the telescope site
(in m). Default = None, otherwise should
equal the value specified during initialization
of the instance
'pol' [string] specifies polarization when using
MWA_Tools for primary beam computation. Value of
key 'id' in attribute dictionary telescope must be
set to 'mwa_tools'. 'X' or 'x' denotes
X-polarization. Y-polarization is specified by 'Y'
or 'y'. If polarization is not specified when 'id'
of telescope is set to 'mwa_tools', it defaults
to X-polarization.
------------------------------------------------------------------------
"""
try:
skymodel, freq, pinfo
except NameError:
raise NameError('skymodel, freq, and pinfo must be specified.')
if self.freq is None:
if freq is None:
raise ValueError('freq must be specified using a numpy array')
elif not isinstance(freq, NP.ndarray):
raise TypeError('freq must be specified using a numpy array')
self.freq = freq.ravel()
if (freq_scale is None) or (freq_scale == 'Hz') or (freq_scale == 'hz'):
self.freq = NP.asarray(freq)
elif freq_scale == 'GHz' or freq_scale == 'ghz':
self.freq = NP.asarray(freq) * 1.0e9
elif freq_scale == 'MHz' or freq_scale == 'mhz':
self.freq = NP.asarray(freq) * 1.0e6
elif freq_scale == 'kHz' or freq_scale == 'khz':
self.freq = NP.asarray(freq) * 1.0e3
else:
raise ValueError('Frequency units must be "GHz", "MHz", "kHz" or "Hz". If not set, it defaults to "Hz"')
self.freq_scale = 'Hz'
if self.telescope is None:
if isinstance(telescope, dict):
self.telescope = telescope
else:
raise TypeError('Input telescope must be a dictionary.')
if skymodel is None:
self.info['pbeam'] += [NP.asarray([])]
self.info['ind'] += [NP.asarray([])]
self.pinfo += [None]
elif not isinstance(skymodel, SM.SkyModel):
raise TypeError('skymodel should be an instance of class SkyModel.')
else:
self.skymodel = skymodel
if self.freq is None:
if freq is None:
raise ValueError('freq must be specified using a numpy array')
elif not isinstance(freq, NP.ndarray):
raise TypeError('freq must be specified using a numpy array')
self.freq = freq.ravel()
if (freq_scale is None) or (freq_scale == 'Hz') or (freq_scale == 'hz'):
self.freq = NP.asarray(freq)
elif freq_scale == 'GHz' or freq_scale == 'ghz':
self.freq = NP.asarray(freq) * 1.0e9
elif freq_scale == 'MHz' or freq_scale == 'mhz':
self.freq = NP.asarray(freq) * 1.0e6
elif freq_scale == 'kHz' or freq_scale == 'khz':
self.freq = NP.asarray(freq) * 1.0e3
else:
raise ValueError('Frequency units must be "GHz", "MHz", "kHz" or "Hz". If not set, it defaults to "Hz"')
self.freq_scale = 'Hz'
if roi_info is None:
raise ValueError('roi_info dictionary must be set.')
pbeam_input = False
if 'ind' in roi_info:
if roi_info['ind'] is not None:
self.info['ind'] += [roi_info['ind']]
if roi_info['ind'].size > 0:
if 'pbeam' in roi_info:
if roi_info['pbeam'] is not None:
try:
pb = roi_info['pbeam'].reshape(-1,self.freq.size)
except ValueError:
raise ValueError('Number of columns of primary beam in key "pbeam" of dictionary roi_info must be equal to number of frequency channels.')
if NP.asarray(roi_info['ind']).size == pb.shape[0]:
self.info['pbeam'] += [roi_info['pbeam'].astype(NP.float32)]
else:
raise ValueError('Number of elements in values in key "ind" and number of rows of values in key "pbeam" must be identical.')
pbeam_input = True
if not pbeam_input: # Will require sky positions in Alt-Az coordinates
if skymodel.coords == 'radec':
skycoords = SkyCoord(ra=skymodel.location[:,0]*units.deg, dec=skymodel.location[:,1]*units.deg, frame='fk5', equinox=Time(skymodel.epoch, format='jyear_str', scale='utc'))
if self.telescope['latitude'] is None:
raise ValueError('Latitude of the observatory must be provided.')
if lst is None:
raise ValueError('LST must be provided.')
if time_jd is None:
raise ValueError('Time in JD must be provided')
skycoords_altaz = skycoords.transform_to(AltAz(obstime=Time(time_jd, format='jd', scale='utc'), location=EarthLocation(lon=self.telescope['longitude']*units.deg, lat=self.telescope['latitude']*units.deg, height=self.telescope['altitude']*units.m)))
skypos_altaz = NP.hstack((skycoords_altaz.alt.deg.reshape(-1,1), skycoords_altaz.az.deg.reshape(-1,1)))
# skypos_altaz = GEOM.hadec2altaz(NP.hstack((NP.asarray(lst-skymodel.location[:,0]).reshape(-1,1), skymodel.location[:,1].reshape(-1,1))), self.telescope['latitude'], units='degrees') # Need to accurately take ephemeris into account
elif skymodel.coords == 'hadec':
if self.telescope['latitude'] is None:
raise ValueError('Latitude of the observatory must be provided.')
skypos_altaz = GEOM.hadec2altaz(skymodel.location, self.telescope['latitude'], units='degrees')
elif skymodel.coords == 'dircos':
skypos_altaz = GEOM.dircos2altaz(skymodel.location, units='degrees')
elif skymodel.coords == 'altaz':
skypos_altaz = skymodel.location
else:
raise KeyError('skycoords invalid or unspecified in skymodel')
if 'radius' in roi_info:
self.info['radius'] += [roi_info['radius']]
if 'center' in roi_info:
self.info['center'] += [roi_info['center']]
else:
if roi_info['radius'] is None:
roi_info['radius'] = 90.0
else:
roi_info['radius'] = max(0.0, min(roi_info['radius'], 90.0))
self.info['radius'] += [roi_info['radius']]
if roi_info['center'] is None:
self.info['center'] += [NP.asarray([90.0, 270.0]).reshape(1,-1)]
else:
roi_info['center'] = NP.asarray(roi_info['center']).reshape(1,-1)
if roi_info['center_coords'] == 'dircos':
self.info['center'] += [GEOM.dircos2altaz(roi_info['center'], units='degrees')]
elif roi_info['center_coords'] == 'altaz':
self.info['center'] += [roi_info['center']]
elif roi_info['center_coords'] == 'hadec':
self.info['center'] += [GEOM.hadec2altaz(roi_info['center'], self.telescope['latitude'], units='degrees')]
elif roi_info['center_coords'] == 'radec':
if lst is None:
raise KeyError('LST not provided for coordinate conversion')
hadec = NP.asarray([lst-roi_info['center'][0,0], roi_info['center'][0,1]]).reshape(1,-1)
self.info['center'] += [GEOM.hadec2altaz(hadec, self.telescope['latitude'], units='degrees')]
elif roi_info['center_coords'] == 'dircos':
self.info['center'] += [GEOM.dircos2altaz(roi_info['center'], units='degrees')]
else:
raise ValueError('Invalid coordinate system specified for center')
if skymodel.coords == 'radec':
if self.telescope['latitude'] is None:
raise ValueError('Latitude of the observatory must be provided.')
if lst is None:
raise ValueError('LST must be provided.')
if time_jd is None:
raise ValueError('Time in JD must be provided')
skycoords = SkyCoord(ra=skymodel.location[:,0]*units.deg, dec=skymodel.location[:,1]*units.deg, frame='fk5', equinox=Time(skymodel.epoch, format='jyear_str', scale='utc'))
skycoords_altaz = skycoords.transform_to(AltAz(obstime=Time(time_jd, format='jd', scale='utc'), location=EarthLocation(lon=self.telescope['longitude']*units.deg, lat=self.telescope['latitude']*units.deg, height=self.telescope['altitude']*units.m)))
skypos_altaz = NP.hstack((skycoords_altaz.alt.deg.reshape(-1,1), skycoords_altaz.az.deg.reshape(-1,1)))
# skypos_altaz = GEOM.hadec2altaz(NP.hstack((NP.asarray(lst-skymodel.location[:,0]).reshape(-1,1), skymodel.location[:,1].reshape(-1,1))), self.telescope['latitude'], units='degrees')
elif skymodel.coords == 'hadec':
if self.telescope['latitude'] is None:
raise ValueError('Latitude of the observatory must be provided.')
skypos_altaz = GEOM.hadec2altaz(skymodel.location, self.telescope['latitude'], units='degrees')
elif skymodel.coords == 'dircos':
skypos_altaz = GEOM.dircos2altaz(skymodel.location, units='degrees')
elif skymodel.coords == 'altaz':
skypos_altaz = skymodel.location
else:
raise KeyError('skycoords invalid or unspecified in skymodel')
dtheta = GEOM.sphdist(self.info['center'][-1][0,1], self.info['center'][-1][0,0], 270.0, 90.0)
if dtheta > 1e-2: # ROI center is not zenith
m1, m2, d12 = GEOM.spherematch(self.info['center'][-1][0,0], self.info['center'][-1][0,1], skypos_altaz[:,0], skypos_altaz[:,1], roi_info['radius'], maxmatches=0)
else:
m2, = NP.where(skypos_altaz[:,0] >= 90.0-roi_info['radius']) # select sources whose altitude (angle above horizon) is 90-radius
self.info['ind'] += [m2]
if self.info['center_coords'] is None:
if 'center_coords' in roi_info:
if (roi_info['center_coords'] == 'altaz') or (roi_info['center_coords'] == 'dircos') or (roi_info['center_coords'] == 'hadec') or (roi_info['center_coords'] == 'radec'):
self.info['center_coords'] = roi_info['center_coords']
if not pbeam_input:
if pinfo is None:
raise ValueError('Pointing info dictionary pinfo must be specified.')
self.pinfo += [pinfo]
if 'pointing_coords' in pinfo: # Convert pointing coordinate to Alt-Az
if (pinfo['pointing_coords'] != 'dircos') and (pinfo['pointing_coords'] != 'altaz'):
if self.telescope['latitude'] is None:
raise ValueError('Latitude of the observatory must be provided.')
if pinfo['pointing_coords'] == 'radec':
if lst is None:
raise ValueError('LST must be provided.')
self.pinfo[-1]['pointing_center'] = NP.asarray([lst-pinfo['pointing_center'][0,0], pinfo['pointing_center'][0,1]]).reshape(1,-1)
self.pinfo[-1]['pointing_center'] = GEOM.hadec2altaz(self.pinfo[-1]['pointing_center'], self.telescope['latitude'], units='degrees')
elif pinfo[-1]['pointing_coords'] == 'hadec':
self.pinfo[-1]['pointing_center'] = GEOM.hadec2altaz(pinfo[-1]['pointing_center'], self.telescope['latitude'], units='degrees')
else:
raise ValueError('pointing_coords in dictionary pinfo must be "dircos", "altaz", "hadec" or "radec".')
self.pinfo[-1]['pointing_coords'] = 'altaz'
if 'pbeam_chromaticity' not in roi_info:
roi_info['pbeam_chromaticity'] = False
if 'pbeam_reffreq' not in roi_info:
roi_info['pbeam_reffreq'] = self.freq[self.freq.size//2]
beam_chromaticity = roi_info['pbeam_chromaticity']
if beam_chromaticity:
freqs_to_compute = self.freq
else:
nearest_freq_ind = NP.argmin(NP.abs(self.freq - roi_info['pbeam_reffreq']))
freqs_to_compute = NP.asarray(roi_info['pbeam_reffreq']).reshape(-1)
ind = self.info['ind'][-1]
if ind.size > 0:
if 'id' in self.telescope:
if self.telescope['id'] == 'mwa_tools':
if not mwa_tools_found:
raise ImportError('MWA_Tools could not be imported which is required for power pattern computation.')
pbeam = NP.empty((ind.size, self.freq.size))
for i in range(freqs_to_compute.size):
pbx_MWA, pby_MWA = MWAPB.MWA_Tile_advanced(NP.radians(90.0-skypos_altaz[ind,0]).reshape(-1,1), NP.radians(skypos_altaz[ind,1]).reshape(-1,1), freq=freqs_to_compute[i], delays=self.pinfo[-1]['delays']/435e-12)
if 'pol' in self.telescope:
if (self.telescope['pol'] == 'X') or (self.telescope['pol'] == 'x'):
pbeam[:,i] = pbx_MWA.ravel()
elif (self.telescope['pol'] == 'Y') or (self.telescope['pol'] == 'y'):
pbeam[:,i] = pby_MWA.ravel()
else:
raise ValueError('Key "pol" in attribute dictionary telescope is invalid.')
else:
self.telescope['pol'] = 'X'
pbeam[:,i] = pbx_MWA.ravel()
else:
pbeam = PB.primary_beam_generator(skypos_altaz[ind,:], freqs_to_compute, self.telescope, freq_scale=self.freq_scale, skyunits='altaz', pointing_info=self.pinfo[-1])
else:
pbeam = PB.primary_beam_generator(skypos_altaz[ind,:], freqs_to_compute, self.telescope, freq_scale=self.freq_scale, skyunits='altaz', pointing_info=self.pinfo[-1])
self.info['pbeam'] += [pbeam.astype(NP.float64) * NP.ones(self.freq.size).reshape(1,-1)]
else:
self.info['pbeam'] += [NP.asarray([])]
#############################################################################
def save(self, infile, tabtype='BinTableHDU', overwrite=False, verbose=True):
"""
------------------------------------------------------------------------
Saves the information about the regions of interest to a FITS file on
disk
Inputs:
infile [string] Filename with full path to be saved to. Will be
appended with '.fits' extension
Keyword Input(s):
tabtype [string] indicates table type for one of the extensions in
the FITS file. Allowed values are 'BinTableHDU' and
'TableHDU' for binary ascii tables respectively. Default is
'BinTableHDU'.
overwrite [boolean] True indicates overwrite even if a file already
exists. Default = False (does not overwrite)
verbose [boolean] If True (default), prints diagnostic and progress
messages. If False, suppress printing such messages.
----------------------------------------------------------------------------
"""
try:
infile
except NameError:
raise NameError('No filename provided. Aborting ROI_parameters.save()...')
filename = infile + '.fits'
if verbose:
print('\nSaving information about regions of interest...')
hdulist = []
hdulist += [fits.PrimaryHDU()]
hdulist[0].header['EXTNAME'] = 'PRIMARY'
hdulist[0].header['n_obs'] = (len(self.info['ind']), 'Number of observations')
if 'id' in self.telescope:
hdulist[0].header['telescope'] = (self.telescope['id'], 'Telescope Name')
hdulist[0].header['element_shape'] = (self.telescope['shape'], 'Antenna element shape')
hdulist[0].header['element_size'] = (self.telescope['size'], 'Antenna element size [m]')
hdulist[0].header['element_ocoords'] = (self.telescope['ocoords'], 'Antenna element orientation coordinates')
if self.telescope['latitude'] is not None:
hdulist[0].header['latitude'] = (self.telescope['latitude'], 'Latitude (in degrees)')
hdulist[0].header['longitude'] = (self.telescope['longitude'], 'Longitude (in degrees)')
if self.telescope['altitude'] is not None:
hdulist[0].header['altitude'] = (self.telescope['altitude'], 'Altitude (in m)')
if self.telescope['groundplane'] is not None:
hdulist[0].header['ground_plane'] = (self.telescope['groundplane'], 'Antenna element height above ground plane [m]')
if 'ground_modify' in self.telescope:
if 'scale' in self.telescope['ground_modify']:
hdulist[0].header['ground_modify_scale'] = (self.telescope['ground_modify']['scale'], 'Ground plane modification scale factor')
if 'max' in self.telescope['ground_modify']:
hdulist[0].header['ground_modify_max'] = (self.telescope['ground_modify']['max'], 'Maximum ground plane modification')
hdulist += [fits.ImageHDU(self.telescope['orientation'], name='Antenna element orientation')]
if verbose:
print('\tCreated an extension for antenna element orientation.')
if 'element_locs' in self.telescope:
hdulist += [fits.ImageHDU(self.telescope['element_locs'], name='Antenna element locations')]
hdulist += [fits.ImageHDU(self.freq, name='FREQ')]
if verbose:
print('\t\tCreated an extension HDU of {0:0d} frequency channels'.format(self.freq.size))
for i in range(len(self.info['ind'])):
if self.info['ind'][i].size > 0:
hdulist += [fits.ImageHDU(self.info['ind'][i], name='IND_{0:0d}'.format(i))]
hdulist += [fits.ImageHDU(self.info['pbeam'][i], name='PB_{0:0d}'.format(i))]
if self.pinfo: # if self.pinfo is not empty
if self.pinfo[i] is not None: # if the specific i-th entry in self.pinfo is not empty
if 'delays' in self.pinfo[i]:
hdulist += [fits.ImageHDU(self.pinfo[i]['delays'], name='DELAYS_{0:0d}'.format(i))]
if 'delayerr' in self.pinfo[i]:
if self.pinfo[i]['delayerr'] is not None:
hdulist[-1].header['delayerr'] = (self.pinfo[i]['delayerr'], 'Jitter in delays [s]')
else:
hdulist[-1].header['delayerr'] = (0.0, 'Jitter in delays [s]')
if 'pointing_center' in self.pinfo[i]:
hdulist += [fits.ImageHDU(self.pinfo[i]['pointing_center'], name='POINTING_CENTER_{0:0d}'.format(i))]
if 'pointing_coords' in self.pinfo[i]:
hdulist[-1].header['pointing_coords'] = (self.pinfo[i]['pointing_coords'], 'Pointing coordinate system')
else:
raise KeyError('Key "pointing_coords" not found in attribute pinfo.')
if verbose:
print('\t\tCreated HDU extensions for {0:0d} observations containing ROI indices and primary beams'.format(len(self.info['ind'])))
if verbose:
print('\tNow writing FITS file to disk...')
hdu = fits.HDUList(hdulist)
hdu.writeto(filename, overwrite=overwrite)
if verbose:
print('\tRegions of interest information written successfully to FITS file on disk:\n\t\t{0}\n'.format(filename))
#################################################################################
class InterferometerArray(object):
"""
----------------------------------------------------------------------------
Class to manage information on a multi-element interferometer array.
Attributes:
astroutils_githash
[string] Git# of the AstroUtils version used to create/save
the instance of class InterferometerArray
prisim_githash
[string] Git# of the PRISim version used to create/save
the instance of class InterferometerArray
A_eff [scalar, list or numpy vector] Effective area of the
interferometers (in m^2). If a scalar is provided, it is assumed
to be identical for all interferometers. Otherwise, one value
must be specified for each interferometer. Default is
pi * (25/2)^2, appropriate for a 25 m VLA dish.
baselines: [M x 3 Numpy array] The baseline vectors associated with the
M interferometers in SI units. The coordinate system of these
vectors is specified by another attribute baseline_coords.
baseline_coords
[string] Coordinate system for the baseline vectors. Default is
'localenu'. Other accepted values are 'equatorial'
baseline_lengths
[M-element numpy array] Lengths of the baseline in SI units
projected_baselines
[M x 3 x n_snaps Numpy array] The projected baseline vectors
associated with the M interferometers and number of snapshots in
SI units. The coordinate system of these vectors is specified by
either pointing_center, phase_center or as specified in input to
member function project_baselines().
bp [numpy array] Bandpass weights of size n_baselines x nchan x
n_acc, where n_acc is the number of accumulations in the
observation, nchan is the number of frequency channels, and
n_baselines is the number of baselines
bp_wts [numpy array] Additional weighting to be applied to the bandpass
shapes during the application of the member function
delay_transform(). Same size as attribute bp.
channels [list or numpy vector] frequency channels in Hz
eff_Q [scalar, list or numpy vector] Efficiency of the interferometers,
one value for each interferometer. Default = 0.89, appropriate
for the VLA. Has to be between 0 and 1. If only a scalar value
provided, it will be assumed to be identical for all the
interferometers. Otherwise, one value must be provided for each
of the interferometers.
freq_resolution
[scalar] Frequency resolution (in Hz)
labels [list of 2-element tuples] A unique identifier (tuple of
strings) for each of the interferometers.
lags [numpy vector] Time axis obtained when the frequency axis is
inverted using a FFT. Same size as channels. This is
computed in member function delay_transform().
lag_kernel [numpy array] Inverse Fourier Transform of the frequency
bandpass shape. In other words, it is the impulse response
corresponding to frequency bandpass. Same size as attributes
bp and bp_wts. It is initialized in __init__() member function
but effectively computed in member function delay_transform()
latitude [Scalar] Latitude of the interferometer's location. Default
is 34.0790 degrees North corresponding to that of the VLA.
altitude [Scalar] Altitude of the interferometer's location. Default
is 0 m.
lst [list] List of LST (in degrees) for each timestamp
n_acc [scalar] Number of accumulations
groups [dictionary] Contains the grouping of unique baselines and the
redundant baselines as numpy recarray under each unique baseline
category/flavor. It contains as keys the labels (tuple of A1, A2)
of unique baselines and the value under each of these keys is a
list of baseline labels that are redundant under that category
bl_reversemap
[dictionary] Contains the baseline category for each baseline.
The keys are baseline labels as tuple and the value under each
key is the label of the unique baseline category that it falls
under.
gaininfo [None or instance of class GainInfo] Instance of class
Gaininfo. If set to None, default gains assumed to be unity.
gradient_mode
[string] If set to None, visibilities will be simulated as
usual. If set to string, both visibilities and visibility
gradients with respect to the quantity specified in the
string will be simulated. Currently accepted value is
'baseline'. Plan to incorporate gradients with respect to
'skypos' and 'frequency' as well in the future.
gradient [dictionary] If gradient_mode is set to None, it is an empty
dictionary. If gradient_mode is not None, this quantity holds
the gradient under the key specified by gradient_mode.
Currently, supports 'baseline' key. Other gradients will be
supported in future. It contains the following keys and values.
If gradient_mode == 'baseline':
'baseline' [numpy array] Visibility gradients with respect to
baseline vector. Complex numpy array of shape
3 x nbl x nchan x nts
obs_catalog_indices
[list of lists] Each element in the top list corresponds to a
timestamp. Inside each top list is a list of indices of sources
from the catalog which are observed inside the region of
interest. This is computed inside member function observe().
pointing_center
[2-column numpy array] Pointing center (latitude and
longitude) of the observation at a given timestamp. This is
where the telescopes will be phased up to as reference.
Coordinate system for the pointing_center is specified by another
attribute pointing_coords.
phase_center
[2-column numpy array] Phase center (latitude and
longitude) of the observation at a given timestamp. This is
where the telescopes will be phased up to as reference.
Coordinate system for the phase_center is specified by another
attribute phase_center_coords.
pointing_coords
[string] Coordinate system for telescope pointing. Accepted
values are 'radec' (RA-Dec), 'hadec' (HA-Dec) or 'altaz'
(Altitude-Azimuth). Default = 'hadec'.
phase_center_coords
[string] Coordinate system for array phase center. Accepted
values are 'radec' (RA-Dec), 'hadec' (HA-Dec) or 'altaz'
(Altitude-Azimuth). Default = 'hadec'.
skycoords [string] Coordinate system for the sky positions of sources.
Accepted values are 'radec' (RA-Dec), 'hadec' (HA-Dec) or
'altaz' (Altitude-Azimuth). Default = 'radec'.
skyvis_freq [numpy array] Complex visibility due to sky emission (in Jy or K)
along frequency axis for each interferometer estimated from the
specified external catalog. Same size as vis_freq. Used in the
member function observe(). Read its docstring for more details.
Has dimensions n_baselines x nchan x n_snaps.
skyvis_lag [numpy array] Complex visibility due to sky emission (in Jy Hz or
K Hz) along the delay axis for each interferometer obtained by
FFT of skyvis_freq along frequency axis. Same size as vis_freq.
Created in the member function delay_transform(). Read its
docstring for more details. Same dimensions as skyvis_freq
telescope [dictionary] dictionary that specifies the type of element,
element size and orientation. It consists of the following keys
and values:
'id' [string] If set, will ignore the other keys and use
telescope details for known telescopes. Accepted
values are 'mwa', 'vla', 'gmrt', 'ugmrt', 'hera',
'paper', 'hirax', 'chime'and other custom values.
Default = 'mwa'
'shape' [string] Shape of antenna element. Accepted values
are 'dipole', 'delta', and 'dish'. Will be ignored
if key 'id' is set. 'delta' denotes a delta
function for the antenna element which has an
isotropic radiation pattern. 'dish' is the default
when keys 'id' and 'shape' are not set.
'size' [scalar] Diameter of the telescope dish (in meters)
if the key 'shape' is set to 'dish' or length of
the dipole if key 'shape' is set to 'dipole'. Will
be ignored if key 'shape' is set to 'delta'. Will
be ignored if key 'id' is set and a preset value
used for the diameter or dipole. Default = 25.0.
'orientation' [list or numpy array] If key 'shape' is set to
dipole, it refers to the orientation of the dipole
element unit vector whose magnitude is specified by
length. If key 'shape' is set to 'dish', it refers
to the position on the sky to which the dish is
pointed. For a dipole, this unit vector must be
provided in the local ENU coordinate system aligned
with the direction cosines coordinate system or in
the Alt-Az coordinate system.
This could be a 2-element vector (transverse
direction cosines) where the third (line-of-sight)
component is determined, or a 3-element vector
specifying all three direction cosines or a two-
element coordinate in Alt-Az system. If not provided
it defaults to an eastward pointing dipole. If key
'shape' is set to 'dish', the orientation refers
to the pointing center of the dish on the sky. It
can be provided in Alt-Az system as a two-element
vector or in the direction cosine coordinate
system as a two- or three-element vector. If not
set in the case of a dish element, it defaults to
zenith. The coordinate system is specified by
the key 'ocoords'
'ocoords' [scalar string] specifies the coordinate system
for key 'orientation'. Accepted values are 'altaz'
and 'dircos'.
'groundplane' [scalar] height of telescope element above the
ground plane (in meteres). Default = None will
denote no ground plane effects.
'ground_modify'
[dictionary] contains specifications to modify
the analytically computed ground plane pattern. If
absent, the ground plane computed will not be
modified. If set, it may contain the following
keys:
'scale' [scalar] positive value to scale the
modifying factor with. If not set, the
scale factor to the modification is unity.
'max' [scalar] positive value to clip the
modified and scaled values to. If not set,
there is no upper limit
layout [dictionary] contains array layout information (on the full
array even if only a subset of antennas or baselines are used
in the simulation). It contains the following keys and
information:
'positions' [numpy array] Antenna positions (in m) as a
nant x 3 array in coordinates specified by key
'coords'
'coords' [string] Coordinate system in which antenna
positions are specified. Currently accepts 'ENU'
for local ENU system
'labels' [list or numpy array of strings] Unique string
identifiers for antennas. Must be of same length
as nant.
'ids' [list or numpy array of integers] Unique integer
identifiers for antennas. Must be of same length
as nant.
timestamp [list] List of timestamps during the observation (Julian date)
t_acc [list] Accumulation time (sec) corresponding to each timestamp
t_obs [scalar] Total observing duration (sec)
Tsys [scalar, list or numpy vector] System temperature in Kelvin. At
end of the simulation, it will be a numpy array of size
n_baselines x nchan x n_snaps.
Tsysinfo [list of dictionaries] Contains a list of system temperature
information for each timestamp of observation. Each dictionary
                element in the list has the following keys and values:
'Trx' [scalar] Recevier temperature (in K) that is
applicable to all frequencies and baselines
'Tant' [dictionary] contains antenna temperature info
from which the antenna temperature is estimated.
Used only if the key 'Tnet' is absent or set to
None. It has the following keys and values:
'f0' [scalar] Reference frequency (in Hz)
from which antenna temperature will
be estimated (see formula below)
'T0' [scalar] Antenna temperature (in K) at
the reference frequency specified in
key 'f0'. See formula below.
'spindex' [scalar] Antenna temperature spectral
index. See formula below.
Tsys = Trx + Tant['T0'] * (f/Tant['f0'])**spindex
'Tnet' [numpy array] Pre-computed Tsys (in K)
information that will be used directly to set the
Tsys. If specified, the information under keys
'Trx' and 'Tant' will be ignored. If a scalar
value is provided, it will be assumed to be
identical for all interferometers and all
frequencies. If a vector is provided whose length
is equal to the number of interferoemters, it
will be assumed identical for all frequencies. If
a vector is provided whose length is equal to the
number of frequency channels, it will be assumed
identical for all interferometers. If a 2D array
is provided, it should be of size
n_baselines x nchan. Tsys = Tnet
vis_freq [numpy array] The simulated complex visibility (in Jy or K)
observed by each of the interferometers along frequency axis for
each timestamp of observation per frequency channel. It is the
sum of skyvis_freq and vis_noise_freq. It can be either directly
initialized or simulated in observe(). Same dimensions as
skyvis_freq.
vis_lag [numpy array] The simulated complex visibility (in Jy Hz or K Hz)
along delay axis for each interferometer obtained by FFT of
vis_freq along frequency axis. Same size as vis_noise_lag and
                skyvis_lag. It is evaluated in member function delay_transform().
vis_noise_freq
[numpy array] Complex visibility noise (in Jy or K) generated
using an rms of vis_rms_freq along frequency axis for each
interferometer which is then added to the generated sky
visibility. Same dimensions as skyvis_freq. Used in the member
function observe(). Read its docstring for more details.
vis_noise_lag
[numpy array] Complex visibility noise (in Jy Hz or K Hz) along
delay axis for each interferometer generated using an FFT of
vis_noise_freq along frequency axis. Same size as vis_noise_freq.
Created in the member function delay_transform(). Read its
docstring for more details.
vis_rms_freq
[list of float] Theoretically estimated thermal noise rms (in Jy
or K) in visibility measurements. Same size as vis_freq. This
will be estimated and used to inject simulated noise when a call
to member function observe() is made. Read the docstring of
observe() for more details. The noise rms is estimated from the
instrument parameters as:
(2 k T_sys / (A_eff x sqrt(2 x channel_width x t_acc))) / Jy, or
T_sys / sqrt(2 x channel_width x t_acc)
simparms_file
[string] Full path to filename containing simulation parameters
in YAML format
Member functions:
__init__() Initializes an instance of class InterferometerArray
observe() Simulates an observing run with the interferometer
specifications and an external sky catalog thus producing
visibilities. The simulation generates visibilities
observed by the interferometer for the specified
parameters.
observing_run() Simulate an extended observing run in 'track' or 'drift'
mode, by an instance of the InterferometerArray class, of
the sky when a sky catalog is provided. The simulation
generates visibilities observed by the interferometer
array for the specified parameters. Uses member function
observe() and builds the observation from snapshots. The
timestamp for each snapshot is the current time at which
the snapshot is generated.
generate_noise() Generates thermal noise from attributes that describe
system parameters which can be added to sky visibilities
add_noise() Adds the thermal noise generated in member function
generate_noise() to the sky visibilities after
extracting and applying complex instrument gains
apply_gradients() Apply the perturbations in combination with the
gradients to determine perturbed visibilities
duplicate_measurements()
Duplicate visibilities based on redundant baselines
specified. This saves time when compared to simulating
visibilities over redundant baselines. Thus, it is more
efficient to simulate unique baselines and duplicate
measurements for redundant baselines
getBaselineGroupKeys()
Find redundant baseline group keys of groups that
contain the input baseline labels
getBaselinesInGroups()
Find all redundant baseline labels in groups that
contain the given input baseline labels
getThreePointCombinations()
                        Return all or only unique 3-point combinations of
                        baselines
getClosurePhase() Get closure phases of visibilities from triplets of
antennas
rotate_visibilities()
Centers the phase of visibilities around any given phase
center. Project baseline vectors with respect to a
reference point on the sky. Essentially a wrapper to
member functions phase_centering() and
project_baselines()
phase_centering() Centers the phase of visibilities around any given phase
center.
project_baselines() Project baseline vectors with respect to a reference
point on the sky. Assigns the projected baselines to the
attribute projected_baselines
    conjugate()         Flips the baseline vectors and conjugates the
                        visibilities for a specified subset of baselines.
delay_transform() Transforms the visibilities from frequency axis onto
delay (time) axis using an IFFT. This is performed for
noiseless sky visibilities, thermal noise in visibilities,
and observed visibilities.
concatenate() Concatenates different visibility data sets from instances
of class InterferometerArray along baseline, frequency or
time axis.
save() Saves the interferometer array information to disk in
HDF5, FITS, NPZ and UVFITS formats
pyuvdata_write() Saves the interferometer array information to disk in
various formats through pyuvdata module
----------------------------------------------------------------------------
"""
def __init__(self, labels, baselines, channels, telescope=None, eff_Q=0.89,
latitude=34.0790, longitude=0.0, altitude=0.0,
skycoords='radec', A_eff=NP.pi*(25.0/2)**2,
pointing_coords='hadec', layout=None, blgroupinfo=None,
baseline_coords='localenu', freq_scale=None, gaininfo=None,
init_file=None, simparms_file=None):
"""
------------------------------------------------------------------------
Intialize the InterferometerArray class which manages information on a
multi-element interferometer.
Class attributes initialized are:
astroutils_githash, prisim_githash, labels, baselines, channels,
telescope, latitude, longitude, altitude, skycoords, eff_Q, A_eff,
pointing_coords, baseline_coords, baseline_lengths, channels, bp,
bp_wts, freq_resolution, lags, lst, obs_catalog_indices,
pointing_center, skyvis_freq, skyvis_lag, timestamp, t_acc, Tsys,
Tsysinfo, vis_freq, vis_lag, t_obs, n_acc, vis_noise_freq,
vis_noise_lag, vis_rms_freq, geometric_delays, projected_baselines,
simparms_file, layout, gradient, gradient_mode, gaininfo, blgroups,
bl_reversemap
Read docstring of class InterferometerArray for details on these
attributes.
Keyword input(s):
init_file [string] Location of the initialization file from which an
instance of class InterferometerArray will be created.
File format must be compatible with the one saved to disk
by member function save().
simparms_file
[string] Location of the simulation parameters in YAML
format that went into making the simulated data product
Other input parameters have their usual meanings. Read the docstring of
class InterferometerArray for details on these inputs.
------------------------------------------------------------------------
"""
argument_init = False
init_file_success = False
if init_file is not None:
try:
with h5py.File(init_file+'.hdf5', 'r') as fileobj:
self.astroutils_githash = None
self.prisim_githash = None
self.simparms_file = None
self.latitude = 0.0
self.longitude = 0.0
self.altitude = 0.0
self.skycoords = 'radec'
self.flux_unit = 'JY'
self.telescope = {}
self.telescope['shape'] = 'delta'
self.telescope['size'] = 1.0
self.telescope['groundplane'] = None
self.Tsysinfo = []
self.layout = {}
self.blgroups = None
self.bl_reversemap = None
self.lags = None
self.vis_lag = None
self.skyvis_lag = None
self.vis_noise_lag = None
self.gradient_mode = None
self.gradient = {}
self.gaininfo = None
for key in ['header', 'telescope_parms', 'spectral_info', 'simparms', 'antenna_element', 'timing', 'skyparms', 'array', 'layout', 'instrument', 'visibilities', 'gradients', 'gaininfo', 'blgroupinfo']:
try:
grp = fileobj[key]
except KeyError:
if key in ['gradients', 'gaininfo']:
pass
elif key not in ['simparms', 'blgroupinfo']:
raise KeyError('Key {0} not found in init_file'.format(key))
if key == 'header':
self.flux_unit = grp['flux_unit'].value
if 'AstroUtils#' in grp:
self.astroutils_githash = grp['AstroUtils#'].value
else:
self.astroutils_githash = astroutils.__githash__
if 'PRISim#' in grp:
self.prisim_githash = grp['PRISim#'].value
else:
self.prisim_githash = prisim.__githash__
if key == 'telescope_parms':
if 'latitude' in grp:
self.latitude = grp['latitude'].value
if 'longitude' in grp:
self.longitude = grp['longitude'].value
if 'altitude' in grp:
self.altitude = grp['altitude'].value
if 'id' in grp:
self.telescope['id'] = grp['id'].value
if key == 'layout':
if 'positions' in grp:
self.layout['positions'] = grp['positions'].value
else:
raise KeyError('Antenna layout positions is missing')
try:
self.layout['coords'] = grp['positions'].attrs['coords']
except KeyError:
raise KeyError('Antenna layout position coordinate system is missing')
if 'labels' in grp:
self.layout['labels'] = grp['labels'].value
else:
raise KeyError('Layout antenna labels is missing')
if 'ids' in grp:
self.layout['ids'] = grp['ids'].value
else:
raise KeyError('Layout antenna ids is missing')
if key == 'antenna_element':
if 'shape' in grp:
self.telescope['shape'] = grp['shape'].value
if 'size' in grp:
self.telescope['size'] = grp['size'].value
if 'ocoords' in grp:
self.telescope['ocoords'] = grp['ocoords'].value
else:
raise KeyError('Keyword "ocoords" not found in init_file')
if 'orientation' in grp:
self.telescope['orientation'] = grp['orientation'].value.reshape(1,-1)
else:
raise KeyError('Key "orientation" not found in init_file')
if 'groundplane' in grp:
self.telescope['groundplane'] = grp['groundplane'].value
if key == 'simparms':
if 'simfile' in grp:
self.simparms_file = grp['simfile'].value
if key == 'spectral_info':
self.freq_resolution = grp['freq_resolution'].value
self.channels = grp['freqs'].value
if 'lags' in grp:
self.lags = grp['lags'].value
if 'bp' in grp:
self.bp = grp['bp'].value
else:
raise KeyError('Key "bp" not found in init_file')
if 'bp_wts' in grp:
self.bp_wts = grp['bp_wts'].value
else:
self.bp_wts = NP.ones_like(self.bp)
self.bp_wts = grp['bp_wts'].value
if key == 'skyparms':
if 'pointing_coords' in grp:
self.pointing_coords = grp['pointing_coords'].value
if 'phase_center_coords' in grp:
self.phase_center_coords = grp['phase_center_coords'].value
if 'skycoords' in grp:
self.skycoords = grp['skycoords'].value
self.lst = grp['LST'].value
self.pointing_center = grp['pointing_center'].value
self.phase_center = grp['phase_center'].value
if key == 'timing':
if 'timestamps' in grp:
self.timestamp = grp['timestamps'].value.tolist()
else:
raise KeyError('Key "timestamps" not found in init_file')
if 't_acc' in grp:
self.t_acc = grp['t_acc'].value.tolist()
self.t_obs = grp['t_obs'].value
self.n_acc = grp['n_acc'].value
else:
raise KeyError('Key "t_acc" not found in init_file')
if key == 'instrument':
if ('Trx' in grp) or ('Tant' in grp) or ('spindex' in grp) or ('Tnet' in grp):
for ti in range(grp['Trx'].value.size):
tsysinfo = {}
tsysinfo['Trx'] = grp['Trx'].value[ti]
tsysinfo['Tant'] = {'T0': grp['Tant0'].value[ti], 'f0': grp['f0'].value[ti], 'spindex': grp['spindex'].value[ti]}
tsysinfo['Tnet'] = None
if 'Tnet' in grp:
if grp['Tnet'].value[ti] > 0:
tsysinfo['Tnet'] = grp['Tnet'].value[ti]
self.Tsysinfo += [tsysinfo]
if 'Tsys' in grp:
self.Tsys = grp['Tsys'].value
else:
raise KeyError('Key "Tsys" not found in init_file')
if 'effective_area' in grp:
self.A_eff = grp['effective_area'].value
else:
raise KeyError('Key "effective_area" not found in init_file')
if 'efficiency' in grp:
self.eff_Q = grp['efficiency'].value
else:
raise KeyError('Key "effeciency" not found in init_file')
if key == 'array':
if 'labels' in grp:
self.labels = grp['labels'].value
else:
self.labels = ['B{0:0d}'.format(i+1) for i in range(self.baseline_lengths.size)]
if 'baselines' in grp:
self.baselines = grp['baselines'].value
self.baseline_lengths = NP.sqrt(NP.sum(self.baselines**2, axis=1))
else:
raise KeyError('Key "baselines" not found in init_file')
if 'baseline_coords' in grp:
self.baseline_coords = grp['baseline_coords'].value
else:
self.baseline_coords = 'localenu'
if 'projected_baselines' in grp:
self.projected_baselines = grp['projected_baselines'].value
if key == 'visibilities':
if 'freq_spectrum' in grp:
subgrp = grp['freq_spectrum']
if 'rms' in subgrp:
self.vis_rms_freq = subgrp['rms'].value
else:
self.vis_rms_freq = None
# raise KeyError('Key "rms" not found in init_file')
if 'vis' in subgrp:
self.vis_freq = subgrp['vis'].value
else:
self.vis_freq = None
if 'skyvis' in subgrp:
self.skyvis_freq = subgrp['skyvis'].value
else:
raise KeyError('Key "skyvis" not found in init_file')
if 'noise' in subgrp:
self.vis_noise_freq = subgrp['noise'].value
else:
self.vis_noise_freq = None
else:
raise KeyError('Key "freq_spectrum" not found in init_file')
if 'delay_spectrum' in grp:
subgrp = grp['delay_spectrum']
if 'vis' in subgrp:
self.vis_lag = subgrp['vis'].value
if 'skyvis' in subgrp:
self.skyvis_lag = subgrp['skyvis'].value
if 'noise' in subgrp:
self.vis_noise_lag = subgrp['noise'].value
if key == 'gradients':
if key in fileobj:
for gradkey in grp:
self.gradient_mode = gradkey
self.gradient[gradkey] = grp[gradkey].value
if key == 'gaininfo':
if key in fileobj:
self.gaininfo = GainInfo(init_file=grp['gainsfile'].value)
if key == 'blgroupinfo':
if key in fileobj:
self.blgroups = {}
self.bl_reversemap = {}
for blkey in grp['groups']:
self.blgroups[ast.literal_eval(blkey)] = grp['groups'][blkey].value
for blkey in grp['reversemap']:
self.bl_reversemap[ast.literal_eval(blkey)] = grp['reversemap'][blkey].value
except IOError: # Check if a FITS file is available
try:
hdulist = fits.open(init_file+'.fits')
except IOError:
argument_init = True
warnings.warn('\tinit_file provided but could not open the initialization file. Attempting to initialize with input parameters...')
extnames = [hdulist[i].header['EXTNAME'] for i in xrange(1,len(hdulist))]
self.simparms_file = None
if 'simparms' in hdulist[0].header:
if isinstance(hdulist[0].header['simparms'], str):
self.simparms_file = hdulist[0].header['simparms']
else:
warnings.warn('\tInvalid specification found in header for simulation parameters file. Proceeding with None as default.')
try:
self.gradient_mode = hdulist[0].header['gradient_mode']
except KeyError:
self.gradient_mode = None
self.gradient = {}
try:
self.freq_resolution = hdulist[0].header['freq_resolution']
except KeyError:
hdulist.close()
raise KeyError('Keyword "freq_resolution" not found in header.')
try:
self.latitude = hdulist[0].header['latitude']
except KeyError:
warnings.warn('\tKeyword "latitude" not found in header. Assuming 34.0790 degrees for attribute latitude.')
self.latitude = 34.0790
try:
self.longitude = hdulist[0].header['longitude']
except KeyError:
warnings.warn('\tKeyword "longitude" not found in header. Assuming 0.0 degrees for attribute longitude.')
self.longitude = 0.0
try:
self.altitude = hdulist[0].header['altitude']
except KeyError:
warnings.warn('\tKeyword "altitude" not found in header. Assuming 0m for attribute altitude.')
self.altitude = 0.0
self.telescope = {}
if 'telescope' in hdulist[0].header:
self.telescope['id'] = hdulist[0].header['telescope']
try:
self.telescope['shape'] = hdulist[0].header['element_shape']
except KeyError:
warnings.warn('\tKeyword "element_shape" not found in header. Assuming "delta" for attribute antenna element shape.')
self.telescope['shape'] = 'delta'
try:
self.telescope['size'] = hdulist[0].header['element_size']
except KeyError:
warnings.warn('\tKeyword "element_size" not found in header. Assuming 25.0m for attribute antenna element size.')
self.telescope['size'] = 1.0
try:
self.telescope['ocoords'] = hdulist[0].header['element_ocoords']
except KeyError:
raise KeyError('\tKeyword "element_ocoords" not found in header. No defaults.')
try:
self.telescope['groundplane'] = hdulist[0].header['groundplane']
except KeyError:
self.telescope['groundplane'] = None
if 'ANTENNA ELEMENT ORIENTATION' not in extnames:
raise KeyError('No extension found containing information on element orientation.')
else:
self.telescope['orientation'] = hdulist['ANTENNA ELEMENT ORIENTATION'].data.reshape(1,-1)
try:
self.baseline_coords = hdulist[0].header['baseline_coords']
except KeyError:
warnings.warn('\tKeyword "baseline_coords" not found in header. Assuming "localenu" for attribute baseline_coords.')
self.baseline_coords = 'localenu'
try:
self.pointing_coords = hdulist[0].header['pointing_coords']
except KeyError:
warnings.warn('\tKeyword "pointing_coords" not found in header. Assuming "hadec" for attribute pointing_coords.')
self.pointing_coords = 'hadec'
try:
self.phase_center_coords = hdulist[0].header['phase_center_coords']
except KeyError:
warnings.warn('\tKeyword "phase_center_coords" not found in header. Assuming "hadec" for attribute phase_center_coords.')
self.phase_center_coords = 'hadec'
try:
self.skycoords = hdulist[0].header['skycoords']
except KeyError:
warnings.warn('\tKeyword "skycoords" not found in header. Assuming "radec" for attribute skycoords.')
self.skycoords = 'radec'
try:
self.flux_unit = hdulist[0].header['flux_unit']
except KeyError:
warnings.warn('\tKeyword "flux_unit" not found in header. Assuming "jy" for attribute flux_unit.')
self.flux_unit = 'JY'
if 'POINTING AND PHASE CENTER INFO' not in extnames:
raise KeyError('No extension table found containing pointing information.')
else:
self.lst = hdulist['POINTING AND PHASE CENTER INFO'].data['LST'].tolist()
self.pointing_center = NP.hstack((hdulist['POINTING AND PHASE CENTER INFO'].data['pointing_longitude'].reshape(-1,1), hdulist['POINTING AND PHASE CENTER INFO'].data['pointing_latitude'].reshape(-1,1)))
self.phase_center = NP.hstack((hdulist['POINTING AND PHASE CENTER INFO'].data['phase_center_longitude'].reshape(-1,1), hdulist['POINTING AND PHASE CENTER INFO'].data['phase_center_latitude'].reshape(-1,1)))
if 'TIMESTAMPS' in extnames:
self.timestamp = hdulist['TIMESTAMPS'].data['timestamps'].tolist()
else:
raise KeyError('Extension named "TIMESTAMPS" not found in init_file.')
self.Tsysinfo = []
if 'TSYSINFO' in extnames:
self.Tsysinfo = [{'Trx': elem['Trx'], 'Tant': {'T0': elem['Tant0'], 'f0': elem['f0'], 'spindex': elem['spindex']}, 'Tnet': None} for elem in hdulist['TSYSINFO'].data]
if 'TSYS' in extnames:
self.Tsys = hdulist['Tsys'].data
else:
raise KeyError('Extension named "Tsys" not found in init_file.')
if 'BASELINES' in extnames:
self.baselines = hdulist['BASELINES'].data.reshape(-1,3)
self.baseline_lengths = NP.sqrt(NP.sum(self.baselines**2, axis=1))
else:
raise KeyError('Extension named "BASELINES" not found in init_file.')
if 'PROJ_BASELINES' in extnames:
self.projected_baselines = hdulist['PROJ_BASELINES'].data
if 'LABELS' in extnames:
# self.labels = hdulist['LABELS'].data.tolist()
a1 = hdulist['LABELS'].data['A1']
a2 = hdulist['LABELS'].data['A2']
self.labels = zip(a2,a1)
else:
self.labels = ['B{0:0d}'.format(i+1) for i in range(self.baseline_lengths.size)]
self.layout = {}
if 'LAYOUT' in extnames:
for key in ['positions', 'ids', 'labels']:
self.layout[key] = hdulist['LAYOUT'].data[key]
self.layout['coords'] = hdulist['LAYOUT'].header['COORDS']
if 'EFFECTIVE AREA' in extnames:
self.A_eff = hdulist['EFFECTIVE AREA'].data
else:
raise KeyError('Extension named "EFFECTIVE AREA" not found in init_file.')
if 'INTERFEROMETER EFFICIENCY' in extnames:
self.eff_Q = hdulist['INTERFEROMETER EFFICIENCY'].data
else:
raise KeyError('Extension named "INTERFEROMETER EFFICIENCY" not found in init_file.')
if 'SPECTRAL INFO' not in extnames:
raise KeyError('No extension table found containing spectral information.')
else:
self.channels = hdulist['SPECTRAL INFO'].data['frequency']
try:
self.lags = hdulist['SPECTRAL INFO'].data['lag']
except KeyError:
self.lags = None
if 'BANDPASS' in extnames:
self.bp = hdulist['BANDPASS'].data
else:
raise KeyError('Extension named "BANDPASS" not found in init_file.')
if 'BANDPASS_WEIGHTS' in extnames:
self.bp_wts = hdulist['BANDPASS_WEIGHTS'].data
else:
self.bp_wts = NP.ones_like(self.bp)
if 'T_ACC' in extnames:
self.t_acc = hdulist['t_acc'].data.tolist()
self.n_acc = len(self.t_acc)
self.t_obs = sum(self.t_acc)
else:
raise KeyError('Extension named "T_ACC" not found in init_file.')
if 'FREQ_CHANNEL_NOISE_RMS_VISIBILITY' in extnames:
self.vis_rms_freq = hdulist['freq_channel_noise_rms_visibility'].data
else:
self.vis_rms_freq = None
if 'REAL_FREQ_OBS_VISIBILITY' in extnames:
self.vis_freq = hdulist['real_freq_obs_visibility'].data
if 'IMAG_FREQ_OBS_VISIBILITY' in extnames:
self.vis_freq = self.vis_freq.astype(NP.complex128)
self.vis_freq += 1j * hdulist['imag_freq_obs_visibility'].data
else:
self.vis_freq = None
if 'REAL_FREQ_SKY_VISIBILITY' in extnames:
self.skyvis_freq = hdulist['real_freq_sky_visibility'].data
if 'IMAG_FREQ_SKY_VISIBILITY' in extnames:
self.skyvis_freq = self.skyvis_freq.astype(NP.complex128)
self.skyvis_freq += 1j * hdulist['imag_freq_sky_visibility'].data
else:
raise KeyError('Extension named "REAL_FREQ_SKY_VISIBILITY" not found in init_file.')
if 'REAL_FREQ_NOISE_VISIBILITY' in extnames:
self.vis_noise_freq = hdulist['real_freq_noise_visibility'].data
if 'IMAG_FREQ_NOISE_VISIBILITY' in extnames:
self.vis_noise_freq = self.vis_noise_freq.astype(NP.complex128)
self.vis_noise_freq += 1j * hdulist['imag_freq_noise_visibility'].data
else:
self.vis_noise_freq = None
if self.gradient_mode is not None:
self.gradient = {}
if 'real_freq_sky_visibility_gradient_wrt_{0}'.format(self.gradient_mode) in extnames:
self.gradient[self.gradient_mode] = hdulist['real_freq_sky_visibility_gradient_wrt_{0}'.format(self.gradient_mode)].data
if 'imag_freq_sky_visibility_gradient_wrt_{0}'.format(self.gradient_mode) in extnames:
self.gradient[self.gradient_mode] = self.gradient[self.gradient_mode].astype(NP.complex128)
self.gradient[self.gradient_mode] += 1j * hdulist['imag_freq_sky_visibility_gradient_wrt_{0}'.format(self.gradient_mode)].data
try:
gainsfile = hdulist[0].header['gainsfile']
except KeyError:
warnings.warn('\tKeyword "gainsfile" not found in header. Assuming default unity gains.')
self.gaininfo = None
else:
self.gaininfo = GainInfo(init_file=gainsfile, axes_order=['label', 'frequency', 'time'])
if 'REAL_LAG_VISIBILITY' in extnames:
self.vis_lag = hdulist['real_lag_visibility'].data
if 'IMAG_LAG_VISIBILITY' in extnames:
self.vis_lag = self.vis_lag.astype(NP.complex128)
self.vis_lag += 1j * hdulist['imag_lag_visibility'].data
else:
self.vis_lag = None
if 'REAL_LAG_SKY_VISIBILITY' in extnames:
self.skyvis_lag = hdulist['real_lag_sky_visibility'].data
if 'IMAG_LAG_SKY_VISIBILITY' in extnames:
self.skyvis_lag = self.skyvis_lag.astype(NP.complex128)
self.skyvis_lag += 1j * hdulist['imag_lag_sky_visibility'].data
else:
self.skyvis_lag = None
if 'REAL_LAG_NOISE_VISIBILITY' in extnames:
self.vis_noise_lag = hdulist['real_lag_noise_visibility'].data
if 'IMAG_LAG_NOISE_VISIBILITY' in extnames:
self.vis_noise_lag = self.vis_noise_lag.astype(NP.complex128)
self.vis_noise_lag += 1j * hdulist['imag_lag_noise_visibility'].data
else:
self.vis_noise_lag = None
hdulist.close()
init_file_success = True
return
else:
argument_init = True
if (not argument_init) and (not init_file_success):
raise ValueError('Initialization failed with the use of init_file.')
self.astroutils_githash = astroutils.__githash__
self.prisim_githash = prisim.__githash__
self.baselines = NP.asarray(baselines)
if len(self.baselines.shape) == 1:
if self.baselines.size == 2:
self.baselines = NP.hstack((self.baselines.reshape(1,-1), NP.zeros(1)))
elif self.baselines.size == 3:
self.baselines = self.baselines.reshape(1,-1)
else:
raise ValueError('Baseline(s) must be a 2- or 3-column array.')
elif len(self.baselines.shape) == 2:
if self.baselines.shape[1] == 2:
self.baselines = NP.hstack((self.baselines, NP.zeros(self.baselines.shape[0]).reshape(-1,1)))
elif self.baselines.shape[1] != 3:
raise ValueError('Baseline(s) must be a 2- or 3-column array')
else:
raise ValueError('Baseline(s) array contains more than 2 dimensions.')
self.baseline_lengths = NP.sqrt(NP.sum(self.baselines**2, axis=1))
self.baseline_orientations = NP.angle(self.baselines[:,0] + 1j * self.baselines[:,1])
self.projected_baselines = None
if not isinstance(labels, (list, tuple, NP.ndarray)):
raise TypeError('Interferometer array labels must be a list or tuple of unique identifiers')
elif len(labels) != self.baselines.shape[0]:
raise ValueError('Number of labels do not match the number of baselines specified.')
else:
self.labels = labels
self.simparms_file = None
if isinstance(simparms_file, str):
self.simparms_file = simparms_file
else:
warnings.warn('\tInvalid specification found in header for simulation parameters file. Proceeding with None as default.')
if isinstance(telescope, dict):
self.telescope = telescope
else:
self.telescope = {}
self.telescope['id'] = 'vla'
self.telescope['shape'] = 'dish'
self.telescope['size'] = 25.0
self.telescope['ocoords'] = 'altaz'
self.telescope['orientation'] = NP.asarray([90.0, 270.0]).reshape(1,-1)
self.telescope['groundplane'] = None
self.layout = {}
if isinstance(layout, dict):
if 'positions' in layout:
if isinstance(layout['positions'], NP.ndarray):
if layout['positions'].ndim == 2:
if (layout['positions'].shape[1] == 2) or (layout['positions'].shape[1] == 3):
if layout['positions'].shape[1] == 2:
layout['positions'] = NP.hstack((layout['positions'], NP.zeros(layout['positions'].shape[0]).reshape(-1,1)))
self.layout['positions'] = layout['positions']
else:
raise ValueError('Incompatible shape in array layout')
else:
raise ValueError('Incompatible shape in array layout')
else:
raise TypeError('Array layout positions must be a numpy array')
else:
raise KeyError('Array layout positions missing')
if 'coords' in layout:
if isinstance(layout['coords'], str):
self.layout['coords'] = layout['coords']
else:
raise TypeError('Array layout coordinates must be a string')
else:
raise KeyError('Array layout coordinates missing')
if 'labels' in layout:
if isinstance(layout['labels'], (list,NP.ndarray)):
self.layout['labels'] = layout['labels']
else:
raise TypeError('Array antenna labels must be a list or numpy array')
else:
raise KeyError('Array antenna labels missing')
if 'ids' in layout:
if isinstance(layout['ids'], (list,NP.ndarray)):
self.layout['ids'] = layout['ids']
else:
raise TypeError('Array antenna ids must be a list or numpy array')
else:
raise KeyError('Array antenna ids missing')
if (layout['positions'].shape[0] != layout['labels'].size) or (layout['ids'].size != layout['labels'].size):
raise ValueError('Antenna layout positions, labels and IDs must all be for same number of antennas')
if self.layout:
antlabel_dtype = self.layout['labels'].dtype
self.labels = NP.asarray(self.labels, dtype=[('A2', antlabel_dtype), ('A1', antlabel_dtype)])
self.blgroups = None
self.bl_reversemap = None
if blgroupinfo is not None:
if not isinstance(blgroupinfo, dict):
raise TypeError('Input blgroupinfo must be a dictionary')
self.blgroups = blgroupinfo['groups']
self.bl_reversemap = blgroupinfo['reversemap']
self.latitude = latitude
self.longitude = longitude
self.altitude = altitude
self.vis_freq = None
self.skyvis_freq = None
self.vis_noise_freq = None
self.gradient_mode = None
self.gradient = {}
self.gaininfo = None
if gaininfo is not None:
if not isinstance(gaininfo, GainInfo):
raise TypeError('Input gaininfo must be an instance of class GainInfo')
self.gaininfo = gaininfo
if (freq_scale is None) or (freq_scale == 'Hz') or (freq_scale == 'hz'):
self.channels = NP.asarray(channels)
elif freq_scale == 'GHz' or freq_scale == 'ghz':
self.channels = NP.asarray(channels) * 1.0e9
elif freq_scale == 'MHz' or freq_scale == 'mhz':
self.channels = NP.asarray(channels) * 1.0e6
elif freq_scale == 'kHz' or freq_scale == 'khz':
self.channels = NP.asarray(channels) * 1.0e3
else:
raise ValueError('Frequency units must be "GHz", "MHz", "kHz" or "Hz". If not set, it defaults to "Hz"')
self.bp = NP.ones((self.baselines.shape[0],self.channels.size)) # Inherent bandpass shape
self.bp_wts = NP.ones((self.baselines.shape[0],self.channels.size)) # Additional bandpass weights
self.lag_kernel = DSP.FT1D(self.bp*self.bp_wts, ax=1, inverse=True, use_real=False, shift=True)
self.Tsys = NP.zeros((self.baselines.shape[0],self.channels.size))
self.Tsysinfo = []
self.flux_unit = 'JY'
self.timestamp = []
self.t_acc = []
self.t_obs = 0.0
self.n_acc = 0
self.pointing_center = NP.empty([1,2])
self.phase_center = NP.empty([1,2])
self.lst = []
if isinstance(eff_Q, (int, float)):
if (eff_Q >= 0.0) or (eff_Q <= 1.0):
self.eff_Q = eff_Q * NP.ones((self.baselines.shape[0], self.channels.size))
else:
raise ValueError('Efficiency value of interferometer is invalid.')
elif isinstance(eff_Q, (list, tuple, NP.ndarray)):
eff_Q = NP.asarray(eff_Q)
if (NP.any(eff_Q < 0.0)) or (NP.any(eff_Q > 1.0)):
raise ValueError('One or more values of eff_Q found to be outside the range [0,1].')
if eff_Q.size == self.baselines.shape[0]:
self.eff_Q = NP.repeat(eff_Q.reshape(-1,1), self.channels.size, axis=1)
elif eff_Q.size == self.channels.size:
self.eff_Q = NP.repeat(eff_Q.reshape(1,-1), self.channels.size, axis=0)
elif eff_Q.size == self.baselines.shape[0]*self.channels.size:
self.eff_Q = eff_Q.reshape(-1,self.channels.size)
else:
raise ValueError('Efficiency values of interferometers incompatible with the number of interferometers and/or frequency channels.')
else:
raise TypeError('Efficiency values of interferometers must be provided as a scalar, list, tuple or numpy array.')
if isinstance(A_eff, (int, float)):
if A_eff >= 0.0:
self.A_eff = A_eff * NP.ones((self.baselines.shape[0], self.channels.size))
else:
raise ValueError('Negative value for effective area is invalid.')
elif isinstance(A_eff, (list, tuple, NP.ndarray)):
A_eff = NP.asarray(A_eff)
if NP.any(A_eff < 0.0):
raise ValueError('One or more values of A_eff found to be negative.')
if A_eff.size == self.baselines.shape[0]:
self.A_eff = NP.repeat(A_eff.reshape(-1,1), self.channels.size, axis=1)
elif A_eff.size == self.channels.size:
self.A_eff = NP.repeat(A_eff.reshape(1,-1), self.channels.size, axis=0)
elif A_eff.size == self.baselines.shape[0]*self.channels.size:
self.A_eff = A_eff.reshape(-1,self.channels.size)
else:
raise ValueError('Effective area(s) of interferometers incompatible with the number of interferometers and/or frequency channels.')
else:
raise TypeError('Effective area(s) of interferometers must be provided as a scalar, list, tuple or numpy array.')
self.vis_rms_freq = None
self.freq_resolution = self.channels[1] - self.channels[0]
self.baseline_coords = baseline_coords
self.lags = None
self.skyvis_lag = None
self.vis_noise_lag = None
self.vis_lag = None
self.obs_catalog_indices = []
self.geometric_delays = []
if (pointing_coords == 'radec') or (pointing_coords == 'hadec') or (pointing_coords == 'altaz'):
self.pointing_coords = pointing_coords
self.phase_center_coords = pointing_coords
else:
raise ValueError('Pointing center of the interferometer must be "radec", "hadec" or "altaz". Check inputs.')
if (skycoords == 'radec') or (skycoords == 'hadec') or (skycoords == 'altaz'):
self.skycoords = skycoords
else:
raise ValueError('Sky coordinates must be "radec", "hadec" or "altaz". Check inputs.')
if (baseline_coords == 'equatorial') or (baseline_coords == 'localenu'):
self.baseline_coords = baseline_coords
else:
raise ValueError('Baseline coordinates must be "equatorial" or "local". Check inputs.')
#############################################################################
def observe(self, timeobj, Tsysinfo, bandpass, pointing_center, skymodel,
t_acc, pb_info=None, brightness_units=None, bpcorrect=None,
roi_info=None, roi_radius=None, roi_center=None, lst=None,
gradient_mode=None, memsave=False, vmemavail=None,
store_prev_skymodel_file=None):
"""
-------------------------------------------------------------------------
Simulate a snapshot observation, by an instance of the
InterferometerArray class, of the sky when a sky catalog is provided. The
simulation generates visibilities observed by the interferometers for the
specified parameters. See member function observing_run() for simulating
an extended observing run in 'track' or 'drift' mode.
Inputs:
timeobj [instance of class astropy.time.Time] Time object
associated with each integration in the observation
Tsysinfo [dictionary] Contains system temperature information for
specified timestamp of observation. It contains the
following keys and values:
                    'Trx'      [scalar] Receiver temperature (in K) that is
applicable to all frequencies and baselines
'Tant' [dictionary] contains antenna temperature info
from which the antenna temperature is estimated.
Used only if the key 'Tnet' is absent or set to
None. It has the following keys and values:
'f0' [scalar] Reference frequency (in Hz)
from which antenna temperature will
be estimated (see formula below)
'T0' [scalar] Antenna temperature (in K) at
the reference frequency specified in
key 'f0'. See formula below.
'spindex' [scalar] Antenna temperature spectral
index. See formula below.
Tsys = Trx + Tant['T0'] * (f/Tant['f0'])**spindex
'Tnet' [numpy array] Pre-computed Tsys (in K)
information that will be used directly to set the
Tsys. If specified, the information under keys
'Trx' and 'Tant' will be ignored. If a scalar
value is provided, it will be assumed to be
identical for all interferometers and all
frequencies. If a vector is provided whose length
                               is equal to the number of interferometers, it
will be assumed identical for all frequencies. If
a vector is provided whose length is equal to the
number of frequency channels, it will be assumed
identical for all interferometers. If a 2D array
is provided, it should be of size
n_baselines x nchan. Tsys = Tnet
bandpass [numpy array] Bandpass weights associated with the
interferometers for the specified timestamp of observation
pointing_center
[2-element numpy vector or list] Pointing center (latitude
and longitude) of the observation at a given timestamp.
This is where the telescopes will be phased up to as
reference. Coordinate system for the pointing_center is
specified by the attribute pointing_coords initialized in
__init__().
skymodel [instance of class SkyModel] It consists of source flux
densities, their positions, and spectral indices. Read
class SkyModel docstring for more information.
t_acc [scalar] Accumulation time (sec) corresponding to timestamp
brightness_units
[string] Units of flux density in the catalog and for the
generated visibilities. Accepted values are 'Jy' (Jansky)
and 'K' (Kelvin for temperature). If None set, it defaults
to 'Jy'
Keyword Inputs:
roi_info [instance of class ROI_parameters] It consists of indices
in the polskymodel object, polarized beams for different
baseline types for every time stamp that will be simulated
roi_radius [scalar] Radius of the region of interest (degrees) inside
which sources are to be observed. Default = 90 degrees,
which is the entire horizon.
roi_center [string] Center of the region of interest around which
roi_radius is used. Accepted values are 'pointing_center'
and 'zenith'. If set to None, it defaults to 'zenith'.
gradient_mode
[string] If set to None, visibilities will be simulated as
usual. If set to string, both visibilities and visibility
gradients with respect to the quantity specified in the
string will be simulated. Currently accepted value is
'baseline'. Plan to incorporate gradients with respect to
'skypos' and 'frequency' as well in the future.
memsave [boolean] If set to True, enforce computations in single
precision, otherwise enforce double precision (default)
vmemavail [NoneType, int or float] Amount of virtual memory available
(in bytes). If set to None (default), it will be determined
using psutil functions though that may be less reliable
than setting it explicitly if the available virtual memory
is known.
store_prev_skymodel_file
[string] Filename including full path to store source
indices and spectrum from previous computation which can
be read during the next iteration to generate spectrum
only of new sources that come into the field of view thus
saving computations. If set to None (default), the full
spectrum of all sources in the field of view will be
computed in each iteration.
------------------------------------------------------------------------
"""
if len(bandpass.shape) == 1:
if bandpass.size != self.channels.size:
raise ValueError('Specified bandpass incompatible with the number of frequency channels')
if len(self.bp.shape) == 2:
self.bp = NP.expand_dims(NP.repeat(bandpass.reshape(1,-1), self.baselines.shape[0], axis=0), axis=2)
else:
self.bp = NP.dstack((self.bp, NP.repeat(bandpass.reshape(1,-1), self.baselines.shape[0], axis=0)))
elif len(bandpass.shape) == 2:
if bandpass.shape[1] != self.channels.size:
raise ValueError('Specified bandpass incompatible with the number of frequency channels')
elif bandpass.shape[0] != self.baselines.shape[0]:
raise ValueError('Specified bandpass incompatible with the number of interferometers')
if len(self.bp.shape) == 2:
self.bp = NP.expand_dims(bandpass, axis=2)
else:
self.bp = NP.dstack((self.bp, bandpass))
elif len(bandpass.shape) == 3:
if bandpass.shape[1] != self.channels.size:
raise ValueError('Specified bandpass incompatible with the number of frequency channels')
elif bandpass.shape[0] != self.baselines.shape[0]:
raise ValueError('Specified bandpass incompatible with the number of interferometers')
elif bandpass.shape[2] != 1:
raise ValueError('Bandpass can have only one layer for this instance of accumulation.')
if len(self.bp.shape) == 2:
self.bp = bandpass
else:
self.bp = NP.dstack((self.bp, bandpass))
self.bp_wts = NP.ones_like(self.bp) # All additional bandpass shaping weights are set to unity.
if isinstance(Tsysinfo, dict):
set_Tsys = False
if 'Tnet' in Tsysinfo:
if Tsysinfo['Tnet'] is not None:
Tsys = Tsysinfo['Tnet']
set_Tsys = True
if not set_Tsys:
try:
Tsys = Tsysinfo['Trx'] + Tsysinfo['Tant']['T0'] * (self.channels/Tsysinfo['Tant']['f0']) ** Tsysinfo['Tant']['spindex']
except KeyError:
raise KeyError('One or more keys not found in input Tsysinfo')
Tsys = Tsys.reshape(1,-1) + NP.zeros(self.baselines.shape[0]).reshape(-1,1) # nbl x nchan
else:
raise TypeError('Input Tsysinfo must be a dictionary')
self.Tsysinfo += [Tsysinfo]
if bpcorrect is not None:
if not isinstance(bpcorrect, NP.ndarray):
raise TypeError('Input specifying bandpass correction must be a numpy array')
if bpcorrect.size == self.channels.size:
bpcorrect = bpcorrect.reshape(1,-1)
elif bpcorrect.size == self.baselines.shape[0]:
bpcorrect = bpcorrect.reshape(-1,1)
elif bpcorrect.size == self.baselines.shape[0] * self.channels.size:
bpcorrect = bpcorrect.reshape(-1,self.channels.size)
else:
raise ValueError('Input bpcorrect has dimensions incompatible with the number of baselines and frequencies')
Tsys = Tsys * bpcorrect
if isinstance(Tsys, (int,float)):
if Tsys < 0.0:
raise ValueError('Tsys found to be negative.')
if len(self.Tsys.shape) == 2:
self.Tsys = Tsys + NP.zeros((self.baselines.shape[0], self.channels.size, 1))
else:
self.Tsys = NP.dstack((self.Tsys, Tsys + NP.zeros((self.baselines.shape[0], self.channels.size, 1))))
elif isinstance(Tsys, (list, tuple, NP.ndarray)):
Tsys = NP.asarray(Tsys)
if NP.any(Tsys < 0.0):
raise ValueError('Tsys should be non-negative.')
if Tsys.size == self.baselines.shape[0]:
if self.Tsys.ndim == 2:
self.Tsys = NP.expand_dims(NP.repeat(Tsys.reshape(-1,1), self.channels.size, axis=1), axis=2)
elif self.Tsys.ndim == 3:
self.Tsys = NP.dstack((self.Tsys, NP.expand_dims(NP.repeat(Tsys.reshape(-1,1), self.channels.size, axis=1), axis=2)))
elif Tsys.size == self.channels.size:
if self.Tsys.ndim == 2:
self.Tsys = NP.expand_dims(NP.repeat(Tsys.reshape(1,-1), self.baselines.shape[0], axis=0), axis=2)
elif self.Tsys.ndim == 3:
self.Tsys = NP.dstack((self.Tsys, NP.expand_dims(NP.repeat(Tsys.reshape(1,-1), self.baselines.shape[0], axis=0), axis=2)))
elif Tsys.size == self.baselines.shape[0]*self.channels.size:
if self.Tsys.ndim == 2:
self.Tsys = NP.expand_dims(Tsys.reshape(-1,self.channels.size), axis=2)
elif self.Tsys.ndim == 3:
self.Tsys = NP.dstack((self.Tsys, NP.expand_dims(Tsys.reshape(-1,self.channels.size), axis=2)))
else:
raise ValueError('Specified Tsys has incompatible dimensions with the number of baselines and/or number of frequency channels.')
else:
raise TypeError('Tsys should be a scalar, list, tuple, or numpy array')
# if (brightness_units is None) or (brightness_units=='Jy') or (brightness_units=='JY') or (brightness_units=='jy'):
# if self.vis_rms_freq is None:
# self.vis_rms_freq = 2.0 * FCNST.k / NP.sqrt(2.0*t_acc*self.freq_resolution) * NP.expand_dims(self.Tsys[:,:,-1]/self.A_eff/self.eff_Q, axis=2) / CNST.Jy
# elif len(self.vis_rms_freq.shape) == 3:
# self.vis_rms_freq = NP.dstack((self.vis_rms_freq, 2.0 * FCNST.k / NP.sqrt(2.0*t_acc*self.freq_resolution) * NP.expand_dims(self.Tsys[:,:,-1]/self.A_eff/self.eff_Q, axis=2)/CNST.Jy))
# self.flux_unit = 'JY'
# elif (brightness_units=='K') or (brightness_units=='k'):
# if len(self.vis_rms_freq.shape) == 2:
# self.vis_rms_freq = 1 / NP.sqrt(2.0*t_acc*self.freq_resolution) * NP.expand_dims(self.Tsys[:,:,-1]/self.eff_Q, axis=2)
# elif len(self.vis_rms_freq.shape) == 3:
# self.vis_rms_freq = NP.dstack((self.vis_rms_freq, 1 / NP.sqrt(2.0*t_acc*self.freq_resolution) * NP.expand_dims(self.Tsys[:,:,-1]/self.eff_Q, axis=2)))
# self.flux_unit = 'K'
# else:
# raise ValueError('Invalid brightness temperature units specified.')
if not self.timestamp:
self.pointing_center = NP.asarray(pointing_center).reshape(1,-1)
self.phase_center = NP.asarray(pointing_center).reshape(1,-1)
else:
self.pointing_center = NP.vstack((self.pointing_center, NP.asarray(pointing_center).reshape(1,-1)))
self.phase_center = NP.vstack((self.phase_center, NP.asarray(pointing_center).reshape(1,-1)))
pointing_lon = self.pointing_center[-1,0]
pointing_lat = self.pointing_center[-1,1]
lst = timeobj.sidereal_time('apparent').deg
if self.skycoords == 'radec':
if self.pointing_coords == 'hadec':
if lst is not None:
pointing_lon = lst - self.pointing_center[-1,0]
pointing_lat = self.pointing_center[-1,1]
else:
raise ValueError('LST must be provided. Sky coordinates are in RA-Dec format while pointing center is in HA-Dec format.')
elif self.pointing_coords == 'altaz':
pointing_lonlat = GEOM.altaz2hadec(self.pointing_center[[-1],:], self.latitude, units='degrees').squeeze() # Should now be of shape (2,)
pointing_lon = lst - pointing_lonlat[0]
pointing_lat = pointing_lonlat[1]
elif self.skycoords == 'hadec':
if self.pointing_coords == 'radec':
if lst is not None:
pointing_lon = lst - self.pointing_center[-1,0]
pointing_lat = self.pointing_center[-1,1]
else:
raise ValueError('LST must be provided. Sky coordinates are in RA-Dec format while pointing center is in HA-Dec format.')
elif self.pointing_coords == 'altaz':
pointing_lonlat = lst - GEOM.altaz2hadec(self.pointing_center[[-1],:], self.latitude, units='degrees').squeeze()
pointing_lon = pointing_lonlat[0]
pointing_lat = pointing_lonlat[1]
else:
if self.pointing_coords == 'radec':
if lst is not None:
pointing_lonlat = GEOM.hadec2altaz(NP.asarray([lst-self.pointing_center[-1,0], self.pointing_center[-1,1]]), self.latitude, units='degrees')
pointing_lon = pointing_lonlat[0]
pointing_lat = pointing_lonlat[1]
else:
raise ValueError('LST must be provided. Sky coordinates are in Alt-Az format while pointing center is in RA-Dec format.')
elif self.pointing_coords == 'hadec':
pointing_lonlat = GEOM.hadec2altaz(self.pointing_center,
self.latitude,
units='degrees').squeeze()
pointing_lon = pointing_lonlat[0]
pointing_lat = pointing_lonlat[1]
baselines_in_local_frame = self.baselines
if self.baseline_coords == 'equatorial':
baselines_in_local_frame = GEOM.xyz2enu(self.baselines, self.latitude, 'degrees')
pc_altaz = self.pointing_center[-1,:] # Convert pointing center to Alt-Az coordinates
if self.pointing_coords == 'hadec':
pc_altaz = GEOM.hadec2altaz(self.pointing_center[-1,:], self.latitude, units='degrees')
elif self.pointing_coords == 'radec':
if lst is not None:
pc_altaz = GEOM.hadec2altaz(NP.asarray([lst-self.pointing_center[-1,0], self.pointing_center[-1,1]]), self.latitude, units='degrees')
else:
raise ValueError('LST must be provided. Sky coordinates are in Alt-Az format while pointing center is in RA-Dec format.')
pc_dircos = GEOM.altaz2dircos(pc_altaz, 'degrees') # Convert pointing center to direction cosine coordinates
pc_delay_offsets = DLY.geometric_delay(baselines_in_local_frame, pc_dircos, altaz=False, hadec=False, dircos=True, latitude=self.latitude)
if memsave:
pc_delay_offsets = pc_delay_offsets.astype(NP.float32)
# pointing_phase = 2.0 * NP.pi * NP.repeat(NP.dot(baselines_in_local_frame, pc_dircos.reshape(-1,1)), self.channels.size, axis=1) * NP.repeat(self.channels.reshape(1,-1), self.baselines.shape[0], axis=0)/FCNST.c
if not isinstance(skymodel, SM.SkyModel):
raise TypeError('skymodel should be an instance of class SkyModel.')
skycoords = SkyCoord(ra=skymodel.location[:,0]*units.deg, dec=skymodel.location[:,1]*units.deg, frame='fk5', equinox=Time(skymodel.epoch, format='jyear_str', scale='utc')).transform_to(FK5(equinox=timeobj))
if self.skycoords == 'hadec':
skypos_altaz = GEOM.hadec2altaz(skymodel.location, self.latitude, units='degrees')
elif self.skycoords == 'radec':
src_altaz = skycoords.transform_to(AltAz(obstime=timeobj, location=EarthLocation(lon=self.longitude*units.deg, lat=self.latitude*units.deg, height=self.altitude*units.m)))
skypos_altaz = NP.hstack((src_altaz.alt.deg.reshape(-1,1), src_altaz.az.deg.reshape(-1,1)))
if memsave:
datatype = NP.complex64
else:
datatype = NP.complex128
skyvis = NP.zeros( (self.baselines.shape[0], self.channels.size), dtype=datatype)
pb = None
if roi_info is not None:
if ('ind' not in roi_info) or ('pbeam' not in roi_info):
raise KeyError('Both "ind" and "pbeam" keys must be present in dictionary roi_info')
if (roi_info['ind'] is not None) and (roi_info['pbeam'] is not None):
m2 = roi_info['ind']
if m2.size > 0:
try:
pb = roi_info['pbeam'].reshape(-1,len(self.channels))
except ValueError:
raise ValueError('Number of columns of primary beam in key "pbeam" of dictionary roi_info must be equal to number of frequency channels.')
if NP.asarray(roi_info['ind']).size != pb.shape[0]:
raise ValueError('Values in keys ind and pbeam in must carry same number of elements.')
else:
if roi_radius is None:
roi_radius = 90.0
if roi_center is None:
roi_center = 'zenith'
elif (roi_center != 'zenith') and (roi_center != 'pointing_center'):
raise ValueError('Center of region of interest, roi_center, must be set to "zenith" or "pointing_center".')
if roi_center == 'pointing_center':
m1, m2, d12 = GEOM.spherematch(pointing_lon, pointing_lat, skycoords.ra.deg, skycoords.dec.deg, roi_radius, maxmatches=0)
else: # roi_center = 'zenith'
m2 = NP.arange(skypos_altaz.shape[0])
                m2 = m2[NP.where(skypos_altaz[:,0] >= 90.0-roi_radius)] # select sources whose altitude (angle above horizon) is at least 90-roi_radius
if len(m2) > 0:
skypos_altaz_roi = skypos_altaz[m2,:]
coords_str = 'altaz'
prev_skymodel_success = False
if store_prev_skymodel_file is not None:
if not isinstance(store_prev_skymodel_file, str):
raise TypeError('Input store_prev_skymodel_file must be a string')
try:
with h5py.File(store_prev_skymodel_file, 'a') as fileobj:
if 'ind' in fileobj:
stored_ind_dset = fileobj['ind']
stored_spectrum_dset = fileobj['spectrum']
stored_ind = stored_ind_dset.value
stored_spectrum = stored_spectrum_dset.value
ind_of_m2_in_prev = NMO.find_list_in_list(stored_ind, m2)
fluxes = NP.zeros((m2.size, self.channels.size))
if NP.sum(~ind_of_m2_in_prev.mask) > 0: # Previously stored
fluxes[NP.where(~ind_of_m2_in_prev.mask)[0],:] = stored_spectrum[ind_of_m2_in_prev[~ind_of_m2_in_prev.mask],:]
if NP.sum(ind_of_m2_in_prev.mask) > 0: # Previously unavailable and have to be generated fresh
fluxes[NP.where(ind_of_m2_in_prev.mask)[0],:] = skymodel.generate_spectrum(ind=m2[NP.where(ind_of_m2_in_prev.mask)[0]], frequency=self.channels, interp_method='pchip')
del fileobj['ind']
del fileobj['spectrum']
else:
fluxes = skymodel.generate_spectrum(ind=m2, frequency=self.channels, interp_method='pchip')
ind_dset = fileobj.create_dataset('ind', data=m2)
spec_dset = fileobj.create_dataset('spectrum', data=fluxes, compression='gzip', compression_opts=9)
prev_skymodel_success = True
except:
prev_skymodel_success = False
if not prev_skymodel_success:
fluxes = skymodel.generate_spectrum(ind=m2, frequency=self.channels, interp_method='pchip')
if pb is None:
pb = PB.primary_beam_generator(skypos_altaz_roi, self.channels/1.0e9, skyunits='altaz', telescope=self.telescope, pointing_info=pb_info, pointing_center=pc_altaz, freq_scale='GHz')
pbfluxes = pb * fluxes
geometric_delays = DLY.geometric_delay(baselines_in_local_frame, skypos_altaz_roi, altaz=(coords_str=='altaz'), hadec=(coords_str=='hadec'), latitude=self.latitude)
vis_wts = None
if skymodel.src_shape is not None:
eps = 1.0e-13
f0 = self.channels[int(0.5*self.channels.size)]
wl0 = FCNST.c / f0
wl = FCNST.c / self.channels
skypos_dircos_roi = GEOM.altaz2dircos(skypos_altaz_roi, units='degrees')
# projected_spatial_frequencies = NP.sqrt(self.baseline_lengths.reshape(1,-1)**2 - (FCNST.c * geometric_delays)**2) / wl0
projected_spatial_frequencies = NP.sqrt(self.baseline_lengths.reshape(1,-1,1)**2 - (FCNST.c * geometric_delays[:,:,NP.newaxis])**2) / wl.reshape(1,1,-1)
src_FWHM = NP.sqrt(skymodel.src_shape[m2,0] * skymodel.src_shape[m2,1])
src_FWHM_dircos = 2.0 * NP.sin(0.5*NP.radians(src_FWHM)).reshape(-1,1) # assuming the projected baseline is perpendicular to source direction
# src_sigma_spatial_frequencies = 2.0 * NP.sqrt(2.0 * NP.log(2.0)) / (2 * NP.pi * src_FWHM_dircos) # estimate 1
src_sigma_spatial_frequencies = 1.0 / NP.sqrt(2.0*NP.log(2.0)) / src_FWHM_dircos # estimate 2 created by constraint that at lambda/D_proj, visibility weights are half
# # Tried deriving below an alternate expression but previous expression for src_FWHM_dircos seems better
# dtheta_radial = NP.radians(src_FWHM).reshape(-1,1)
# dtheta_circum = NP.radians(src_FWHM).reshape(-1,1)
# src_FWHM_dircos = NP.sqrt(skypos_dircos_roi[:,2].reshape(-1,1)**2 * dtheta_radial**2 + dtheta_circum**2) / NP.sqrt(2.0) # from 2D error propagation (another approximation to commented expression above for the same quantity). Add in quadrature and divide by sqrt(2) to get radius of error circle
# arbitrary_factor_for_src_width = NP.sqrt(2.0) # An arbitrary factor that can be adjusted based on what the longest baseline measures for a source of certain finite width
# src_sigma_spatial_frequencies = 2.0 * NP.sqrt(2.0 * NP.log(2.0)) / (2 * NP.pi * src_FWHM_dircos) * arbitrary_factor_for_src_width
# extended_sources_flag = 1/NP.clip(projected_spatial_frequencies, 0.5, NP.amax(projected_spatial_frequencies)) < src_FWHM_dircos
vis_wts = NP.ones_like(projected_spatial_frequencies)
# vis_wts = NP.exp(-0.5 * (projected_spatial_frequencies/src_sigma_spatial_frequencies)**2)
vis_wts = NP.exp(-0.5 * (projected_spatial_frequencies/src_sigma_spatial_frequencies[:,:,NP.newaxis])**2) # nsrc x nbl x nchan
if memsave:
pbfluxes = pbfluxes.astype(NP.float32, copy=False)
self.geometric_delays = self.geometric_delays + [geometric_delays.astype(NP.float32)]
if vis_wts is not None:
vis_wts = vis_wts.astype(NP.float32, copy=False)
else:
self.geometric_delays = self.geometric_delays + [geometric_delays]
# memory_available = psutil.phymem_usage().available
if vmemavail is None:
memory_available = psutil.virtual_memory().available
else:
memory_available = vmemavail
# memory_available = min([vmemavail, psutil.virtual_memory().available])
if gradient_mode is None:
if memsave:
memory_required = len(m2) * self.channels.size * self.baselines.shape[0] * 4.0 * 2 # bytes, 4 bytes per float, factor 2 is because the phase involves complex values
else:
memory_required = len(m2) * self.channels.size * self.baselines.shape[0] * 8.0 * 2 # bytes, 8 bytes per float, factor 2 is because the phase involves complex values
else:
if not isinstance(gradient_mode, str):
raise TypeError('Input gradient_mode must be a string')
if gradient_mode.lower() not in ['baseline', 'skypos', 'frequency']:
raise ValueError('Invalid value specified in input gradient_mode')
if self.gradient_mode is None:
self.gradient_mode = gradient_mode
if gradient_mode.lower() == 'baseline':
skyvis_gradient = NP.zeros((3, self.baselines.shape[0], self.channels.size), dtype=datatype)
if memsave:
memory_required = 3 * len(m2) * self.channels.size * self.baselines.shape[0] * 4.0 * 2 # bytes, 4 bytes per float, factor 2 is because the phase involves complex values, factor 3 because of three vector components of the gradient
else:
memory_required = 3 * len(m2) * self.channels.size * self.baselines.shape[0] * 8.0 * 2 # bytes, 8 bytes per float, factor 2 is because the phase involves complex values, factor 3 because of three vector components of the gradient
memory_sufficient = float(memory_available) > memory_required
if memory_sufficient:
try:
if memsave:
phase_matrix = NP.exp(-1j * NP.asarray(2.0 * NP.pi).astype(NP.float32) * (self.geometric_delays[-1][:,:,NP.newaxis].astype(NP.float32) - pc_delay_offsets.astype(NP.float32).reshape(1,-1,1)) * self.channels.astype(NP.float32).reshape(1,1,-1)).astype(NP.complex64)
if vis_wts is not None:
# phase_matrix *= vis_wts[:,:,NP.newaxis]
phase_matrix *= vis_wts
skyvis = NP.sum(pbfluxes[:,NP.newaxis,:] * phase_matrix, axis=0) # SUM(nsrc x nbl x nchan, axis=0) = nbl x nchan
if gradient_mode is not None:
if gradient_mode.lower() == 'baseline':
skyvis_gradient = NP.sum(skypos_dircos_roi[:,:,NP.newaxis,NP.newaxis].astype(NP.float32) * pbfluxes[:,NP.newaxis,NP.newaxis,:] * phase_matrix[:,NP.newaxis,:,:], axis=0) # SUM(nsrc x 3 x nbl x nchan, axis=0) = 3 x nbl x nchan
else:
phase_matrix = 2.0 * NP.pi * (self.geometric_delays[-1][:,:,NP.newaxis].astype(NP.float64) - pc_delay_offsets.astype(NP.float64).reshape(1,-1,1)) * self.channels.astype(NP.float64).reshape(1,1,-1)
if vis_wts is not None:
# skyvis = NP.sum(pbfluxes[:,NP.newaxis,:] * NP.exp(-1j*phase_matrix) * vis_wts[:,:,NP.newaxis], axis=0) # Don't apply bandpass here
skyvis = NP.sum(pbfluxes[:,NP.newaxis,:] * NP.exp(-1j*phase_matrix) * vis_wts, axis=0) # SUM(nsrc x nbl x nchan, axis=0) = nbl x nchan
if gradient_mode is not None:
if gradient_mode.lower() == 'baseline':
skyvis_gradient = NP.sum(skypos_dircos_roi[:,:,NP.newaxis,NP.newaxis].astype(NP.float64) * pbfluxes[:,NP.newaxis,NP.newaxis,:] * NP.exp(-1j*phase_matrix[:,NP.newaxis,:,:]) * vis_wts[:,NP.newaxis,:,:], axis=0) # SUM(nsrc x 3 x nbl x nchan, axis=0) = 3 x nbl x nchan
else:
skyvis = NP.sum(pbfluxes[:,NP.newaxis,:] * NP.exp(-1j*phase_matrix), axis=0) # SUM(nsrc x nbl x nchan, axis=0) = nbl x nchan
if gradient_mode is not None:
if gradient_mode.lower() == 'baseline':
skyvis_gradient = NP.sum(skypos_dircos_roi[:,:,NP.newaxis,NP.newaxis].astype(NP.float64) * pbfluxes[:,NP.newaxis,NP.newaxis,:] * NP.exp(-1j*phase_matrix[:,NP.newaxis,:,:]), axis=0) # SUM(nsrc x 3 x nbl x nchan, axis=0) = 3 x nbl x nchan
except MemoryError as memxption:
print(memxption)
memory_sufficient = False
raise
if not memory_sufficient:
warnings.warn('\t\tDetecting memory shortage. Serializing over sky direction.')
downsize_factor = NP.ceil(memory_required/float(memory_available))
n_src_stepsize = int(len(m2)/downsize_factor)
src_indices = range(0,len(m2),n_src_stepsize)
if memsave:
warnings.warn('\t\tEnforcing single precision computations.')
for i in xrange(len(src_indices)):
phase_matrix = NP.exp(-1j * NP.asarray(2.0 * NP.pi).astype(NP.float32) * (self.geometric_delays[-1][src_indices[i]:min(src_indices[i]+n_src_stepsize,len(m2)),:,NP.newaxis].astype(NP.float32) - pc_delay_offsets.astype(NP.float32).reshape(1,-1,1)) * self.channels.astype(NP.float32).reshape(1,1,-1)).astype(NP.complex64, copy=False)
if vis_wts is not None:
phase_matrix *= vis_wts[src_indices[i]:min(src_indices[i]+n_src_stepsize,len(m2)),:,:].astype(NP.float32)
# phase_matrix *= vis_wts[src_indices[i]:min(src_indices[i]+n_src_stepsize,len(m2)),:,NP.newaxis].astype(NP.float32)
phase_matrix *= pbfluxes[src_indices[i]:min(src_indices[i]+n_src_stepsize,len(m2)),NP.newaxis,:].astype(NP.float32)
skyvis += NP.sum(phase_matrix, axis=0)
if gradient_mode is not None:
if gradient_mode.lower() == 'baseline':
skyvis_gradient += NP.sum(skypos_dircos_roi[src_indices[i]:min(src_indices[i]+n_src_stepsize,len(m2)),:,NP.newaxis,NP.newaxis].astype(NP.float32) * phase_matrix[:,NP.newaxis,:,:], axis=0)
else:
for i in xrange(len(src_indices)):
phase_matrix = NP.exp(-1j * NP.asarray(2.0 * NP.pi).astype(NP.float64) * (self.geometric_delays[-1][src_indices[i]:min(src_indices[i]+n_src_stepsize,len(m2)),:,NP.newaxis].astype(NP.float64) - pc_delay_offsets.astype(NP.float64).reshape(1,-1,1)) * self.channels.astype(NP.float64).reshape(1,1,-1)).astype(NP.complex128, copy=False)
if vis_wts is not None:
phase_matrix *= vis_wts[src_indices[i]:min(src_indices[i]+n_src_stepsize,len(m2)),:,:].astype(NP.float64)
phase_matrix *= pbfluxes[src_indices[i]:min(src_indices[i]+n_src_stepsize,len(m2)),NP.newaxis,:].astype(NP.float64)
skyvis += NP.sum(phase_matrix, axis=0)
if gradient_mode is not None:
if gradient_mode.lower() == 'baseline':
skyvis_gradient += NP.sum(skypos_dircos_roi[src_indices[i]:min(src_indices[i]+n_src_stepsize,len(m2)),:,NP.newaxis,NP.newaxis].astype(NP.float64) * phase_matrix[:,NP.newaxis,:,:], axis=0)
self.obs_catalog_indices = self.obs_catalog_indices + [m2]
else:
warnings.warn('No sources found in the catalog within matching radius. Simply populating the observed visibilities and/or gradients with noise.')
if gradient_mode is not None:
if gradient_mode.lower() == 'baseline':
skyvis_gradient = NP.zeros( (3, self.baselines.shape[0], self.channels.size), dtype=datatype)
if self.timestamp == []:
self.skyvis_freq = skyvis[:,:,NP.newaxis]
if gradient_mode is not None:
if gradient_mode.lower() == 'baseline':
self.gradient[gradient_mode] = skyvis_gradient[:,:,:,NP.newaxis]
else:
self.skyvis_freq = NP.dstack((self.skyvis_freq, skyvis[:,:,NP.newaxis]))
if gradient_mode is not None:
if gradient_mode.lower() == 'baseline':
self.gradient[gradient_mode] = NP.concatenate((self.gradient[gradient_mode], skyvis_gradient[:,:,:,NP.newaxis]), axis=3)
self.timestamp = self.timestamp + [timeobj.jd]
self.t_acc = self.t_acc + [t_acc]
self.t_obs += t_acc
self.n_acc += 1
self.lst = self.lst + [lst]
numbytes = []
variables = []
var = None
obj = None
for var,obj in locals().iteritems():
if isinstance(obj, NP.ndarray):
variables += [var]
numbytes += [obj.nbytes]
nGB = NP.asarray(numbytes) / 2.0**30
totalmemGB = NP.sum(nGB)
############################################################################
def observing_run(self, pointing_init, skymodel, t_acc, duration, channels,
                  bpass, Tsys, lst_init, roi_radius=None, roi_center=None,
                  mode='track', pointing_coords=None, freq_scale=None,
                  brightness_units=None, verbose=True, memsave=False):
    """
    -------------------------------------------------------------------------
    Simulate an extended observing run in 'track' or 'drift' mode, by an
    instance of the InterferometerArray class, of the sky when a sky catalog
    is provided. The simulation generates visibilities observed by the
    interferometer array for the specified parameters. Uses member function
    observe() and builds the observation from snapshots. The timestamp for
    each snapshot is the current time at which the snapshot is generated.

    Inputs:

    pointing_init [2-element list or numpy array] The inital pointing
                  of the telescope at the start of the observing run.
                  This is where the telescopes will be initially phased up to
                  as reference. Coordinate system for the pointing_center is
                  specified by the input pointing_coords

    skymodel      [instance of class SkyModel] It consists of source flux
                  densities, their positions, and spectral indices. Read
                  class SkyModel docstring for more information.

    t_acc         [scalar] Accumulation time (sec) corresponding to timestamp

    brightness_units
                  [string] Units of flux density in the catalog and for the
                  generated visibilities. Accepted values are 'Jy' (Jansky)
                  and 'K' (Kelvin for temperature). If None set, it defaults
                  to 'Jy'

    duration      [scalar] Duration of observation in seconds

    channels      [list or numpy vector] frequency channels in units as
                  specified in freq_scale

    bpass         [list, list of lists or numpy array] Bandpass weights in
                  the form of M x N array or list of N-element lists. N must
                  equal the number of channels. If M=1, the same bandpass
                  will be used in all the snapshots for the entire
                  observation, otherwise M must equal the number of
                  snapshots which is int(duration/t_acc)

    Tsys          [scalar, list or numpy array] System temperature (in K). If
                  a scalar is provided, the same Tsys will be used in all the
                  snapshots for the duration of the observation. If a list or
                  numpy array is provided, the number of elements must equal
                  the number of snapshots which is int(duration/t_int)

    lst_init      [scalar] Initial LST (in degrees) at the beginning of the
                  observing run corresponding to pointing_init

    Keyword Inputs:

    roi_radius    [scalar] Radius of the region of interest (degrees) inside
                  which sources are to be observed. Default = 90 degrees,
                  which is the entire horizon.

    roi_center    [string] Center of the region of interest around which
                  roi_radius is used. Accepted values are 'pointing_center'
                  and 'zenith'. If set to None, it defaults to 'zenith'.

    freq_scale    [string] Units of frequencies specified in channels.
                  Accepted values are 'Hz', 'hz', 'khz', 'kHz', 'mhz',
                  'MHz', 'GHz' and 'ghz'. If None provided, defaults to 'Hz'

    mode          [string] Mode of observation. Accepted values are 'track'
                  and 'drift'. If using 'track', pointing center is fixed to
                  a specific point on the sky coordinate frame. If using
                  'drift', pointing center is fixed to a specific point on
                  the antenna's reference frame.

    pointing_coords
                  [string] Coordinate system for pointing_init. Accepted
                  values are 'radec', 'hadec' and 'altaz'. If None provided,
                  default is set based on observing mode. If mode='track',
                  pointing_coords defaults to 'radec', and if mode='drift',
                  it defaults to 'hadec'

    verbose       [boolean] If set to True, prints progress and diagnostic
                  messages. Default = True
    ------------------------------------------------------------------------
    """
    if verbose:
        print('Preparing an observing run...\n')
        print('\tVerifying input arguments to observing_run()...')
    # NOTE(review): this tuple expression is a no-op for function parameters
    # (they are always bound); retained from the original as a legacy
    # "arguments specified" check.
    try:
        pointing_init, skymodel, t_acc, duration, bpass, Tsys, lst_init
    except NameError:
        raise NameError('One or more of pointing_init, skymodel, t_acc, duration, bpass, Tsys, lst_init not specified.')
    # --- Validate and normalize pointing_init to a flat 2-element array ---
    if isinstance(pointing_init, list):
        pointing_init = NP.asarray(pointing_init)
    elif not isinstance(pointing_init, NP.ndarray):
        raise TypeError('pointing_init must be a list or numpy array.')
    if pointing_init.size != 2:
        raise ValueError('pointing_init must be a 2-element vector.')
    pointing_init = pointing_init.ravel()
    if not isinstance(skymodel, SM.SkyModel):
        raise TypeError('skymodel must be an instance of class SkyModel.')
    if not isinstance(t_acc, (int, float)):
        raise TypeError('t_acc must be a scalar integer or float.')
    if t_acc <= 0.0:
        raise ValueError('t_acc must be positive.')
    if not isinstance(duration, (int, float)):
        raise TypeError('duration must be a scalar integer or float.')
    if duration <= t_acc:
        if verbose:
            warnings.warn('\t\tDuration specified to be shorter than t_acc. Will set it equal to t_acc')
        duration = t_acc
    # Number of snapshot accumulations in the run
    n_acc = int(duration / t_acc)
    if verbose:
        print('\t\tObserving run will have {0} accumulations.'.format(n_acc))
    if isinstance(channels, list):
        channels = NP.asarray(channels)
    elif not isinstance(channels, NP.ndarray):
        raise TypeError('channels must be a list or numpy array')
    # Convert channel frequencies to Hz according to freq_scale
    if (freq_scale is None) or (freq_scale == 'Hz') or (freq_scale == 'hz'):
        channels = NP.asarray(channels)
    elif freq_scale == 'GHz' or freq_scale == 'ghz':
        channels = channels * 1.0e9
    elif freq_scale == 'MHz' or freq_scale == 'mhz':
        channels = channels * 1.0e6
    elif freq_scale == 'kHz' or freq_scale == 'khz':
        channels = channels * 1.0e3
    else:
        raise ValueError('Frequency units must be "GHz", "MHz", "kHz" or "Hz". If not set, it defaults to "Hz"')
    # --- Broadcast bpass to shape (nbl, nchan, n_acc-compatible) ---
    if isinstance(bpass, (list, tuple, NP.ndarray)):
        bpass = NP.asarray(bpass)
    else:
        raise TypeError('bpass must be a list, tuple or numpy array')
    if bpass.size == self.channels.size:
        bpass = NP.expand_dims(NP.repeat(bpass.reshape(1,-1), self.baselines.shape[0], axis=0), axis=2)
        if verbose:
            warnings.warn('\t\tSame bandpass will be applied to all baselines and all accumulations in the observing run.')
    elif bpass.size == self.baselines.shape[0] * self.channels.size:
        bpass = NP.expand_dims(bpass.reshape(-1,self.channels.size), axis=2)
        if verbose:
            warnings.warn('\t\tSame bandpass will be applied to all accumulations in the observing run.')
    elif bpass.size == self.baselines.shape[0] * self.channels.size * n_acc:
        bpass = bpass.reshape(-1,self.channels.size,n_acc)
    else:
        raise ValueError('Dimensions of bpass incompatible with the number of frequency channels, baselines and number of accumulations.')
    # --- Broadcast Tsys to shape (nbl, nchan, n_acc-compatible) ---
    if isinstance(Tsys, (int, float, list, tuple, NP.ndarray)):
        Tsys = NP.asarray(Tsys).reshape(-1)
    else:
        raise TypeError('Tsys must be a scalar, list, tuple or numpy array')
    if Tsys.size == 1:
        if verbose:
            warnings.warn('\t\tTsys = {0:.1f} K will be assumed for all frequencies, baselines, and accumulations.'.format(Tsys[0]))
        Tsys = Tsys + NP.zeros((self.baselines.shape[0], self.channels.size, 1))
    elif Tsys.size == self.channels.size:
        Tsys = NP.expand_dims(NP.repeat(Tsys.reshape(1,-1), self.baselines.shape[0], axis=0), axis=2)
        if verbose:
            warnings.warn('\t\tSame Tsys will be assumed for all baselines and all accumulations in the observing run.')
    elif Tsys.size == self.baselines.shape[0]:
        Tsys = NP.expand_dims(NP.repeat(Tsys.reshape(-1,1), self.channels.size, axis=1), axis=2)
        if verbose:
            warnings.warn('\t\tSame Tsys will be assumed for all frequency channels and all accumulations in the observing run.')
    elif Tsys.size == self.baselines.shape[0] * self.channels.size:
        Tsys = NP.expand_dims(Tsys.reshape(-1,self.channels.size), axis=2)
        if verbose:
            warnings.warn('\t\tSame Tsys will be assumed for all accumulations in the observing run.')
    elif Tsys.size == self.baselines.shape[0] * self.channels.size * n_acc:
        Tsys = Tsys.reshape(-1,self.channels.size,n_acc)
    else:
        raise ValueError('Dimensions of Tsys incompatible with the number of frequency channels, baselines and number of accumulations.')
    if not isinstance(lst_init, (int, float)):
        raise TypeError('Starting LST should be a scalar')
    if verbose:
        print('\tVerified input arguments.')
        print('\tProceeding to schedule the observing run...')
    # LST per accumulation: advance by t_acc converted from seconds to hours,
    # then hours to degrees (x15)
    lst = (lst_init + (t_acc/3.6e3) * NP.arange(n_acc)) * 15.0 # in degrees
    if verbose:
        print('\tCreated LST range for observing run.')
    # Convert the initial pointing to the frame appropriate for the mode:
    # 'track' phases to a fixed (RA, Dec); 'drift' to a fixed (HA, Dec)
    if mode == 'track':
        if pointing_coords == 'hadec':
            pointing = NP.asarray([lst_init - pointing_init[0], pointing_init[1]])
        elif (pointing_coords == 'radec') or (pointing_coords is None):
            pointing = pointing_init
        elif pointing_coords == 'altaz':
            hadec = GEOM.altaz2hadec(pointing_init, self.latitude, units='degrees')
            pointing = NP.asarray([lst_init - hadec[0], hadec[1]])
        else:
            raise ValueError('pointing_coords can only be set to "hadec", "radec" or "altaz".')
        self.pointing_coords = 'radec'
        self.phase_center_coords = 'radec'
    elif mode == 'drift':
        if pointing_coords == 'radec':
            pointing = NP.asarray([lst_init - pointing_init[0], pointing_init[1]])
        elif (pointing_coords == 'hadec') or (pointing_coords is None):
            pointing = pointing_init
        elif pointing_coords == 'altaz':
            pointing = GEOM.altaz2hadec(pointing_init, self.latitude, units='degrees')
        else:
            raise ValueError('pointing_coords can only be set to "hadec", "radec" or "altaz".')
        self.pointing_coords = 'hadec'
        self.phase_center_coords = 'hadec'
    if verbose:
        print('\tPreparing to observe in {0} mode'.format(mode))
    if verbose:
        # NOTE(review): milestones appears unused below; retained as-is
        milestones = range(max(1,int(n_acc/10)), int(n_acc), max(1,int(n_acc/10)))
        progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(), PGB.ETA()], maxval=n_acc).start()
    # Build the observation one snapshot at a time; Tsys/bpass cycle over
    # their last axis if fewer planes than accumulations were supplied
    for i in range(n_acc):
        timestamp = str(DT.datetime.now())
        self.observe(timestamp, Tsys[:,:,i%Tsys.shape[2]],
                     bpass[:,:,i%bpass.shape[2]], pointing, skymodel,
                     t_acc, brightness_units=brightness_units,
                     roi_radius=roi_radius, roi_center=roi_center,
                     lst=lst[i], memsave=memsave)
        if verbose:
            progress.update(i+1)
    if verbose:
        progress.finish()
    self.t_obs = duration
    self.n_acc = n_acc
    if verbose:
        print('Observing run completed successfully.')
#############################################################################
def generate_noise(self):
    """
    -------------------------------------------------------------------------
    Generate complex thermal noise for the visibilities from the instrument
    parameters stored on the instance (Tsys, effective area, efficiency,
    accumulation times and frequency resolution).

    Sets attributes:

    vis_rms_freq    RMS of the complex visibility noise per baseline,
                    channel and accumulation, in the units given by
                    attribute flux_unit ('Jy' or 'K')

    vis_noise_freq  Complex Gaussian noise realization with the above RMS.
                    The factor 1/sqrt(2) splits the total uncertainty
                    equally between the real and imaginary parts.
                    [Based on equations 9-12 through 9-15 or section 5 in
                    chapter 9 on Sensitivity in SIRA II wherein the
                    equations are for real and imaginary parts separately.]
    -------------------------------------------------------------------------
    """
    qeff = self.eff_Q
    aeff = self.A_eff
    tint = NP.asarray(self.t_acc)
    # Promote 2D (baseline x channel) arrays to 3D so they broadcast
    # against the time axis
    if qeff.ndim == 2:
        qeff = qeff[:, :, NP.newaxis]
    if aeff.ndim == 2:
        aeff = aeff[:, :, NP.newaxis]
    tint = tint[NP.newaxis, NP.newaxis, :]
    # Radiometer equation in the requested flux units
    if self.flux_unit in ('JY', 'jy', 'Jy'):
        self.vis_rms_freq = 2.0 * FCNST.k / NP.sqrt(tint * self.freq_resolution) * (self.Tsys / aeff / qeff) / CNST.Jy
    elif self.flux_unit in ('K', 'k'):
        self.vis_rms_freq = 1 / NP.sqrt(tint * self.freq_resolution) * self.Tsys / qeff
    else:
        raise ValueError('Flux density units can only be in Jy or K.')
    n_bl = self.baselines.shape[0]
    n_chan = self.channels.size
    n_times = len(self.timestamp)
    # Draw real part first, then imaginary, to preserve the RNG stream order
    re_draw = NP.random.randn(n_bl, n_chan, n_times)
    im_draw = NP.random.randn(n_bl, n_chan, n_times)
    self.vis_noise_freq = self.vis_rms_freq / NP.sqrt(2.0) * (re_draw + 1j * im_draw)
#############################################################################
def add_noise(self):
    """
    -------------------------------------------------------------------------
    Adds the thermal noise generated in member function generate_noise() to
    the sky visibilities after extracting and applying complex instrument
    gains. The result is stored in attribute vis_freq.

    Gain lookup strategy (each step falls back to the next on IndexError):
    1) spline-interpolated gains at the absolute timestamps
    2) spline-interpolated gains at timestamps relative to the first one
    3) nearest-neighbour gains at the absolute timestamps
    If all fail, or no gain table is attached, unity gains are used and a
    warning is issued.
    -------------------------------------------------------------------------
    """
    gains = 1.0  # default unity gain when no usable gain table is available
    if self.gaininfo is not None:
        try:
            gains = self.gaininfo.spline_gains(self.labels, freqs=self.channels, times=NP.asarray(self.timestamp))
        except IndexError:
            try:
                # Retry with times referenced to the first timestamp
                gains = self.gaininfo.spline_gains(self.labels, freqs=self.channels, times=NP.asarray(self.timestamp)-self.timestamp[0])
            except IndexError:
                try:
                    gains = self.gaininfo.nearest_gains(self.labels, freqs=self.channels, times=NP.asarray(self.timestamp))
                except Exception:
                    # BUGFIX: was a bare "except:", which would also swallow
                    # KeyboardInterrupt/SystemExit; narrowed to Exception.
                    warnings.warn('Interpolation and nearest neighbour logic failed. Proceeding with default unity gains')
    else:
        warnings.warn('Gain table absent. Proceeding with default unity gains')
    self.vis_freq = gains * self.skyvis_freq + self.vis_noise_freq
#############################################################################
def apply_gradients(self, gradient_mode=None, perturbations=None):
    """
    -------------------------------------------------------------------------
    Apply the perturbations in combination with the stored visibility
    gradients to determine first-order perturbed visibilities.

    Inputs:

    perturbations [dictionary] Contains perturbations on one of the
                  following quantities (specified as keys):
                  'baseline'  [numpy array] nseed x 3 x nbl baseline
                              perturbations (in same units as attribute
                              baselines). The first dimension denotes the
                              number of realizations, the second denotes
                              the x-, y- and z-axes and the third
                              denotes the number of baselines. It can also
                              handle arrays of shapes (n1, n2, ..., 3, nbl)

    gradient_mode [string] Specifies the quantity on which perturbations
                  are provided and perturbed visibilities to be computed.
                  This string must be one of the keys in the input
                  dictionary perturbations and must be found in the
                  attribute gradient_mode and gradient. Currently
                  accepted values are 'baseline'. If None, defaults to
                  attribute gradient_mode.

    Output:

    Perturbed visibilities as a n1 x n2 x ... x nbl x nchan x ntimes
    complex array

    Raises:

    AttributeError if no gradients were computed; TypeError/KeyError/
    ValueError on malformed inputs.
    -------------------------------------------------------------------------
    """
    if gradient_mode is None:
        gradient_mode = self.gradient_mode
    if perturbations is None:
        perturbations = {gradient_mode: NP.zeros((1,1,1))}
    if self.gradient_mode is None:
        raise AttributeError('No gradient attribute found')
    else:
        if not self.gradient:
            raise AttributeError('No gradient attribute found')
    if not isinstance(perturbations, dict):
        raise TypeError('Input perturbations must be a dictionary')
    if not isinstance(gradient_mode, str):
        raise TypeError('Input gradient_mode must be a string')
    if gradient_mode not in ['baseline']:
        raise KeyError('Specified gradient mode {0} not currently supported'.format(gradient_mode))
    if gradient_mode not in perturbations:
        # BUGFIX: original formatted the undefined name "gradient_key" here,
        # raising NameError instead of the intended KeyError
        raise KeyError('{0} key not found in input perturbations'.format(gradient_mode))
    if gradient_mode != self.gradient_mode:
        raise ValueError('Specified gradient mode {0} not found in attribute'.format(gradient_mode))
    if not isinstance(perturbations[gradient_mode], NP.ndarray):
        raise TypeError('Perturbations must be specified as a numpy array')
    # Normalize the perturbation array to 3D: nseed x ncoord x nbl
    if perturbations[gradient_mode].ndim == 2:
        perturbations[gradient_mode] = perturbations[gradient_mode][NP.newaxis,...]
    if perturbations[gradient_mode].ndim < 2:
        raise ValueError('Perturbations must be two--dimensions or higher')
    inpshape = perturbations[gradient_mode].shape
    if perturbations[gradient_mode].ndim > 3:
        # Collapse all leading realization axes into one
        perturbations[gradient_mode] = perturbations[gradient_mode].reshape(-1,inpshape[-2],inpshape[-1])
    if perturbations[gradient_mode].shape[2] != self.gradient[self.gradient_mode].shape[1]:
        raise ValueError('Number of {0} perturbations not equal to that in the gradient attribute'.format(gradient_mode))
    # Pad or truncate the coordinate axis so it has exactly 3 components
    if perturbations[gradient_mode].shape[1] == 1:
        warnings.warn('Only {0}-dimensional coordinates specified. Proceeding with zero perturbations in other coordinate axes.'.format(perturbations[gradient_mode].shape[1]))
        perturbations[gradient_mode] = NP.hstack((perturbations[gradient_mode], NP.zeros((perturbations[gradient_mode].shape[0],2,perturbations[gradient_mode].shape[2])))) # nseed x 3 x nbl
    elif perturbations[gradient_mode].shape[1] == 2:
        warnings.warn('Only {0}-dimensional coordinates specified. Proceeding with zero perturbations in other coordinate axes.'.format(perturbations[gradient_mode].shape[1]))
        perturbations[gradient_mode] = NP.hstack((perturbations[gradient_mode], NP.zeros((perturbations[gradient_mode].shape[0],1,perturbations[gradient_mode].shape[2])))) # nseed x 3 x nbl
    elif perturbations[gradient_mode].shape[1] > 3:
        warnings.warn('{0}-dimensional coordinates specified. Proceeding with only the first three dimensions of coordinate axes.'.format(3))
        perturbations[gradient_mode] = perturbations[gradient_mode][:,:3,:] # nseed x 3 x nbl
    wl = FCNST.c / self.channels
    if gradient_mode == 'baseline':
        # First-order perturbation: dV = -2*pi*i/lambda * (db . dV/db),
        # summed over the three baseline coordinate axes
        delta_skyvis_freq = -1j * 2.0 * NP.pi / wl.reshape(1,1,-1,1) * NP.sum(perturbations[gradient_mode][...,NP.newaxis,NP.newaxis] * self.gradient[gradient_mode][NP.newaxis,...], axis=1) # nseed x nbl x nchan x ntimes
    # Restore the caller's original leading realization axes
    outshape = list(inpshape[:-2])
    outshape += [self.labels.size, self.channels.size, self.lst.size]
    # NOTE(review): assumes attributes labels, channels and lst are numpy
    # arrays (have .size) at this point -- TODO confirm for lst
    outshape = tuple(outshape)
    delta_skyvis_freq = delta_skyvis_freq.reshape(outshape)
    return delta_skyvis_freq
#############################################################################
def duplicate_measurements(self, blgroups=None):
    """
    -------------------------------------------------------------------------
    Duplicate visibilities based on redundant baselines specified. This saves
    time when compared to simulating visibilities over redundant baselines.
    Thus, it is more efficient to simulate unique baselines and duplicate
    measurements for redundant baselines

    Inputs:

    blgroups [dictionary] Dictionary of baseline groups where the keys are
             tuples containing baseline labels. Under each key is a numpy
             recarray of baseline labels that are redundant and fall under
             the baseline label key. Any number of sets of redundant
             measurements can be duplicated in this depending on the
             baseline label keys and recarrays specified here. It results
             in updating attributes where a new number of baselines are
             formed from original baselines and new redundant baselines.
             If set to None (default), attribute blgroups will be used to
             create redundant sets
    -------------------------------------------------------------------------
    """
    if blgroups is None:
        blgroups = self.blgroups
    if not isinstance(blgroups, dict):
        raise TypeError('Input blgroups must be a dictionary')
    # Total number of baselines after duplication, from either the reverse
    # map (label -> group key) or the group sizes themselves
    if self.bl_reversemap is None:
        nbl = NP.sum(NP.asarray([len(blgroups[blkey]) for blkey in blgroups]))
    else:
        nbl = len(self.bl_reversemap)
    # Only expand if there are more redundant baselines than current labels
    if self.labels.size < nbl:
        label_keys = NP.asarray(blgroups.keys(), dtype=self.labels.dtype)
        for label_key in label_keys:
            # A group key may be stored in flipped (antenna-pair reversed)
            # order relative to attribute labels; try the reversed tuple
            # before declaring it missing
            if label_key not in self.labels:
                if NP.asarray([tuple(reversed(label_key))], dtype=self.labels.dtype)[0] not in self.labels:
                    raise KeyError('Input label {0} not found in attribute labels'.format(label_key))
                else:
                    label_key = NP.asarray([tuple(reversed(label_key))], dtype=self.labels.dtype)[0]
            if label_key.dtype != blgroups[tuple(label_key)].dtype:
                warnings.warn('Datatype of attribute labels does not match that of the keys in attribute blgroups. Need to fix. Processing with forced matching of the two datatypes')
            # Ensure the group key itself is a member of its own group so the
            # duplication below counts it exactly once.
            # NOTE(review): this mutates the caller-supplied blgroups in place
            if tuple(label_key) not in map(tuple, blgroups[tuple(label_key)]):
                # if NP.isin(label_key, blgroups[tuple(label_key)], invert=True):
                # if label_key not in blgroups[tuple(label_key)]:
                #     blgroups[tuple(label_key)] += [label_key]
                blgroups[tuple(label_key)] = NP.hstack((label_key.astype(blgroups[tuple(label_key)].dtype), blgroups[tuple(label_key)]))
        # Build the expanded label list and, per original label, the number
        # of copies (group size, or 1 for non-redundant baselines)
        uniq_inplabels = []
        num_list = []
        for label in self.labels:
            if label in label_keys:
                num_list += [blgroups[tuple(label)].size]
                for lbl in blgroups[tuple(label)]:
                    if tuple(lbl) not in uniq_inplabels:
                        uniq_inplabels += [tuple(lbl)]
                    else:
                        raise ValueError('Label {0} repeated in more than one baseline group'.format(lbl))
            else:
                num_list += [1]
                uniq_inplabels += [tuple(label)]
        if len(num_list) != len(self.labels):
            raise ValueError('Fatal error in counting and matching labels in input blgroups')
        # Replicate per-baseline data along the baseline axis
        if self.skyvis_freq is not None:
            self.skyvis_freq = NP.repeat(self.skyvis_freq, num_list, axis=0)
        if self.gradient_mode is not None:
            self.gradient[self.gradient_mode] = NP.repeat(self.gradient[self.gradient_mode], num_list, axis=1)
        self.labels = NP.asarray(uniq_inplabels, dtype=self.labels.dtype)
        self.baselines = NP.repeat(self.baselines, num_list, axis=0)
        self.projected_baselines = NP.repeat(self.projected_baselines, num_list, axis=0)
        self.baseline_lengths = NP.repeat(self.baseline_lengths, num_list)
        # Instrument arrays are replicated only if they vary per baseline
        if self.Tsys.shape[0] > 1:
            self.Tsys = NP.repeat(self.Tsys, num_list, axis=0)
        if self.eff_Q.shape[0] > 1:
            self.eff_Q = NP.repeat(self.eff_Q, num_list, axis=0)
        if self.A_eff.shape[0] > 1:
            self.A_eff = NP.repeat(self.A_eff, num_list, axis=0)
        if self.bp.shape[0] > 1:
            self.bp = NP.repeat(self.bp, num_list, axis=0)
        if self.bp_wts.shape[0] > 1:
            self.bp_wts = NP.repeat(self.bp_wts, num_list, axis=0)
        # Regenerate noise for the expanded baseline set and re-apply it
        self.generate_noise()
        self.add_noise()
############################################################################
def getBaselineGroupKeys(self, inp_labels):
    """
    ------------------------------------------------------------------------
    Find the redundant-baseline-group key for each of the given baseline
    labels, using this instance's reverse map (attribute bl_reversemap).

    Inputs:

    inp_labels [list] Each element is a two-element tuple denoting a
               baseline / antenna-pair label,
               e.g. [('1', '2'), ('3', '0'), ('2', '2'), ...]

    Output:

    Two-element tuple (blkeys, flipped):
    - blkeys: list of group keys, one per input label; None where the
      label was not found in the reverse map.
    - flipped: list of booleans indicating whether the input label had to
      be reversed to locate its group key; None where not found.

    Example:

    blkeys, flipped = InterferometerArray.getBaselineGroupKeys(inp_labels)
    blkeys  --> [('2','3'), ('11','16'), None, ('5','1'), ...]
    flipped --> [False, True, None, False, ...]
    ------------------------------------------------------------------------
    """
    # Delegate to the module-level helper, binding this instance's map
    reverse_lookup = self.bl_reversemap
    return getBaselineGroupKeys(inp_labels, reverse_lookup)
#################################################################################
def getBaselinesInGroups(self, inp_labels):
"""
---------------------------------------------------------------------------
Find all redundant baseline labels in groups that contain the given input
baseline labels
Inputs:
inp_labels
[list] List where each element in the list is a two-element tuple
that corresponds to a baseline / antenna pair label.
e.g. [('1', '2'), ('3', '0'), ('2', '2'), ...]
Output:
Tuple with two elements where the first element is a list of numpy
RecArrays where each RecArray corresponds to the entry in inp_label and is
an array of two-element records corresponding to the baseline labels in
that redundant group. If the input baseline is not found, the corresponding
element in the list is None to indicate the baseline label was not found.
The second value in the tuple indicates if the ordering of the input label
had to be flipped in order to find the baseline group key. Positions where
an input label was found as is will contain False, but if it had to be
flipped will contain True. If the input label was not found, it will
contain a None entry.
Example:
list_blgrps, flipped = InterferometerArray.getBaselineGroupKeys(inplabels)
list_blgrps --> [array([('2','3'), ('11','16')]), None,
array([('5','1')]), ...],
flipped --> [False, True, None, ...])
---------------------------------------------------------------------------
"""
return getBaselinesInGroups(inp_labels, self.bl_reversemap, self.blgroups)
#################################################################################
def getThreePointCombinations(self, unique=False):
"""
-------------------------------------------------------------------------
Return all or only unique 3-point combinations of baselines
Input:
unique [boolean] If set to True, only unique 3-point combinations of
baseline triads are returned. If set to False (default), all
3-point combinations are returned.
Output:
Tuple containing two lists. The first list is a list of triplet tuples of
antenna labels in the form [(a1,a2,a3), (a1,a4,a6), ...], the second list
is a list of triplet tuples of baselines encoded as strings
-------------------------------------------------------------------------
"""
if not isinstance(unique, bool):
raise TypeError('Input unique must be boolean')
bl = self.baselines + 0.0 # to avoid any weird negative sign before 0.0
blstr = NP.unique(['{0[0]:.2f}_{0[1]:.2f}_{0[2]:.2f}'.format(lo) for lo in bl])
bltriplets = []
blvecttriplets = []
anttriplets = []
for aind1,albl1 in enumerate(self.layout['labels']):
for aind2,albl2 in enumerate(self.layout['labels']):
bl12 = self.layout['positions'][aind2] - self.layout['positions'][aind1]
bl12 += 0.0 # to avoid any weird negative sign before 0.0
bl12[NP.abs(bl12) < 1e-10] = 0.0
bl12_len = NP.sqrt(NP.sum(bl12**2))
if bl12_len > 0.0:
bl12str = '{0[0]:.2f}_{0[1]:.2f}_{0[2]:.2f}'.format(bl12)
if bl12str not in blstr:
bl12 *= -1
bl12 += 0.0 # to avoid any weird negative sign before 0.0
bl12str = '{0[0]:.2f}_{0[1]:.2f}_{0[2]:.2f}'.format(bl12)
if bl12str not in blstr:
warnings.warn('A baseline not found in the simulated reference baselines. Proceeding with the rest')
# raise IndexError('A baseline not found in reference baselines')
else:
for aind3,albl3 in enumerate(self.layout['labels']):
bl23 = self.layout['positions'][aind3] - self.layout['positions'][aind2]
bl31 = self.layout['positions'][aind1] - self.layout['positions'][aind3]
bl23 += 0.0 # to avoid any weird negative sign before 0.0
bl31 += 0.0 # to avoid any weird negative sign before 0.0
bl23[NP.abs(bl23) < 1e-10] = 0.0
bl31[NP.abs(bl31) < 1e-10] = 0.0
bl23_len = NP.sqrt(NP.sum(bl23**2))
bl31_len = NP.sqrt(NP.sum(bl31**2))
if (bl23_len > 0.0) and (bl31_len > 0.0):
bl23str = '{0[0]:.2f}_{0[1]:.2f}_{0[2]:.2f}'.format(bl23)
if bl23str not in blstr:
bl23 *= -1
bl23 += 0.0 # to avoid any weird negative sign before 0.0
bl23str = '{0[0]:.2f}_{0[1]:.2f}_{0[2]:.2f}'.format(bl23)
if bl23str not in blstr:
warnings.warn('A baseline not found in the simulated reference baselines. Proceeding with the rest')
# raise IndexError('A baseline not found in reference baselines')
else:
bl31str = '{0[0]:.2f}_{0[1]:.2f}_{0[2]:.2f}'.format(bl31)
if bl31str not in blstr:
bl31 *= -1
bl31 += 0.0 # to avoid any weird negative sign before 0.0
bl31str = '{0[0]:.2f}_{0[1]:.2f}_{0[2]:.2f}'.format(bl31)
if bl31str not in blstr:
warnings.warn('A baseline not found in the simulated reference baselines. Proceeding with the rest')
# raise IndexError('A baseline not found in reference baselines')
else:
list123_str = [bl12str, bl23str, bl31str]
if len(list123_str) == 3:
if len(bltriplets) == 0:
bltriplets += [list123_str]
blvecttriplets += [[bl12, bl23, bl31]]
anttriplets += [(albl1, albl2, albl3)]
else:
found = False
if unique:
ind = 0
while (not found) and (ind < len(bltriplets)):
bltriplet = bltriplets[ind]
if NP.setdiff1d(list123_str, bltriplet).size == 0:
found = True
else:
ind += 1
if not found:
bltriplets += [list123_str]
blvecttriplets += [[bl12, bl23, bl31]]
anttriplets += [(albl1, albl2, albl3)]
# return (anttriplets, bltriplets)
return (anttriplets, blvecttriplets)
#############################################################################
def getClosurePhase(self, antenna_triplets=None, delay_filter_info=None,
specsmooth_info=None, spectral_window_info=None,
unique=False):
"""
-------------------------------------------------------------------------
Get closure phases of visibilities from triplets of antennas.
Inputs:
antenna_triplets
[list of tuples] List of antenna ID triplets where each
triplet is given as a tuple. If set to None (default), all
the unique triplets based on the antenna layout attribute
in class InterferometerArray
unique [boolean] If set to True, only unique 3-point combinations
of baseline triads are returned. If set to False (default),
all 3-point combinations are returned. Applies only if
antenna_triplets is set to None, otherwise the 3-point
combinations of the specified antenna_triplets is returned.
delay_filter_info
[NoneType or dictionary] Info containing delay filter
parameters. If set to None (default), no delay filtering is
performed. Otherwise, delay filter is applied on each of the
visibilities in the triplet before computing the closure
phases. The delay filter parameters are specified in a
dictionary as follows:
'type' [string] 'horizon' (default) or 'regular'. If
set to 'horizon', the horizon delay limits are
estimated from the respective baseline lengths
in the triplet. If set to 'regular', the extent
of the filter is determined by the 'min' and
'width' keys (see below).
'min' [scalar] Non-negative number (in seconds) that
specifies the minimum delay in the filter span.
If not specified, it is assumed to be 0. If
'type' is set to 'horizon', the 'min' is ignored
and set to 0.
'width' [scalar] Non-negative number (in numbers of
inverse bandwidths). If 'type' is set to
'horizon', the width represents the delay
buffer beyond the horizon. If 'type' is set to
'regular', this number has to be positive and
determines the span of the filter starting from
the minimum delay in key 'min'.
'mode' [string] 'discard' (default) or 'retain'. If set
to 'discard', the span defining the filter is
discarded and the rest retained. If set to
'retain', the span defining the filter is
retained and the rest discarded. For example,
if 'type' is set to 'horizon' and 'mode' is set
to 'discard', the horizon-to-horizon is
filtered out (discarded).
specsmooth_info
[NoneType or dictionary] Spectral smoothing window to be
applied prior to the delay transform. If set to None, no
smoothing is done. This is usually set if spectral
smoothing is to be done such as in the case of RFI. The
smoothing window parameters are specified using the
following keys and values:
'op_type' [string] Smoothing operation type.
Default='median' (currently accepts only
'median' or 'interp').
'window_size' [integer] Size of smoothing window (in
pixels) along frequency axis. Applies only
if op_type is set to 'median'
'maskchans' [NoneType or numpy array] Numpy boolean array
of size nchan. False entries imply those
channels are not masked and will be used in
in interpolation while True implies they are
masked and will not be used in determining the
interpolation function. If set to None, all
channels are assumed to be unmasked (False).
'evalchans' [NoneType or numpy array] Channel numbers at
which visibilities are to be evaluated. Will
be useful for filling in RFI flagged channels.
If set to None, channels masked in 'maskchans'
will be evaluated
'noiseRMS' [NoneType or scalar or numpy array] If set to
None (default), the rest of the parameters are
used in determining the RMS of thermal noise.
If specified as scalar, all other parameters
will be ignored in estimating noiseRMS and
this value will be used instead. If specified
as a numpy array, it must be of shape
broadcastable to (nbl,nchan,ntimes). So
accpeted shapes can be (1,1,1), (1,1,ntimes),
(1,nchan,1), (nbl,1,1), (1,nchan,ntimes),
(nbl,nchan,1), (nbl,1,ntimes), or
(nbl,nchan,ntimes).
spectral_window_info
[NoneType or dictionary] Spectral window parameters to
determine the spectral weights and apply to the visibilities
in the frequency domain before filtering in the delay domain.
THESE PARAMETERS ARE APPLIED ON THE INDIVIDUAL VISIBILITIES
THAT GO INTO THE CLOSURE PHASE. THESE ARE NOT TO BE CONFUSED
WITH THE PARAMETERS THAT WILL BE USED IN THE ACTUAL DELAY
TRANSFORM OF CLOSURE PHASE SPECTRA WHICH ARE SPECIFIED
SEPARATELY FURTHER BELOW.
If set to None (default), unity spectral weights are applied.
If spectral weights are to be applied, it must be a provided
as a dictionary with the following keys and values:
bw_eff [scalar] effective bandwidths (in Hz) for the
spectral window
freq_center [scalar] frequency center (in Hz) for the
spectral window
shape [string] frequency window shape for the
spectral window. Accepted values are 'rect' or
'RECT' (for rectangular), 'bnw' and 'BNW' (for
Blackman-Nuttall), and 'bhw' or 'BHW' (for
Blackman-Harris). Default=None sets it to 'rect'
fftpow [scalar] power to which the FFT of the window
will be raised. The value must be a positive
scalar.
Output:
Dictionary containing closure phase information under the following keys
and values:
'closure_phase_skyvis' [numpy array] Closure phases (in radians) for
the given antenna triplets from the noiseless
visibilities. It is of shape
ntriplets x nchan x ntimes
'closure_phase_vis' [numpy array] Closure phases (in radians) for
the given antenna triplets for noisy
visibilities. It is of shape
ntriplets x nchan x ntimes
'closure_phase_noise' [numpy array] Closure phases (in radians) for
the given antenna triplets for thermal noise in
visibilities. It is of shape
ntriplets x nchan x ntimes
'antenna_triplets' [list of tuples] List of three-element tuples of
antenna IDs for which the closure phases are
calculated.
'baseline_triplets' [numpy array] List of 3x3 numpy arrays. Each 3x3
unit in the list represents triplets of baseline
vectors where the three rows denote the three
baselines in the triplet and the three columns
define the x-, y- and z-components of the
triplet. The number of 3x3 unit elements in the
list will equal the number of elements in the
list under key 'antenna_triplets'.
'skyvis' [numpy array] Noiseless visibilities that went
into the triplet used for estimating closure
phases. It has size ntriplets x 3 nchan x ntimes
where 3 is for the triplet of visibilities or
baselines involved.
'vis' [numpy array] Same as 'skyvis' but for noisy
visibilities
'noisevis' [numpy array] Same as 'skyvis' but for the
noise in the visibilities
'spectral_weights' [numpy array] Spectral weights applied in the
frequency domain before filtering. This is
derived based on the parameters in the input
spectral_window_info. If spectral_window_info is
set to None, the spectral weights are set to 1.0
with shape (1,). If spectral_window_info is
specified as not None, the shape of the spectral
weights is (nchan,).
-------------------------------------------------------------------------
"""
if antenna_triplets is None:
antenna_triplets, bltriplets = self.getThreePointCombinations(unique=unique)
if not isinstance(antenna_triplets, list):
raise TypeError('Input antenna triplets must be a list of triplet tuples')
# Check if spectral smoothing is to be applied
if specsmooth_info is not None:
if not isinstance(specsmooth_info, dict):
raise TypeError('Input specsmooth_info must be a dictionary')
if 'op_type' not in specsmooth_info:
raise KeyError('Key "op_type" not found in input specsmooth_info')
if specsmooth_info['op_type'].lower() not in ['median', 'interp']:
raise ValueError('op_type specified in specsmooth_info currently not supported')
if specsmooth_info['op_type'].lower() == 'median':
if 'window_size' not in specsmooth_info:
raise KeyError('Input "window_size" not found in specsmooth_info')
if specsmooth_info['window_size'] <= 0:
raise ValueError('Spectral filter window size must be positive')
if specsmooth_info['op_type'].lower() == 'interp':
if 'maskchans' not in specsmooth_info:
specsmooth_info['maskchans'] = NP.zeros(self.channels.size, dtype=NP.bool)
elif specsmooth_info['maskchans'] is None:
specsmooth_info['maskchans'] = NP.zeros(self.channels.size, dtype=NP.bool)
elif not isinstance(specsmooth_info['maskchans'], NP.ndarray):
raise TypeError('Value under key "maskchans" must be a numpy array')
else:
if specsmooth_info['maskchans'].dtype != bool:
raise TypeError('Value under key "maskchans" must be a boolean numpy array')
if specsmooth_info['maskchans'].size != self.channels.size:
raise ValueError('Size of numpy array under key "maskchans" is not equal to the number of frequency channels')
specsmooth_info['maskchans'] = specsmooth_info['maskchans'].ravel()
if 'evalchans' not in specsmooth_info:
specsmooth_info['evalchans'] = NP.where(specsmooth_info['maskchans'])[0]
elif specsmooth_info['evalchans'] is None:
specsmooth_info['evalchans'] = NP.where(specsmooth_info['maskchans'])[0]
elif not isinstance(specsmooth_info['evalchans'], (int,list,NP.ndarray)):
raise TypeError('Values under key "evalchans" must be an integer, list or numpy array')
else:
specsmooth_info['evalchans'] = NP.asarray(specsmooth_info['evalchans']).reshape(-1)
unmasked_chans = NP.where(NP.logical_not(specsmooth_info['maskchans']))[0]
# Check if spectral windowing is to be applied
if spectral_window_info is not None:
freq_center = spectral_window_info['freq_center']
bw_eff = spectral_window_info['bw_eff']
shape = spectral_window_info['shape']
fftpow = spectral_window_info['fftpow']
if freq_center is None:
freq_center = self.channels[self.channels.size/2]
if shape is None:
shape = 'rect'
else:
shape = shape.lower()
if bw_eff is None:
if shape == 'rect':
bw_eff = self.channels.size * self.freq_resolution
elif shape == 'bhw':
bw_eff = 0.5 * self.channels.size * self.freq_resolution
else:
raise ValueError('Specified window shape not currently supported')
if fftpow is None:
fftpow = 1.0
elif isinstance(fftpow, (int,float)):
if fftpow <= 0.0:
raise ValueError('Value fftpow must be positive')
else:
raise ValueError('Value fftpow must be a scalar (int or float)')
freq_wts = NP.empty(self.channels.size, dtype=NP.float_)
frac_width = DSP.window_N2width(n_window=None, shape=shape, fftpow=fftpow, area_normalize=False, power_normalize=True)
window_loss_factor = 1 / frac_width
n_window = NP.round(window_loss_factor * bw_eff / self.freq_resolution).astype(NP.int)
ind_freq_center, ind_channels, dfrequency = LKP.find_1NN(self.channels.reshape(-1,1), NP.asarray(freq_center).reshape(-1,1), distance_ULIM=0.5*self.freq_resolution, remove_oob=True)
sortind = NP.argsort(ind_channels)
ind_freq_center = ind_freq_center[sortind]
ind_channels = ind_channels[sortind]
dfrequency = dfrequency[sortind]
# n_window = n_window[sortind]
window = NP.sqrt(frac_width * n_window) * DSP.window_fftpow(n_window, shape=shape, fftpow=fftpow, centering=True, peak=None, area_normalize=False, power_normalize=True)
window_chans = self.channels[ind_channels[0]] + self.freq_resolution * (NP.arange(n_window) - int(n_window/2))
ind_window_chans, ind_chans, dfreq = LKP.find_1NN(self.channels.reshape(-1,1), window_chans.reshape(-1,1), distance_ULIM=0.5*self.freq_resolution, remove_oob=True)
sind = NP.argsort(ind_window_chans)
ind_window_chans = ind_window_chans[sind]
ind_chans = ind_chans[sind]
dfreq = dfreq[sind]
window = window[ind_window_chans]
window = NP.pad(window, ((ind_chans.min(), self.channels.size-1-ind_chans.max())), mode='constant', constant_values=((0.0,0.0)))
freq_wts = window
else:
freq_wts = NP.asarray(1.0).reshape(-1)
# Check if delay filter is to be performed
filter_unmask = NP.ones(self.channels.size)
if delay_filter_info is not None:
fft_delays = DSP.spectral_axis(self.channels.size, delx=self.freq_resolution, shift=False, use_real=False)
dtau = fft_delays[1] - fft_delays[0]
if not isinstance(delay_filter_info, dict):
raise TypeError('Delay filter info must be specified as a dictionary')
if 'mode' not in delay_filter_info:
filter_mode = 'discard'
else:
filter_mode = delay_filter_info['mode']
if filter_mode.lower() not in ['discard', 'retain']:
raise ValueError('Invalid delay filter mode specified')
if 'type' not in delay_filter_info:
filter_type = 'horizon'
else:
filter_type = delay_filter_info['type']
if filter_type.lower() not in ['horizon', 'regular']:
raise ValueError('Invalid delay filter type specified')
if filter_type.lower() == 'regular':
if ('min' not in delay_filter_info) or ('width' not in delay_filter_info):
raise KeyError('Keys "min" and "width" must be specified in input delay_filter_info')
delay_min = delay_filter_info['min']
delay_width = delay_filter_info['width']
if delay_min is None:
delay_min = 0.0
elif isinstance(delay_min, (int,float)):
delay_min = max([0.0, delay_min])
else:
raise TypeError('Minimum delay in the filter must be a scalar value (int or float)')
if isinstance(delay_width, (int,float)):
if delay_width <= 0.0:
raise ValueError('Delay filter width must be positive')
else:
raise TypeError('Delay width in the filter must be a scalar value (int or float)')
else:
if 'width' not in delay_filter_info:
delay_width = 0.0
else:
delay_width = delay_filter_info['width']
if delay_width is None:
delay_width = 0.0
elif isinstance(delay_width, (int,float)):
if delay_width <= 0.0:
raise ValueError('Delay filter width must be positive')
else:
raise TypeError('Delay width in the filter must be a scalar value (int or float)')
delay_width = delay_width * dtau
skyvis_freq = NP.copy(self.skyvis_freq)
vis_freq = NP.copy(self.vis_freq)
vis_noise_freq = NP.copy(self.vis_noise_freq)
phase_skyvis123 = []
phase_vis123 = []
phase_noise123 = []
blvecttriplets = []
skyvis_triplets = []
vis_triplets = []
noise_triplets = []
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} Triplets '.format(len(antenna_triplets)), PGB.ETA()], maxval=len(antenna_triplets)).start()
for tripletind,anttriplet in enumerate(antenna_triplets):
blvecttriplets += [NP.zeros((3,3))]
a1, a2, a3 = anttriplet
a1 = str(a1)
a2 = str(a2)
a3 = str(a3)
bl12_id = (a2, a1)
conj12 = False
if bl12_id in self.bl_reversemap:
bl12_id_ref = self.bl_reversemap[bl12_id]
elif tuple(reversed(bl12_id)) in self.bl_reversemap:
bl12_id_ref = self.bl_reversemap[tuple(reversed(bl12_id))]
conj12 = True
else:
raise ValueError('Baseline ({0[0]:0d}, {0[1]:0d}) not found in simulated baselines'.format(bl12_id))
ind12 = NP.where(self.labels == bl12_id_ref)[0][0]
if not conj12:
skyvis12 = skyvis_freq[ind12,:,:]
vis12 = vis_freq[ind12,:,:]
noise12 = vis_noise_freq[ind12,:,:]
blvecttriplets[-1][0,:] = self.baselines[ind12,:]
bpwts12 = self.bp[ind12,:,:] * self.bp_wts[ind12,:,:]
else:
skyvis12 = skyvis_freq[ind12,:,:].conj()
vis12 = vis_freq[ind12,:,:].conj()
noise12 = vis_noise_freq[ind12,:,:].conj()
blvecttriplets[-1][0,:] = -self.baselines[ind12,:]
bpwts12 = self.bp[ind12,:,:].conj() * self.bp_wts[ind12,:,:].conj()
bl23_id = (a3, a2)
conj23 = False
if bl23_id in self.bl_reversemap:
bl23_id_ref = self.bl_reversemap[bl23_id]
elif tuple(reversed(bl23_id)) in self.bl_reversemap:
bl23_id_ref = self.bl_reversemap[tuple(reversed(bl23_id))]
conj23 = True
else:
raise ValueError('Baseline ({0[0]:0d}, {0[1]:0d}) not found in simulated baselines'.format(bl23_id))
ind23 = NP.where(self.labels == bl23_id_ref)[0][0]
if not conj23:
skyvis23 = skyvis_freq[ind23,:,:]
vis23 = vis_freq[ind23,:,:]
noise23 = vis_noise_freq[ind23,:,:]
blvecttriplets[-1][1,:] = self.baselines[ind23,:]
bpwts23 = self.bp[ind23,:,:] * self.bp_wts[ind23,:,:]
else:
skyvis23 = skyvis_freq[ind23,:,:].conj()
vis23 = vis_freq[ind23,:,:].conj()
noise23 = vis_noise_freq[ind23,:,:].conj()
blvecttriplets[-1][1,:] = -self.baselines[ind23,:]
bpwts23 = self.bp[ind23,:,:].conj() * self.bp_wts[ind23,:,:].conj()
bl31_id = (a1, a3)
conj31 = False
if bl31_id in self.bl_reversemap:
bl31_id_ref = self.bl_reversemap[bl31_id]
elif tuple(reversed(bl31_id)) in self.bl_reversemap:
bl31_id_ref = self.bl_reversemap[tuple(reversed(bl31_id))]
conj31 = True
else:
raise ValueError('Baseline ({0[0]:0d}, {0[1]:0d}) not found in simulated baselines'.format(bl31_id))
ind31 = NP.where(self.labels == bl31_id_ref)[0][0]
if not conj31:
skyvis31 = skyvis_freq[ind31,:,:]
vis31 = vis_freq[ind31,:,:]
noise31 = vis_noise_freq[ind31,:,:]
blvecttriplets[-1][2,:] = self.baselines[ind31,:]
bpwts31 = self.bp[ind31,:,:] * self.bp_wts[ind31,:,:]
else:
skyvis31 = skyvis_freq[ind31,:,:].conj()
vis31 = vis_freq[ind31,:,:].conj()
noise31 = vis_noise_freq[ind31,:,:].conj()
blvecttriplets[-1][2,:] = -self.baselines[ind31,:]
bpwts31 = self.bp[ind31,:,:].conj() * self.bp_wts[ind31,:,:].conj()
if specsmooth_info is not None:
# Perform interpolation for each triplet if op_type is 'interp'.
# If op_type is 'median' it can be performed triplet by triplet
# or on all triplets as once depending on if delay-filtering
# and spectral windowing is set or not.
if specsmooth_info['op_type'].lower() == 'interp':
if specsmooth_info['evalchans'].size > 0:
# Obtain the noise RMS on the required baselines
if 'noiseRMS' not in specsmooth_info:
specsmooth_info['noiseRMS'] = NP.copy(self.vis_rms_freq[NP.ix_([ind12,ind23,ind31], specsmooth_info['evalchans'], NP.arange(skyvis12.shape[1]))])
else:
specsmooth_info['noiseRMS'] = specsmooth_info['noiseRMS'][:,specsmooth_info['evalchans'],:]
noise123 = generateNoise(noiseRMS=specsmooth_info['noiseRMS'], nbl=3, nchan=specsmooth_info['evalchans'].size, ntimes=skyvis12.shape[1])
noise12[specsmooth_info['evalchans'],:] = noise123[0,:,:]
noise23[specsmooth_info['evalchans'],:] = noise123[1,:,:]
noise31[specsmooth_info['evalchans'],:] = noise123[2,:,:]
interpfunc_skyvis12_real = interpolate.interp1d(unmasked_chans, skyvis12[unmasked_chans,:].real, axis=0, kind='cubic', bounds_error=True, assume_sorted=True)
interpfunc_skyvis12_imag = interpolate.interp1d(unmasked_chans, skyvis12[unmasked_chans,:].imag, axis=0, kind='cubic', bounds_error=True, assume_sorted=True)
skyvis12[specsmooth_info['evalchans'],:] = interpfunc_skyvis12_real(specsmooth_info['evalchans']) + 1j * interpfunc_skyvis12_imag(specsmooth_info['evalchans'])
interpfunc_skyvis23_real = interpolate.interp1d(unmasked_chans, skyvis23[unmasked_chans,:].real, axis=0, kind='cubic', bounds_error=True, assume_sorted=True)
interpfunc_skyvis23_imag = interpolate.interp1d(unmasked_chans, skyvis23[unmasked_chans,:].imag, axis=0, kind='cubic', bounds_error=True, assume_sorted=True)
skyvis23[specsmooth_info['evalchans'],:] = interpfunc_skyvis23_real(specsmooth_info['evalchans']) + 1j * interpfunc_skyvis23_imag(specsmooth_info['evalchans'])
interpfunc_skyvis31_real = interpolate.interp1d(unmasked_chans, skyvis31[unmasked_chans,:].real, axis=0, kind='cubic', bounds_error=True, assume_sorted=True)
interpfunc_skyvis31_imag = interpolate.interp1d(unmasked_chans, skyvis31[unmasked_chans,:].imag, axis=0, kind='cubic', bounds_error=True, assume_sorted=True)
skyvis31[specsmooth_info['evalchans'],:] = interpfunc_skyvis31_real(specsmooth_info['evalchans']) + 1j * interpfunc_skyvis31_imag(specsmooth_info['evalchans'])
vis12[specsmooth_info['evalchans'],:] = skyvis12[specsmooth_info['evalchans'],:] + noise12[specsmooth_info['evalchans'],:]
vis23[specsmooth_info['evalchans'],:] = skyvis23[specsmooth_info['evalchans'],:] + noise23[specsmooth_info['evalchans'],:]
vis31[specsmooth_info['evalchans'],:] = skyvis31[specsmooth_info['evalchans'],:] + noise31[specsmooth_info['evalchans'],:]
# Apply the spectral ('median') smoothing first if delay filter
# and / or spectral windowing is to be performed, otherwise apply
# later on the full array instead of inside the antenna triplet loop
if (delay_filter_info is not None) or (spectral_window_info is not None):
if specsmooth_info is not None:
if specsmooth_info['op_type'].lower() == 'median':
skyvis12 = ndimage.median_filter(skyvis12.real, size=(specsmooth_info[specsmooth_info['window_size']],1)) + 1j * ndimage.median_filter(skyvis12.imag, size=(specsmooth_info[specsmooth_info['window_size']],1))
skyvis23 = ndimage.median_filter(skyvis23.real, size=(specsmooth_info[specsmooth_info['window_size']],1)) + 1j * ndimage.median_filter(skyvis23.imag, size=(specsmooth_info[specsmooth_info['window_size']],1))
skyvis31 = ndimage.median_filter(skyvis31.real, size=(specsmooth_info[specsmooth_info['window_size']],1)) + 1j * ndimage.median_filter(skyvis31.imag, size=(specsmooth_info[specsmooth_info['window_size']],1))
vis12 = ndimage.median_filter(vis12.real, size=(specsmooth_info[specsmooth_info['window_size']],1)) + 1j * ndimage.median_filter(vis12.imag, size=(specsmooth_info[specsmooth_info['window_size']],1))
vis23 = ndimage.median_filter(vis23.real, size=(specsmooth_info[specsmooth_info['window_size']],1)) + 1j * ndimage.median_filter(vis23.imag, size=(specsmooth_info[specsmooth_info['window_size']],1))
vis31 = ndimage.median_filter(vis31.real, size=(specsmooth_info[specsmooth_info['window_size']],1)) + 1j * ndimage.median_filter(vis31.imag, size=(specsmooth_info[specsmooth_info['window_size']],1))
noise12 = ndimage.median_filter(noise12.real, size=(specsmooth_info[specsmooth_info['window_size']],1)) + 1j * ndimage.median_filter(noise12.imag, size=(specsmooth_info[specsmooth_info['window_size']],1))
noise23 = ndimage.median_filter(noise23.real, size=(specsmooth_info[specsmooth_info['window_size']],1)) + 1j * ndimage.median_filter(noise23.imag, size=(specsmooth_info[specsmooth_info['window_size']],1))
noise31 = ndimage.median_filter(noise31.real, size=(specsmooth_info[specsmooth_info['window_size']],1)) + 1j * ndimage.median_filter(noise31.imag, size=(specsmooth_info[specsmooth_info['window_size']],1))
# Check if delay filter is to be performed
if delay_filter_info is not None:
if filter_type.lower() == 'regular':
delay_max = delay_min + delay_width
if filter_mode.lower() == 'discard':
mask_ind = NP.logical_and(NP.abs(fft_delays) >= delay_min, NP.abs(fft_delays) <= delay_max)
else:
mask_ind = NP.logical_or(NP.abs(fft_delays) <= delay_min, NP.abs(fft_delays) >= delay_max)
filter_unmask[mask_ind] = 0.0
skyvis12 = DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*skyvis12,ax=0,inverse=False), ax=0, inverse=True)
skyvis23 = DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*skyvis23,ax=0,inverse=False), ax=0, inverse=True)
skyvis31 = DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*skyvis31,ax=0,inverse=False), ax=0, inverse=True)
vis12 = DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*vis12,ax=0,inverse=False), ax=0, inverse=True)
vis23 = DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*vis23,ax=0,inverse=False), ax=0, inverse=True)
vis31 = DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*vis31,ax=0,inverse=False), ax=0, inverse=True)
noise12 = DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*noise12,ax=0,inverse=False), ax=0, inverse=True)
noise23 = DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*noise23,ax=0,inverse=False), ax=0, inverse=True)
noise31 = DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*noise31,ax=0,inverse=False), ax=0, inverse=True)
# skyvis12 = 1.0 * fft_delays.size / NP.sum(filter_unmask) * DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(skyvis12,ax=0,inverse=False), ax=0, inverse=True)
# skyvis23 = 1.0 * fft_delays.size / NP.sum(filter_unmask) * DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(skyvis23,ax=0,inverse=False), ax=0, inverse=True)
# skyvis31 = 1.0 * fft_delays.size / NP.sum(filter_unmask) * DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(skyvis31,ax=0,inverse=False), ax=0, inverse=True)
# vis12 = 1.0 * fft_delays.size / NP.sum(filter_unmask) * DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(vis12,ax=0,inverse=False), ax=0, inverse=True)
# vis23 = 1.0 * fft_delays.size / NP.sum(filter_unmask) * DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(vis23,ax=0,inverse=False), ax=0, inverse=True)
# vis31 = 1.0 * fft_delays.size / NP.sum(filter_unmask) * DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(vis31,ax=0,inverse=False), ax=0, inverse=True)
# noise12 = 1.0 * fft_delays.size / NP.sum(filter_unmask) * DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(noise12,ax=0,inverse=False), ax=0, inverse=True)
# noise23 = 1.0 * fft_delays.size / NP.sum(filter_unmask) * DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(noise23,ax=0,inverse=False), ax=0, inverse=True)
# noise31 = 1.0 * fft_delays.size / NP.sum(filter_unmask) * DSP.FT1D(filter_unmask[:,NP.newaxis] * DSP.FT1D(noise31,ax=0,inverse=False), ax=0, inverse=True)
else:
filter_unmask12 = 1.0 * filter_unmask
filter_unmask23 = 1.0 * filter_unmask
filter_unmask31 = 1.0 * filter_unmask
delay_max12 = self.baseline_lengths[ind12] / FCNST.c + delay_width
delay_max23 = self.baseline_lengths[ind23] / FCNST.c + delay_width
delay_max31 = self.baseline_lengths[ind31] / FCNST.c + delay_width
if filter_mode.lower() == 'discard':
mask_ind12 = NP.abs(fft_delays) <= delay_max12
mask_ind23 = NP.abs(fft_delays) <= delay_max23
mask_ind31 = NP.abs(fft_delays) <= delay_max31
else:
mask_ind12 = NP.abs(fft_delays) >= delay_max12
mask_ind23 = NP.abs(fft_delays) >= delay_max23
mask_ind31 = NP.abs(fft_delays) >= delay_max31
filter_unmask12[mask_ind12] = 0.0
filter_unmask23[mask_ind23] = 0.0
filter_unmask31[mask_ind31] = 0.0
skyvis12 = DSP.FT1D(filter_unmask12[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*skyvis12,ax=0,inverse=False), ax=0, inverse=True)
skyvis23 = DSP.FT1D(filter_unmask23[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*skyvis23,ax=0,inverse=False), ax=0, inverse=True)
skyvis31 = DSP.FT1D(filter_unmask31[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*skyvis31,ax=0,inverse=False), ax=0, inverse=True)
vis12 = DSP.FT1D(filter_unmask12[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*vis12,ax=0,inverse=False), ax=0, inverse=True)
vis23 = DSP.FT1D(filter_unmask23[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*vis23,ax=0,inverse=False), ax=0, inverse=True)
vis31 = DSP.FT1D(filter_unmask31[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*vis31,ax=0,inverse=False), ax=0, inverse=True)
noise12 = DSP.FT1D(filter_unmask12[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*noise12,ax=0,inverse=False), ax=0, inverse=True)
noise23 = DSP.FT1D(filter_unmask23[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*noise23,ax=0,inverse=False), ax=0, inverse=True)
noise31 = DSP.FT1D(filter_unmask31[:,NP.newaxis] * DSP.FT1D(freq_wts.reshape(-1,1)*noise31,ax=0,inverse=False), ax=0, inverse=True)
# skyvis12 = 1.0 * fft_delays.size / NP.sum(filter_unmask12) * DSP.FT1D(filter_unmask12[:,NP.newaxis] * DSP.FT1D(skyvis12,ax=0,inverse=False), ax=0, inverse=True)
# skyvis23 = 1.0 * fft_delays.size / NP.sum(filter_unmask23) * DSP.FT1D(filter_unmask23[:,NP.newaxis] * DSP.FT1D(skyvis23,ax=0,inverse=False), ax=0, inverse=True)
# skyvis31 = 1.0 * fft_delays.size / NP.sum(filter_unmask31) * DSP.FT1D(filter_unmask31[:,NP.newaxis] * DSP.FT1D(skyvis31,ax=0,inverse=False), ax=0, inverse=True)
# vis12 = 1.0 * fft_delays.size / NP.sum(filter_unmask12) * DSP.FT1D(filter_unmask12[:,NP.newaxis] * DSP.FT1D(vis12,ax=0,inverse=False), ax=0, inverse=True)
# vis23 = 1.0 * fft_delays.size / NP.sum(filter_unmask23) * DSP.FT1D(filter_unmask23[:,NP.newaxis] * DSP.FT1D(vis23,ax=0,inverse=False), ax=0, inverse=True)
# vis31 = 1.0 * fft_delays.size / NP.sum(filter_unmask31) * DSP.FT1D(filter_unmask31[:,NP.newaxis] * DSP.FT1D(vis31,ax=0,inverse=False), ax=0, inverse=True)
# noise12 = 1.0 * fft_delays.size / NP.sum(filter_unmask12) * DSP.FT1D(filter_unmask12[:,NP.newaxis] * DSP.FT1D(noise12,ax=0,inverse=False), ax=0, inverse=True)
# noise23 = 1.0 * fft_delays.size / NP.sum(filter_unmask23) * DSP.FT1D(filter_unmask23[:,NP.newaxis] * DSP.FT1D(noise23,ax=0,inverse=False), ax=0, inverse=True)
# noise31 = 1.0 * fft_delays.size / NP.sum(filter_unmask31) * DSP.FT1D(filter_unmask31[:,NP.newaxis] * DSP.FT1D(noise31,ax=0,inverse=False), ax=0, inverse=True)
else:
skyvis12 = freq_wts.reshape(-1,1)*skyvis12
skyvis23 = freq_wts.reshape(-1,1)*skyvis23
skyvis31 = freq_wts.reshape(-1,1)*skyvis31
vis12 = freq_wts.reshape(-1,1)*vis12
vis23 = freq_wts.reshape(-1,1)*vis23
vis31 = freq_wts.reshape(-1,1)*vis31
noise12 = freq_wts.reshape(-1,1)*noise12
noise23 = freq_wts.reshape(-1,1)*noise23
noise31 = freq_wts.reshape(-1,1)*noise31
skyvis_triplets += [[skyvis12*bpwts12, skyvis23*bpwts23, skyvis31*bpwts31]]
vis_triplets += [[vis12*bpwts12, vis23*bpwts23, vis31*bpwts31]]
noise_triplets += [[noise12*bpwts12, noise23*bpwts23, noise31*bpwts31]]
progress.update(tripletind+1)
progress.finish()
skyvis_triplets = NP.asarray(skyvis_triplets)
vis_triplets = NP.asarray(vis_triplets)
noise_triplets = NP.asarray(noise_triplets)
# Apply the spectral smoothing now on the entire triplet arrays
# if none of delay filter or spectral windowing is to be performed,
# otherwise it must have been applied prior to either one of those
# operations
if (delay_filter_info is None) and (spectral_window_info is None) and (specsmooth_info is not None):
if specsmooth_info['op_type'].lower() == 'median':
skyvis_triplets = ndimage.median_filter(skyvis_triplets.real, size=(1,1,specsmooth_info['window_size'],1)) + 1j * ndimage.median_filter(skyvis_triplets.imag, size=(1,1,specsmooth_info['window_size'],1))
vis_triplets = ndimage.median_filter(vis_triplets.real, size=(1,1,specsmooth_info['window_size'],1)) + 1j * ndimage.median_filter(vis_triplets.imag, size=(1,1,specsmooth_info['window_size'],1))
noise_triplets = ndimage.median_filter(noise_triplets.real, size=(1,1,specsmooth_info['window_size'],1)) + 1j * ndimage.median_filter(noise_triplets.imag, size=(1,1,specsmooth_info['window_size'],1))
phase_skyvis123 = NP.angle(NP.prod(skyvis_triplets, axis=1))
phase_vis123 = NP.angle(NP.prod(vis_triplets, axis=1))
phase_noise123 = NP.angle(NP.prod(noise_triplets, axis=1))
return {'closure_phase_skyvis': phase_skyvis123, 'closure_phase_vis': phase_vis123, 'closure_phase_noise': phase_noise123, 'antenna_triplets': antenna_triplets, 'baseline_triplets': blvecttriplets, 'skyvis': skyvis_triplets, 'vis': vis_triplets, 'noisevis': noise_triplets, 'spectral_weights': freq_wts}
#############################################################################
def rotate_visibilities(self, ref_point, do_delay_transform=False,
                        verbose=True):
    """
    -------------------------------------------------------------------------
    Centers the phase of visibilities around any given phase center.
    Project baseline vectors with respect to a reference point on the sky.
    Essentially a wrapper to member functions phase_centering() and
    project_baselines()

    Input(s):

    ref_point   [dictionary] Contains information about the reference
                position to which projected baselines and rotated
                visibilities are to be computed. No defaults. It must
                contain the following keys with the following values:
                'coords'    [string] Refers to the coordinate system in
                            which value in key 'location' is specified
                            in. Accepted values are 'radec', 'hadec',
                            'altaz' and 'dircos'
                'location'  [numpy array] Must be a Mx2 (if value in key
                            'coords' is set to 'radec', 'hadec', 'altaz'
                            or 'dircos') or Mx3 (if value in key
                            'coords' is set to 'dircos'). M can be 1 or
                            equal to number of timestamps. If M=1, the
                            same reference point in the same coordinate
                            system will be repeated for all timestamps.
                            If value under key 'coords' is set to
                            'radec', 'hadec' or 'altaz', the value under
                            this key 'location' must be in units of
                            degrees.

    do_delay_transform
                [boolean] If set to True, also recompute the delay
                transform after the visibilities are rotated to the new
                phase center. If set to False (default), this is skipped

    verbose:    [boolean] If set to True (default), prints progress and
                diagnostic messages.

    Raises:

    ValueError  if ref_point is None
    TypeError   if ref_point is not a dictionary
    KeyError    if either of the keys 'location' or 'coords' is missing
    -------------------------------------------------------------------------
    """
    # Validate the reference-point specification up front so that the
    # delegated member functions receive a well-formed dictionary
    if ref_point is None:
        raise ValueError('Invalid input specified in ref_point')
    elif not isinstance(ref_point, dict):
        raise TypeError('Input ref_point must be a dictionary')
    else:
        if ('location' not in ref_point) or ('coords' not in ref_point):
            raise KeyError('Both keys "location" and "coords" must be specified in input dictionary ref_point')

    # Rotate the visibility phases to the new phase center, then update
    # the projected baselines with respect to the same reference point
    self.phase_centering(ref_point, do_delay_transform=do_delay_transform, verbose=verbose)
    self.project_baselines(ref_point)
#############################################################################
def phase_centering(self, ref_point, do_delay_transform=False, verbose=True):
    """
    -------------------------------------------------------------------------
    Centers the phase of visibilities around any given phase center.

    Inputs:

    ref_point [dictionary] Contains information about the reference
              position to which projected baselines and rotated
              visibilities are to be computed. No defaults. It must
              contain the following keys with the following values:
              'coords'    [string] Refers to the coordinate system in
                          which value in key 'location' is specified in.
                          Accepted values are 'radec', 'hadec', 'altaz'
                          and 'dircos'
              'location'  [numpy array] Must be a Mx2 (if value in key
                          'coords' is set to 'radec', 'hadec', 'altaz' or
                          'dircos') or Mx3 (if value in key 'coords' is
                          set to 'dircos'). M can be 1 or equal to number
                          of timestamps. If M=1, the same reference point
                          in the same coordinate system will be repeated
                          for all timestamps. If value under key 'coords'
                          is set to 'radec', 'hadec' or 'altaz', the
                          value under this key 'location' must be in
                          units of degrees.

    do_delay_transform
              [boolean] If set to True, also recompute the delay
              transform after the visibilities are rotated to the new
              phase center. If set to False (default), this is skipped

    verbose:  [boolean] If set to True (default), prints progress and
              diagnostic messages.
    -------------------------------------------------------------------------
    """
    # NOTE(review): ref_point is a bound parameter, so this NameError
    # guard can never trigger; retained as-is
    try:
        ref_point
    except NameError:
        raise NameError('Input ref_point must be provided')

    # Validate the structure of the reference-point specification
    if ref_point is None:
        raise ValueError('Invalid input specified in ref_point')
    elif not isinstance(ref_point, dict):
        raise TypeError('Input ref_point must be a dictionary')
    else:
        if ('location' not in ref_point) or ('coords' not in ref_point):
            raise KeyError('Both keys "location" and "coords" must be specified in input dictionary ref_point')

    phase_center = ref_point['location']
    phase_center_coords = ref_point['coords']

    # Validate the phase-center array; a single reference point is
    # replicated across all timestamps (one row per LST)
    if phase_center is None:
        raise ValueError('Valid phase center not specified in input ref_point')
    elif not isinstance(phase_center, NP.ndarray):
        raise TypeError('Phase center must be a numpy array')
    elif phase_center.shape[0] == 1:
        phase_center = NP.repeat(phase_center, len(self.lst), axis=0)
    elif phase_center.shape[0] != len(self.lst):
        raise ValueError('One phase center must be provided for every timestamp.')

    # Working copies of the current and new phase centers and their
    # coordinate-system labels ("+ 0.0" / "+ ''" force copies rather
    # than aliases)
    phase_center_current = self.phase_center + 0.0
    phase_center_new = phase_center + 0.0
    phase_center_coords_current = self.phase_center_coords + ''
    phase_center_coords_new = phase_center_coords + ''
    phase_center_temp = phase_center_new + 0.0
    phase_center_coords_temp = phase_center_coords_new + ''

    # Convert the new phase center, one coordinate system at a time,
    # until its label matches that of the current phase center
    # (phase_center_temp), and separately reduce phase_center_new to
    # direction cosines for the phase computation below
    if phase_center_coords_new is None:
        raise NameError('Coordinates of phase center not provided.')
    elif phase_center_coords_new == 'dircos':
        if (phase_center_new.shape[1] < 2) or (phase_center_new.shape[1] > 3):
            raise ValueError('Dimensions incompatible for direction cosine positions')
        if NP.any(NP.sqrt(NP.sum(phase_center_new**2, axis=1)) > 1.0):
            raise ValueError('direction cosines found to be exceeding unit magnitude.')
        if phase_center_new.shape[1] == 2:
            # Fill in the third direction cosine.
            # NOTE(review): this computes 1 - sqrt(l^2 + m^2); the usual
            # relation is n = sqrt(1 - l^2 - m^2) -- confirm intent
            n = 1.0 - NP.sqrt(NP.sum(phase_center_new**2, axis=1))
            phase_center_new = NP.hstack((phase_center_new, n.reshape(-1,1)))
        phase_center_temp = phase_center_new + 0.0
        phase_center_coords_temp = 'dircos'
        if phase_center_coords_temp != phase_center_coords_current:
            phase_center_temp = GEOM.dircos2altaz(phase_center_temp, units='degrees')
            phase_center_coords_temp = 'altaz'
        if phase_center_coords_temp != phase_center_coords_current:
            phase_center_temp = GEOM.altaz2hadec(phase_center_temp, self.latitude, units='degrees')
            phase_center_coords_temp = 'hadec'
        if phase_center_coords_temp != phase_center_coords_current:
            # NOTE(review): this converts HA -> RA (RA = LST - HA) but
            # re-labels the result 'hadec', so when the current coords
            # are 'radec' the next step applies LST - x a second time,
            # undoing the conversion -- looks like a bug; confirm
            phase_center_temp[:,0] = self.lst - phase_center_temp[:,0]
            phase_center_coords_temp = 'hadec'
        if phase_center_coords_temp != phase_center_coords_current:
            phase_center_temp[:,0] = self.lst - phase_center_temp[:,0]
            phase_center_coords_temp = 'radec'
        if phase_center_coords_temp != phase_center_coords_current:
            raise ValueError('Pointing coordinates of interferometer array instance invalid.')
    elif phase_center_coords_new == 'altaz':
        phase_center_temp = phase_center_new + 0.0
        phase_center_coords_temp = 'altaz'
        if phase_center_coords_temp != phase_center_coords_current:
            phase_center_temp = GEOM.altaz2hadec(phase_center_temp, self.latitude, units='degrees')
            phase_center_coords_temp = 'hadec'
        if phase_center_coords_temp != phase_center_coords_current:
            phase_center_temp[:,0] = self.lst - phase_center_temp[:,0]
            phase_center_coords_temp = 'radec'
        if phase_center_coords_temp != phase_center_coords_current:
            raise ValueError('Pointing coordinates of interferometer array instance invalid.')
        # NOTE(review): the label is overwritten with the current
        # coordinate system regardless of the chain above -- confirm
        # this is intentional (no analogous reset in the other branches)
        phase_center_coords_temp = phase_center_coords_current + ''
        phase_center_new = GEOM.altaz2dircos(phase_center_new, units='degrees')
    elif phase_center_coords_new == 'hadec':
        phase_center_temp = phase_center_new + 0.0
        phase_center_coords_temp = 'hadec'
        if phase_center_coords_temp != phase_center_coords_current:
            if self.pointing_coords == 'radec':
                phase_center_temp[:,0] = self.lst - phase_center_temp[:,0]
                phase_center_coords_temp = 'radec'
            else:
                phase_center_temp = GEOM.hadec2altaz(phase_center_temp, self.latitude, units='degrees')
                phase_center_coords_temp = 'altaz'
                if phase_center_coords_temp != phase_center_coords_current:
                    phase_center_temp = GEOM.altaz2dircos(phase_center_temp, units='degrees')
                    phase_center_coords_temp = 'dircos'
                    if phase_center_coords_temp != phase_center_coords_current:
                        raise ValueError('Pointing coordinates of interferometer array instance invalid.')
        # Reduce the new phase center itself to direction cosines
        phase_center_new = GEOM.hadec2altaz(phase_center_new, self.latitude, units='degrees')
        phase_center_new = GEOM.altaz2dircos(phase_center_new, units='degrees')
    elif phase_center_coords_new == 'radec':
        phase_center_temp = phase_center_new + 0.0
        if phase_center_coords_temp != phase_center_coords_current:
            phase_center_temp[:,0] = self.lst - phase_center_temp[:,0]
            phase_center_coords_temp = 'hadec'
        if phase_center_coords_temp != phase_center_coords_current:
            phase_center_temp = GEOM.hadec2altaz(phase_center_temp, self.latitude, units='degrees')
            phase_center_coords_temp = 'altaz'
        if phase_center_coords_temp != phase_center_coords_current:
            phase_center_temp = GEOM.altaz2dircos(phase_center_temp, units='degrees')
            phase_center_coords_temp = 'dircos'
        if phase_center_coords_temp != phase_center_coords_current:
            raise ValueError('Pointing coordinates of interferometer array instance invalid.')
        # Reduce the new phase center itself to direction cosines:
        # RA -> HA -> alt-az -> dircos
        phase_center_new[:,0] = self.lst - phase_center_new[:,0]
        phase_center_new = GEOM.hadec2altaz(phase_center_new, self.latitude, units='degrees')
        phase_center_new = GEOM.altaz2dircos(phase_center_new, units='degrees')
    else:
        raise ValueError('Invalid phase center coordinate system specified')

    # Convert the current phase center to direction cosines so the
    # phase difference can be computed as b . (l_current - l_new)
    phase_center_current_temp = phase_center_current + 0.0
    phase_center_coords_current_temp = phase_center_coords_current + ''
    if phase_center_coords_current_temp == 'radec':
        phase_center_current_temp[:,0] = self.lst - phase_center_current_temp[:,0]
        phase_center_coords_current_temp = 'hadec'
    if phase_center_coords_current_temp == 'hadec':
        phase_center_current_temp = GEOM.hadec2altaz(phase_center_current_temp, self.latitude, units='degrees')
        phase_center_coords_current_temp = 'altaz'
    if phase_center_coords_current_temp == 'altaz':
        phase_center_current_temp = GEOM.altaz2dircos(phase_center_current_temp, units='degrees')
        phase_center_coords_current_temp = 'dircos'

    # b_dot_l has shape n_baselines x n_timestamps
    pos_diff_dircos = phase_center_current_temp - phase_center_new
    b_dot_l = NP.dot(self.baselines, pos_diff_dircos.T)

    # Record the new phase center (in the current coordinate system)
    self.phase_center = phase_center_temp + 0.0
    self.phase_center_coords = phase_center_coords_temp + ''

    # Apply the phase rotation exp(-2*pi*i * b.dl * nu / c) to sky,
    # observed and noise visibilities alike
    self.skyvis_freq = self.skyvis_freq * NP.exp(-1j * 2 * NP.pi * b_dot_l[:,NP.newaxis,:] * self.channels.reshape(1,-1,1) / FCNST.c)
    if self.vis_freq is not None:
        self.vis_freq = self.vis_freq * NP.exp(-1j * 2 * NP.pi * b_dot_l[:,NP.newaxis,:] * self.channels.reshape(1,-1,1) / FCNST.c)
    if self.vis_noise_freq is not None:
        self.vis_noise_freq = self.vis_noise_freq * NP.exp(-1j * 2 * NP.pi * b_dot_l[:,NP.newaxis,:] * self.channels.reshape(1,-1,1) / FCNST.c)

    # Optionally refresh the delay spectra with default settings
    if do_delay_transform:
        self.delay_transform()
        print('Running delay_transform() with defaults inside phase_centering() after rotating visibility phases. Run delay_transform() again with appropriate inputs.')
#############################################################################
def project_baselines(self, ref_point):
    """
    ------------------------------------------------------------------------
    Project baseline vectors with respect to a reference point on the sky.
    Assigns the projected baselines to the attribute projected_baselines

    Input(s):

    ref_point [dictionary] Contains information about the reference
              position to which projected baselines are to be computed.
              No defaults. It must contain the following keys with the
              following values:
              'coords'    [string] Refers to the coordinate system in
                          which value in key 'location' is specified in.
                          Accepted values are 'radec', 'hadec', 'altaz'
                          and 'dircos'
              'location'  [numpy array] Must be a Mx2 (if value in key
                          'coords' is set to 'radec', 'hadec', 'altaz' or
                          'dircos') or Mx3 (if value in key 'coords' is
                          set to 'dircos'). M can be 1 or equal to number
                          of timestamps. If M=1, the same reference point
                          in the same coordinate system will be repeated
                          for all timestamps. If value under key 'coords'
                          is set to 'radec', 'hadec' or 'altaz', the
                          value under this key 'location' must be in
                          units of degrees.
    ------------------------------------------------------------------------
    """
    # Validate the structure of the reference-point specification
    if ref_point is None:
        raise ValueError('Invalid input specified in ref_point')
    elif not isinstance(ref_point, dict):
        raise TypeError('Input ref_point must be a dictionary')
    else:
        if ('location' not in ref_point) or ('coords' not in ref_point):
            raise KeyError('Both keys "location" and "coords" must be specified in input dictionary ref_point')

    phase_center = ref_point['location']
    phase_center_coords = ref_point['coords']
    if not isinstance(phase_center, NP.ndarray):
        raise TypeError('The specified reference point must be a numpy array')
    if not isinstance(phase_center_coords, str):
        raise TypeError('The specified coordinates of the reference point must be a string')
    if phase_center_coords not in ['radec', 'hadec', 'altaz', 'dircos']:
        raise ValueError('Specified coordinates of reference point invalid')
    if phase_center.ndim == 1:
        phase_center = phase_center.reshape(1,-1)
    if phase_center.ndim > 2:
        raise ValueError('Reference point has invalid dimensions')
    if (phase_center.shape[0] != self.n_acc) and (phase_center.shape[0] != 1):
        raise ValueError('Reference point has dimensions incompatible with the number of timestamps')
    if phase_center.shape[0] == 1:
        # Broadcast a single reference point to all n_acc timestamps
        phase_center = phase_center + NP.zeros(self.n_acc).reshape(-1,1)

    # Reduce the reference point to (HA, Dec) regardless of the input
    # coordinate system
    if phase_center_coords == 'radec':
        if phase_center.shape[1] != 2:
            raise ValueError('Reference point has invalid dimensions')
        ha = NP.asarray(self.lst) - phase_center[:,0]
        dec = phase_center[:,1]
    elif phase_center_coords == 'hadec':
        if phase_center.shape[1] != 2:
            raise ValueError('Reference point has invalid dimensions')
        ha = phase_center[:,0]
        dec = phase_center[:,1]
    elif phase_center_coords == 'altaz':
        if phase_center.shape[1] != 2:
            raise ValueError('Reference point has invalid dimensions')
        hadec = GEOM.altaz2hadec(phase_center, self.latitude, units='degrees')
        ha = hadec[:,0]
        dec = hadec[:,1]
    else:  # phase_center_coords = 'dircos'
        if (phase_center.shape[1] < 2) or (phase_center.shape[1] > 3):
            raise ValueError('Reference point has invalid dimensions')
        if NP.any(NP.sqrt(NP.sum(phase_center**2, axis=1)) > 1.0):
            raise ValueError('direction cosines found to be exceeding unit magnitude.')
        if phase_center.shape[1] == 2:
            # Fill in the third direction cosine.
            # NOTE(review): this computes 1 - sqrt(l^2 + m^2); the usual
            # relation is n = sqrt(1 - l^2 - m^2). Kept as-is for
            # consistency with phase_centering() -- confirm intent
            n = 1.0 - NP.sqrt(NP.sum(phase_center**2, axis=1))
            phase_center = NP.hstack((phase_center, n.reshape(-1,1)))
        altaz = GEOM.dircos2altaz(phase_center, units='degrees')
        # Bug fix: convert the alt-az coordinates (not the raw direction
        # cosines) to HA-Dec; previously the unconverted dircos array was
        # passed to altaz2hadec and the altaz result was discarded
        hadec = GEOM.altaz2hadec(altaz, self.latitude, units='degrees')
        ha = hadec[:,0]
        dec = hadec[:,1]

    ha = NP.radians(ha).ravel()
    dec = NP.radians(dec).ravel()

    # Rotate the equatorial (XYZ) baselines into the (u,v,w) frame of
    # the reference point, one rotation matrix per timestamp
    eq_baselines = GEOM.enu2xyz(self.baselines, self.latitude, units='degrees')
    rot_matrix = NP.asarray([[NP.sin(ha), NP.cos(ha), NP.zeros(ha.size)],
                             [-NP.sin(dec)*NP.cos(ha), NP.sin(dec)*NP.sin(ha), NP.cos(dec)],
                             [NP.cos(dec)*NP.cos(ha), -NP.cos(dec)*NP.sin(ha), NP.sin(dec)]])
    if rot_matrix.ndim == 2:
        rot_matrix = rot_matrix[:,:,NP.newaxis]  # To ensure correct dot product is obtained in the next step

    # (n_bl x [3]).(3 x [3] x n_acc) -> n_bl x (first 3) x n_acc
    self.projected_baselines = NP.dot(eq_baselines, rot_matrix)
#############################################################################
def conjugate(self, ind=None, verbose=True):
    """
    ------------------------------------------------------------------------
    Flips the baseline vectors and conjugates the visibilities for a
    specified subset of baselines.

    Inputs:

    ind     [scalar, list or numpy array] Indices pointing to specific
            baseline vectors which need to be flipped. Default = None means
            no flipping or conjugation. If all baselines are to be
            flipped, either provide all the indices in ind or set ind="all"

    verbose [boolean] If set to True (default), print diagnostic and
            progress messages. If set to False, no such messages are
            printed.
    ------------------------------------------------------------------------
    """
    if ind is not None:
        # Normalize the accepted index specifications ("all", scalar,
        # list, numpy array) into a numpy array of integer indices
        if isinstance(ind, str):
            if ind != 'all':
                raise ValueError('Value of ind must be "all" if set to string')
            ind = NP.arange(self.baselines.shape[0])
        elif isinstance(ind, int):
            ind = [ind]
        elif isinstance(ind, NP.ndarray):
            ind = ind.tolist()
        elif not isinstance(ind, list):
            raise TypeError('ind must be string "all", scalar interger, list or numpy array')
        ind = NP.asarray(ind)
        if NP.any(ind >= self.baselines.shape[0]):
            raise IndexError('Out of range indices found.')

        # Reverse the antenna-pair labels of the flipped baselines.
        # Bug fix: xrange is Python-2-only (NameError on Python 3);
        # range iterates identically on both
        self.labels = [tuple(reversed(self.labels[i])) if i in ind else self.labels[i] for i in range(len(self.labels))]
        # Flip the baseline vectors and refresh their orientations
        self.baselines[ind,:] = -self.baselines[ind,:]
        self.baseline_orientations = NP.angle(self.baselines[:,0] + 1j * self.baselines[:,1])
        # Conjugate every visibility product present for those baselines
        if self.vis_freq is not None:
            self.vis_freq[ind,:,:] = self.vis_freq[ind,:,:].conj()
        if self.skyvis_freq is not None:
            self.skyvis_freq[ind,:,:] = self.skyvis_freq[ind,:,:].conj()
        if self.vis_noise_freq is not None:
            self.vis_noise_freq[ind,:,:] = self.vis_noise_freq[ind,:,:].conj()
        if self.projected_baselines is not None:
            self.projected_baselines[ind,:,:] = -self.projected_baselines[ind,:,:]

        if verbose:
            warnings.warn('Certain baselines have been flipped and their visibilities conjugated. Use delay_transform() to update the delay spectra.')
#############################################################################
def delay_transform(self, pad=1.0, freq_wts=None, verbose=True):
    """
    ------------------------------------------------------------------------
    Transforms the visibilities from frequency axis onto delay (time) axis
    using an IFFT. This is performed for noiseless sky visibilities, thermal
    noise in visibilities, and observed visibilities.

    Inputs:

    pad      [scalar] Non-negative scalar indicating padding fraction
             relative to the number of frequency channels. For e.g., a
             pad of 1.0 pads the frequency axis with zeros of the same
             width as the number of channels. After the delay transform,
             the transformed visibilities are downsampled by a factor of
             1+pad. If a negative value is specified, delay transform
             will be performed with no padding

    freq_wts [numpy vector or array] window shaping to be applied before
             computing delay transform. It can either be a vector or size
             equal to the number of channels (which will be applied to all
             time instances for all baselines), or a nchan x n_snapshots
             numpy array which will be applied to all baselines, or a
             n_baselines x nchan numpy array which will be applied to all
             timestamps, or a n_baselines x nchan x n_snapshots numpy
             array. Default (None) will not apply windowing and only the
             inherent bandpass will be used.

    verbose  [boolean] If set to True (default), print diagnostic and
             progress messages. If set to False, no such messages are
             printed.
    ------------------------------------------------------------------------
    """
    if verbose:
        print('Preparing to compute delay transform...\n\tChecking input parameters for compatibility...')

    if not isinstance(pad, (int, float)):
        raise TypeError('pad fraction must be a scalar value.')
    if pad < 0.0:
        # Negative padding is treated as "no padding"
        pad = 0.0
        if verbose:
            warnings.warn('\tPad fraction found to be negative. Resetting to 0.0 (no padding will be applied).')

    if freq_wts is not None:
        # Expand freq_wts to the full n_baselines x nchan x n_acc cube,
        # dispatching on its total size.
        # NOTE(review): the size-based dispatch is ambiguous when
        # nchan == n_acc or nchan == n_baselines -- the first matching
        # branch wins; confirm callers never hit that degeneracy
        if freq_wts.size == self.channels.size:
            freq_wts = NP.repeat(NP.expand_dims(NP.repeat(freq_wts.reshape(1,-1), self.baselines.shape[0], axis=0), axis=2), self.n_acc, axis=2)
        elif freq_wts.size == self.channels.size * self.n_acc:
            freq_wts = NP.repeat(NP.expand_dims(freq_wts.reshape(self.channels.size, -1), axis=0), self.baselines.shape[0], axis=0)
        elif freq_wts.size == self.channels.size * self.baselines.shape[0]:
            freq_wts = NP.repeat(NP.expand_dims(freq_wts.reshape(-1, self.channels.size), axis=2), self.n_acc, axis=2)
        elif freq_wts.size == self.channels.size * self.baselines.shape[0] * self.n_acc:
            freq_wts = freq_wts.reshape(self.baselines.shape[0], self.channels.size, self.n_acc)
        else:
            raise ValueError('window shape dimensions incompatible with number of channels and/or number of tiemstamps.')
        self.bp_wts = freq_wts
        if verbose:
            print('\tFrequency window weights assigned.')

    if verbose:
        print('\tInput parameters have been verified to be compatible.\n\tProceeding to compute delay transform.')

    # Delay axis corresponding to the (unpadded) channel grid
    self.lags = DSP.spectral_axis(self.channels.size, delx=self.freq_resolution, use_real=False, shift=True)
    if pad == 0.0:
        # Inverse FFT along the frequency axis (axis 1); the
        # nchan * df factor converts the IFFT into a delay-spectrum
        # normalization
        self.vis_lag = DSP.FT1D(self.vis_freq * self.bp * self.bp_wts, ax=1, inverse=True, use_real=False, shift=True) * self.channels.size * self.freq_resolution
        self.skyvis_lag = DSP.FT1D(self.skyvis_freq * self.bp * self.bp_wts, ax=1, inverse=True, use_real=False, shift=True) * self.channels.size * self.freq_resolution
        self.vis_noise_lag = DSP.FT1D(self.vis_noise_freq * self.bp * self.bp_wts, ax=1, inverse=True, use_real=False, shift=True) * self.channels.size * self.freq_resolution
        self.lag_kernel = DSP.FT1D(self.bp * self.bp_wts, ax=1, inverse=True, use_real=False, shift=True) * self.channels.size * self.freq_resolution
        if verbose:
            print('\tDelay transform computed without padding.')
    else:
        # Zero-pad the frequency axis by npad channels before the IFFT,
        # then downsample by 1+pad to restore the original sample count
        npad = int(self.channels.size * pad)
        self.vis_lag = DSP.FT1D(NP.pad(self.vis_freq * self.bp * self.bp_wts, ((0,0),(0,npad),(0,0)), mode='constant'), ax=1, inverse=True, use_real=False, shift=True) * (npad + self.channels.size) * self.freq_resolution
        self.skyvis_lag = DSP.FT1D(NP.pad(self.skyvis_freq * self.bp * self.bp_wts, ((0,0),(0,npad),(0,0)), mode='constant'), ax=1, inverse=True, use_real=False, shift=True) * (npad + self.channels.size) * self.freq_resolution
        self.vis_noise_lag = DSP.FT1D(NP.pad(self.vis_noise_freq * self.bp * self.bp_wts, ((0,0),(0,npad),(0,0)), mode='constant'), ax=1, inverse=True, use_real=False, shift=True) * (npad + self.channels.size) * self.freq_resolution
        self.lag_kernel = DSP.FT1D(NP.pad(self.bp * self.bp_wts, ((0,0),(0,npad),(0,0)), mode='constant'), ax=1, inverse=True, use_real=False, shift=True) * (npad + self.channels.size) * self.freq_resolution
        if verbose:
            print('\tDelay transform computed with padding fraction {0:.1f}'.format(pad))
        self.vis_lag = DSP.downsampler(self.vis_lag, 1+pad, axis=1)
        self.skyvis_lag = DSP.downsampler(self.skyvis_lag, 1+pad, axis=1)
        self.vis_noise_lag = DSP.downsampler(self.vis_noise_lag, 1+pad, axis=1)
        self.lag_kernel = DSP.downsampler(self.lag_kernel, 1+pad, axis=1)
        if verbose:
            print('\tDelay transform products downsampled by factor of {0:.1f}'.format(1+pad))
    if verbose:
        print('delay_transform() completed successfully.')
#############################################################################
def multi_window_delay_transform(self, bw_eff, freq_center=None, shape=None,
                                 pad=1.0, verbose=True):
    """
    ------------------------------------------------------------------------
    Computes delay transform on multiple frequency windows with specified
    weights

    Inputs:

    bw_eff      [scalar, list, numpy array] Effective bandwidths of the
                selected frequency windows. If a scalar is provided, the
                same will be applied to all frequency windows.

    freq_center [scalar, list, numpy array] Frequency centers of the
                selected frequency windows. If a scalar is provided, the
                same will be applied to all frequency windows. Default=None
                uses the center frequency from the class attribute named
                channels

    shape       [string] specifies frequency window shape. Accepted values
                are 'rect' or 'RECT' (for rectangular), 'bnw' and 'BNW'
                (for Blackman-Nuttall), and 'bhw' or 'BHW' (for Blackman-
                Harris). Default=None sets it to 'rect' (rectangular
                window)

    pad         [scalar] Non-negative scalar indicating padding fraction
                relative to the number of frequency channels. For e.g., a
                pad of 1.0 pads the frequency axis with zeros of the same
                width as the number of channels. After the delay transform,
                the transformed visibilities are downsampled by a factor of
                1+pad. If a negative value is specified, delay transform
                will be performed with no padding

    verbose     [boolean] If set to True (default), print diagnostic and
                progress messages. If set to False, no such messages are
                printed.

    Output:

    A dictionary containing information under the following keys:
    skyvis_lag        Numpy array of pure sky visibilities delay spectra of
                      size n_bl x n_windows x nchan x n_snaps
    vis_noise_lag     Numpy array of noise delay spectra of size
                      size n_bl x n_windows x nchan x n_snaps
    lag_kernel        Numpy array of delay kernel of size
                      size n_bl x n_windows x nchan x n_snaps
    lag_corr_length   Numpy array of correlation length (in units of number
                      of delay samples) due to convolving kernel in delay
                      space. This is the number by which the delay spectra
                      obtained have to be downsampled by to get independent
                      samples of delay spectra
    ------------------------------------------------------------------------
    """
    # NOTE(review): bw_eff is a bound parameter, so this NameError guard
    # can never trigger; retained for symmetry with the rest of the file
    try:
        bw_eff
    except NameError:
        raise NameError('Effective bandwidth must be specified')
    else:
        if not isinstance(bw_eff, (int, float, list, NP.ndarray)):
            raise TypeError('Effective bandwidth must be a scalar, list or numpy array')
        bw_eff = NP.asarray(bw_eff).reshape(-1)
        if NP.any(bw_eff <= 0.0):
            raise ValueError('All values in effective bandwidth must be strictly positive')

    # Default window center is the band-center channel
    if freq_center is None:
        freq_center = NP.asarray(self.channels[int(0.5*self.channels.size)]).reshape(-1)
    elif isinstance(freq_center, (int, float, list, NP.ndarray)):
        freq_center = NP.asarray(freq_center).reshape(-1)
        if NP.any((freq_center <= self.channels.min()) | (freq_center >= self.channels.max())):
            raise ValueError('Frequency centers must lie strictly inside the observing band')
    else:
        raise TypeError('Frequency center(s) must be scalar, list or numpy array')

    # Broadcast a scalar bandwidth / center against the other input
    if (bw_eff.size == 1) and (freq_center.size > 1):
        bw_eff = NP.repeat(bw_eff, freq_center.size)
    elif (bw_eff.size > 1) and (freq_center.size == 1):
        freq_center = NP.repeat(freq_center, bw_eff.size)
    elif bw_eff.size != freq_center.size:
        raise ValueError('Effective bandwidth(s) and frequency center(s) must have same number of elements')

    if shape is not None:
        if not isinstance(shape, str):
            raise TypeError('Window shape must be a string')
        if shape not in ['rect', 'bhw', 'bnw', 'RECT', 'BHW', 'BNW']:
            raise ValueError('Invalid value for window shape specified.')
    else:
        shape = 'rect'

    if not isinstance(pad, (int, float)):
        raise TypeError('pad fraction must be a scalar value.')
    if pad < 0.0:
        # Negative padding is treated as "no padding"
        pad = 0.0
        if verbose:
            warnings.warn('\tPad fraction found to be negative. Resetting to 0.0 (no padding will be applied).')

    # Build one spectral window (row of freq_wts) per requested center.
    # n_window is the window length that yields the requested effective
    # bandwidth for the chosen window shape.
    freq_wts = NP.empty((bw_eff.size, self.channels.size))
    frac_width = DSP.window_N2width(n_window=None, shape=shape)
    window_loss_factor = 1 / frac_width
    # Bug fix: the NP.int alias was removed in NumPy >= 1.24; the
    # builtin int is the equivalent dtype specifier
    n_window = NP.round(window_loss_factor * bw_eff / self.freq_resolution).astype(int)

    # Snap each requested center to its nearest channel
    ind_freq_center, ind_channels, dfrequency = LKP.find_1NN(self.channels.reshape(-1,1), freq_center.reshape(-1,1), distance_ULIM=0.5*self.freq_resolution, remove_oob=True)
    sortind = NP.argsort(ind_channels)
    ind_freq_center = ind_freq_center[sortind]
    ind_channels = ind_channels[sortind]
    dfrequency = dfrequency[sortind]
    n_window = n_window[sortind]

    for i,ind_chan in enumerate(ind_channels):
        # Generate the window, locate its samples on the channel grid,
        # and zero-pad it out to the full band
        window = DSP.windowing(n_window[i], shape=shape, centering=True)
        window_chans = self.channels[ind_chan] + self.freq_resolution * (NP.arange(n_window[i]) - int(n_window[i]/2))
        ind_window_chans, ind_chans, dfreq = LKP.find_1NN(self.channels.reshape(-1,1), window_chans.reshape(-1,1), distance_ULIM=0.5*self.freq_resolution, remove_oob=True)
        sind = NP.argsort(ind_window_chans)
        ind_window_chans = ind_window_chans[sind]
        ind_chans = ind_chans[sind]
        dfreq = dfreq[sind]
        window = window[ind_window_chans]
        window = NP.pad(window, ((ind_chans.min(), self.channels.size-1-ind_chans.max())), mode='constant', constant_values=((0.0,0.0)))
        freq_wts[i,:] = window

    # NOTE(review): lags is computed but neither stored nor returned --
    # retained for parity with delay_transform(); confirm if needed
    lags = DSP.spectral_axis(self.channels.size, delx=self.freq_resolution, use_real=False, shift=True)
    if pad == 0.0:
        # Inverse FFT along the frequency axis (axis 2) with the windows
        # broadcast across baselines and snapshots
        skyvis_lag = DSP.FT1D(self.skyvis_freq[:,NP.newaxis,:,:] * self.bp[:,NP.newaxis,:,:] * freq_wts[NP.newaxis,:,:,NP.newaxis], ax=2, inverse=True, use_real=False, shift=True) * self.channels.size * self.freq_resolution
        vis_noise_lag = DSP.FT1D(self.vis_noise_freq[:,NP.newaxis,:,:] * self.bp[:,NP.newaxis,:,:] * freq_wts[NP.newaxis,:,:,NP.newaxis], ax=2, inverse=True, use_real=False, shift=True) * self.channels.size * self.freq_resolution
        lag_kernel = DSP.FT1D(self.bp[:,NP.newaxis,:,:] * freq_wts[NP.newaxis,:,:,NP.newaxis], ax=2, inverse=True, use_real=False, shift=True) * self.channels.size * self.freq_resolution
        if verbose:
            print('\tMulti-window delay transform computed without padding.')
    else:
        # Zero-pad the frequency axis, transform, then downsample by
        # 1+pad to restore the original sample count
        npad = int(self.channels.size * pad)
        skyvis_lag = DSP.FT1D(NP.pad(self.skyvis_freq[:,NP.newaxis,:,:] * self.bp[:,NP.newaxis,:,:] * freq_wts[NP.newaxis,:,:,NP.newaxis], ((0,0),(0,0),(0,npad),(0,0)), mode='constant'), ax=2, inverse=True, use_real=False, shift=True) * (npad + self.channels.size) * self.freq_resolution
        vis_noise_lag = DSP.FT1D(NP.pad(self.vis_noise_freq[:,NP.newaxis,:,:] * self.bp[:,NP.newaxis,:,:] * freq_wts[NP.newaxis,:,:,NP.newaxis], ((0,0),(0,0),(0,npad),(0,0)), mode='constant'), ax=2, inverse=True, use_real=False, shift=True) * (npad + self.channels.size) * self.freq_resolution
        lag_kernel = DSP.FT1D(NP.pad(self.bp[:,NP.newaxis,:,:] * freq_wts[NP.newaxis,:,:,NP.newaxis], ((0,0),(0,0),(0,npad),(0,0)), mode='constant'), ax=2, inverse=True, use_real=False, shift=True) * (npad + self.channels.size) * self.freq_resolution
        if verbose:
            print('\tMulti-window delay transform computed with padding fraction {0:.1f}'.format(pad))
        skyvis_lag = DSP.downsampler(skyvis_lag, 1+pad, axis=2)
        vis_noise_lag = DSP.downsampler(vis_noise_lag, 1+pad, axis=2)
        lag_kernel = DSP.downsampler(lag_kernel, 1+pad, axis=2)
        if verbose:
            print('\tMulti-window delay transform products downsampled by factor of {0:.1f}'.format(1+pad))
    if verbose:
        print('multi_window_delay_transform() completed successfully.')

    return {'skyvis_lag': skyvis_lag, 'vis_noise_lag': vis_noise_lag, 'lag_kernel': lag_kernel, 'lag_corr_length': self.channels.size / NP.sum(freq_wts, axis=1)}
#############################################################################
def concatenate(self, others, axis):
    """
    -------------------------------------------------------------------------
    Concatenates different visibility data sets from instances of class
    InterferometerArray along baseline, frequency or time axis.

    Inputs:

    others   [instance of class InterferometerArray or list of such
             instances] Instance or list of instances of class
             InterferometerArray whose visibility data have to be
             concatenated to the current instance.

    axis     [scalar] Axis along which visibility data sets are to be
             concatenated. Accepted values are 0 (concatenate along
             baseline axis), 1 (concatenate frequency channels), or 2
             (concatenate along time/snapshot axis). No default
    -------------------------------------------------------------------------
    """

    # Normalize input into loo, the list of all instances (self first)
    # whose data will be concatenated
    if isinstance(others, list):
        for other in others:
            if not isinstance(other, InterferometerArray):
                raise TypeError('The interferometer array data to be concatenated must be an instance of class InterferometerArray or a list of such instances')
        loo = [self]+others
    elif isinstance(others, InterferometerArray):
        loo = [self, others]
    else:
        # Fix: the original wrote "elif not isinstance(other, ...)" here,
        # referencing the undefined name "other" and raising NameError
        # instead of the intended TypeError
        raise TypeError('The interferometer array data to be concatenated must be an instance of class InterferometerArray or a list of such instances')
    if not isinstance(axis, int):
        raise TypeError('axis must be an integer')

    # Validate axis against the (nbl, nchan, nts) shape of the visibilities;
    # only -1 is accepted among negative indices
    self_shape = self.skyvis_freq.shape
    if axis >= len(self_shape):
        raise ValueError('Specified axis not found in the visibility data.')
    elif axis == -1:
        axis = len(self_shape) - 1
    elif axis < -1:
        raise ValueError('Specified axis not found in the visibility data.')

    # Concatenate the frequency-domain data products common to all axes
    self.skyvis_freq = NP.concatenate(tuple([elem.skyvis_freq for elem in loo]), axis=axis)
    if self.vis_freq is not None:
        self.vis_freq = NP.concatenate(tuple([elem.vis_freq for elem in loo]), axis=axis)
    if self.vis_noise_freq is not None:
        self.vis_noise_freq = NP.concatenate(tuple([elem.vis_noise_freq for elem in loo]), axis=axis)
    if self.vis_rms_freq is not None:
        self.vis_rms_freq = NP.concatenate(tuple([elem.vis_rms_freq for elem in loo]), axis=axis)
    self.bp = NP.concatenate(tuple([elem.bp for elem in loo]), axis=axis)
    self.bp_wts = NP.concatenate(tuple([elem.bp_wts for elem in loo]), axis=axis)
    self.Tsys = NP.concatenate(tuple([elem.Tsys for elem in loo]), axis=axis)
    if self.gradient_mode is not None:
        # Gradient arrays carry an extra leading axis, hence axis+1
        self.gradient[self.gradient_mode] = NP.concatenate(tuple([elem.gradient[self.gradient_mode] for elem in loo]), axis=axis+1)
    if not self.Tsysinfo:
        # Inherit Tsys information from the first instance that has any
        for elem in loo:
            if elem.Tsysinfo:
                self.Tsysinfo = elem.Tsysinfo

    # Delay-domain products can be concatenated along baseline or time,
    # but not along the frequency axis (lags would become inconsistent)
    if axis != 1:
        if self.skyvis_lag is not None:
            self.skyvis_lag = NP.concatenate(tuple([elem.skyvis_lag for elem in loo]), axis=axis)
        if self.vis_lag is not None:
            self.vis_lag = NP.concatenate(tuple([elem.vis_lag for elem in loo]), axis=axis)
        if self.vis_noise_lag is not None:
            self.vis_noise_lag = NP.concatenate(tuple([elem.vis_noise_lag for elem in loo]), axis=axis)

    if axis == 0: # baseline axis
        for elem in loo:
            if elem.baseline_coords != self.baseline_coords:
                raise ValueError('Coordinate systems for the baseline vectors are mismatched.')
        self.baselines = NP.vstack(tuple([elem.baselines for elem in loo]))
        self.baseline_lengths = NP.sqrt(NP.sum(self.baselines**2, axis=1))
        self.baseline_orientations = NP.angle(self.baselines[:,0] + 1j * self.baselines[:,1])
        self.projected_baselines = NP.vstack(tuple([elem.projected_baselines for elem in loo]))
        self.labels = [label for elem in loo for label in elem.labels]
        self.A_eff = NP.vstack(tuple([elem.A_eff for elem in loo]))
        self.eff_Q = NP.vstack(tuple([elem.eff_Q for elem in loo]))
    elif axis == 1: # Frequency axis
        self.channels = NP.hstack(tuple([elem.channels for elem in loo]))
        self.A_eff = NP.hstack(tuple([elem.A_eff for elem in loo]))
        self.eff_Q = NP.hstack(tuple([elem.eff_Q for elem in loo]))
        # self.delay_transform()
    elif axis == 2: # time axis
        # self.timestamp = [timestamp for elem in loo for timestamp in elem.timestamp]
        self.t_acc = [t_acc for elem in loo for t_acc in elem.t_acc]
        self.n_acc = len(self.t_acc)
        self.t_obs = sum(self.t_acc)
        self.pointing_center = NP.vstack(tuple([elem.pointing_center for elem in loo]))
        self.phase_center = NP.vstack(tuple([elem.phase_center for elem in loo]))
        self.lst = [lst for elem in loo for lst in elem.lst]
        self.timestamp = [timestamp for elem in loo for timestamp in elem.timestamp]
        self.Tsysinfo = [Tsysinfo for elem in loo for Tsysinfo in elem.Tsysinfo]
#############################################################################
def save(self, outfile, fmt='HDF5', tabtype='BinTableHDU', npz=True,
         overwrite=False, uvfits_parms=None, verbose=True):
    """
    -------------------------------------------------------------------------
    Saves the interferometer array information to disk in HDF5, FITS, NPZ
    and UVFITS formats

    Inputs:

    outfile      [string] Filename with full path to be saved to. Will be
                 appended with '.hdf5' or '.fits' extension depending on
                 input keyword fmt. If input npz is set to True, the
                 simulated visibilities will also get stored in '.npz'
                 format. Depending on parameters in uvfits_parms, three
                 UVFITS files will also be created whose names will be
                 outfile+'-noiseless', outfile+'-noisy' and
                 outfile+'-noise' appended with '.uvfits'

    Keyword Input(s):

    fmt          [string] string specifying the format of the output.
                 Accepted values are 'HDF5' (default) and 'FITS'.
                 The file names will be appended with '.hdf5' or '.fits'
                 respectively

    tabtype      [string] indicates table type for one of the extensions in
                 the FITS file. Allowed values are 'BinTableHDU' and
                 'TableHDU' for binary and ascii tables respectively. Default
                 is 'BinTableHDU'. Only applies if input fmt is set to 'FITS'

    npz          [boolean] True (default) indicates a numpy NPZ format file
                 is created in addition to the FITS file to store essential
                 attributes of the class InterferometerArray for easy
                 handing over of python files

    overwrite    [boolean] True indicates overwrite even if a file already
                 exists. Default = False (does not overwrite). Beware this
                 may not work reliably for UVFITS output when uvfits_method
                 is set to None or 'uvdata' and hence always better to make
                 sure the output file does not exist already

    uvfits_parms [dictionary] specifies basic parameters required for
                 saving in UVFITS format. If set to None (default), the
                 data will not be saved in UVFITS format. To save in UVFITS
                 format, the following keys and values are required:
                 'ref_point'  [dictionary] Contains information about the
                              reference position to which projected
                              baselines and rotated visibilities are to
                              be computed. Default=None (no additional
                              phasing will be performed). It must
                              contain the following keys with the
                              following values:
                              'coords'   [string] Refers to the
                                         coordinate system in which value
                                         in key 'location' is specified
                                         in. Accepted values are 'radec',
                                         'hadec', 'altaz' and 'dircos'
                              'location' [numpy array] Must be a Mx2 (if
                                         value in key 'coords' is set to
                                         'radec', 'hadec', 'altaz' or
                                         'dircos') or Mx3 (if value in
                                         key 'coords' is set to
                                         'dircos'). M can be 1 or equal
                                         to number of timestamps. If M=1,
                                         the same reference point in the
                                         same coordinate system will be
                                         repeated for all timestamps. If
                                         value under key 'coords' is set
                                         to 'radec', 'hadec' or 'altaz',
                                         the value under this key
                                         'location' must be in units of
                                         degrees.
                 'method'     [string] specifies method to be used in
                              saving in UVFITS format. Accepted values are
                              'uvdata', 'uvfits' or None (default). If set
                              to 'uvdata', the UVFITS writer in uvdata
                              module is used. If set to 'uvfits', the
                              in-house UVFITS writer is used. If set to
                              None, first uvdata module will be attempted
                              but if it fails then the in-house UVFITS
                              writer will be tried.

    verbose      [boolean] If True (default), prints diagnostic and progress
                 messages. If False, suppress printing such messages.
    -------------------------------------------------------------------------
    """

    try:
        outfile
    except NameError:
        raise NameError('No filename provided. Aborting InterferometerArray.save()...')

    if fmt.lower() not in ['hdf5', 'fits']:
        raise ValueError('Invalid output file format specified')
    if fmt.lower() == 'hdf5':
        filename = outfile + '.' + fmt.lower()
    if fmt.lower() == 'fits':
        filename = outfile + '.' + fmt.lower()

    if verbose:
        print('\nSaving information about interferometer...')

    if fmt.lower() == 'fits':
        use_ascii = False
        if tabtype == 'TableHDU':
            use_ascii = True

        # Primary HDU carries scalar metadata in its header
        hdulist = []
        hdulist += [fits.PrimaryHDU()]
        hdulist[0].header['latitude'] = (self.latitude, 'Latitude of interferometer')
        hdulist[0].header['longitude'] = (self.longitude, 'Longitude of interferometer')
        hdulist[0].header['altitude'] = (self.altitude, 'Altitude of interferometer')
        hdulist[0].header['baseline_coords'] = (self.baseline_coords, 'Baseline coordinate system')
        hdulist[0].header['freq_resolution'] = (self.freq_resolution, 'Frequency Resolution (Hz)')
        hdulist[0].header['pointing_coords'] = (self.pointing_coords, 'Pointing coordinate system')
        hdulist[0].header['phase_center_coords'] = (self.phase_center_coords, 'Phase center coordinate system')
        hdulist[0].header['skycoords'] = (self.skycoords, 'Sky coordinate system')
        if 'id' in self.telescope:
            hdulist[0].header['telescope'] = (self.telescope['id'], 'Telescope Name')
        if self.telescope['groundplane'] is not None:
            hdulist[0].header['groundplane'] = (self.telescope['groundplane'], 'Ground plane height')
        if self.simparms_file is not None:
            hdulist[0].header['simparms'] = (self.simparms_file, 'YAML file with simulation parameters')
        if self.gradient_mode is not None:
            hdulist[0].header['gradient_mode'] = (self.gradient_mode, 'Visibility Gradient Mode')
        if self.gaininfo is not None:
            hdulist[0].header['gainsfile'] = (outfile+'.gains.hdf5', 'Gains File')
        hdulist[0].header['element_shape'] = (self.telescope['shape'], 'Antenna element shape')
        hdulist[0].header['element_size'] = (self.telescope['size'], 'Antenna element size')
        hdulist[0].header['element_ocoords'] = (self.telescope['ocoords'], 'Antenna element orientation coordinates')
        hdulist[0].header['t_obs'] = (self.t_obs, 'Observing duration (s)')
        hdulist[0].header['n_acc'] = (self.n_acc, 'Number of accumulations')
        hdulist[0].header['flux_unit'] = (self.flux_unit, 'Unit of flux density')
        hdulist[0].header['EXTNAME'] = 'PRIMARY'

        if verbose:
            print('\tCreated a primary HDU.')

        hdulist += [fits.ImageHDU(self.telescope['orientation'], name='Antenna element orientation')]
        if verbose:
            print('\tCreated an extension for antenna element orientation.')

        # Pointing and phase center info, one row per accumulation
        cols = []
        if self.lst:
            cols += [fits.Column(name='LST', format='D', array=NP.asarray(self.lst).ravel())]
            cols += [fits.Column(name='pointing_longitude', format='D', array=self.pointing_center[:,0])]
            cols += [fits.Column(name='pointing_latitude', format='D', array=self.pointing_center[:,1])]
            cols += [fits.Column(name='phase_center_longitude', format='D', array=self.phase_center[:,0])]
            cols += [fits.Column(name='phase_center_latitude', format='D', array=self.phase_center[:,1])]

        columns = _astropy_columns(cols, tabtype=tabtype)

        tbhdu = fits.new_table(columns)
        tbhdu.header.set('EXTNAME', 'POINTING AND PHASE CENTER INFO')
        hdulist += [tbhdu]
        if verbose:
            print('\tCreated pointing and phase center information table.')

        # Baseline labels; the string width is taken from the layout dtype
        # (e.g. '|S8' -> 8)
        # label_lengths = [len(label[0]) for label in self.labels]
        # maxlen = max(label_lengths)
        maxlen = int(self.layout['labels'].dtype.str.split('|')[1][1:])
        labels = NP.asarray(self.labels, dtype=[('A2', '|S{0:0d}'.format(maxlen)), ('A1', '|S{0:0d}'.format(maxlen))])
        cols = []
        cols += [fits.Column(name='A1', format='{0:0d}A'.format(maxlen), array=labels['A1'])]
        cols += [fits.Column(name='A2', format='{0:0d}A'.format(maxlen), array=labels['A2'])]
        # cols += [fits.Column(name='labels', format='5A', array=NP.asarray(self.labels))]

        columns = _astropy_columns(cols, tabtype=tabtype)

        tbhdu = fits.new_table(columns)
        tbhdu.header.set('EXTNAME', 'LABELS')
        hdulist += [tbhdu]
        if verbose:
            print('\tCreated extension table containing baseline labels.')

        hdulist += [fits.ImageHDU(self.baselines, name='baselines')]
        if verbose:
            print('\tCreated an extension for baseline vectors.')

        if self.projected_baselines is not None:
            hdulist += [fits.ImageHDU(self.projected_baselines, name='proj_baselines')]
            if verbose:
                print('\tCreated an extension for projected baseline vectors.')

        if self.layout:
            label_lengths = [len(label) for label in self.layout['labels']]
            maxlen = max(label_lengths)
            cols = []
            cols += [fits.Column(name='labels', format='{0:0d}A'.format(maxlen), array=self.layout['labels'])]
            cols += [fits.Column(name='ids', format='J', array=self.layout['ids'])]
            cols += [fits.Column(name='positions', format='3D', array=self.layout['positions'])]
            columns = _astropy_columns(cols, tabtype=tabtype)
            tbhdu = fits.new_table(columns)
            tbhdu.header.set('EXTNAME', 'LAYOUT')
            tbhdu.header.set('COORDS', self.layout['coords'])
            hdulist += [tbhdu]

        hdulist += [fits.ImageHDU(self.A_eff, name='Effective area')]
        if verbose:
            print('\tCreated an extension for effective area.')

        hdulist += [fits.ImageHDU(self.eff_Q, name='Interferometer efficiency')]
        if verbose:
            print('\tCreated an extension for interferometer efficiency.')

        cols = []
        cols += [fits.Column(name='frequency', format='D', array=self.channels)]
        if self.lags is not None:
            cols += [fits.Column(name='lag', format='D', array=self.lags)]

        columns = _astropy_columns(cols, tabtype=tabtype)

        tbhdu = fits.new_table(columns)
        tbhdu.header.set('EXTNAME', 'SPECTRAL INFO')
        hdulist += [tbhdu]
        if verbose:
            print('\tCreated spectral information table.')

        if self.t_acc:
            hdulist += [fits.ImageHDU(self.t_acc, name='t_acc')]
            if verbose:
                print('\tCreated an extension for accumulation times.')

        cols = []
        if isinstance(self.timestamp[0], str):
            cols += [fits.Column(name='timestamps', format='24A', array=NP.asarray(self.timestamp))]
        elif isinstance(self.timestamp[0], float):
            cols += [fits.Column(name='timestamps', format='D', array=NP.asarray(self.timestamp))]
        else:
            raise TypeError('Invalid data type for timestamps')

        columns = _astropy_columns(cols, tabtype=tabtype)

        tbhdu = fits.new_table(columns)
        tbhdu.header.set('EXTNAME', 'TIMESTAMPS')
        hdulist += [tbhdu]
        if verbose:
            print('\tCreated extension table containing timestamps.')

        if self.Tsysinfo:
            # Fix: NP.float (a plain alias of builtin float) was removed in
            # NumPy >= 1.20; use float directly
            cols = []
            cols += [fits.Column(name='Trx', format='D', array=NP.asarray([elem['Trx'] for elem in self.Tsysinfo], dtype=float))]
            cols += [fits.Column(name='Tant0', format='D', array=NP.asarray([elem['Tant']['T0'] for elem in self.Tsysinfo], dtype=float))]
            cols += [fits.Column(name='f0', format='D', array=NP.asarray([elem['Tant']['f0'] for elem in self.Tsysinfo], dtype=float))]
            cols += [fits.Column(name='spindex', format='D', array=NP.asarray([elem['Tant']['spindex'] for elem in self.Tsysinfo], dtype=float))]
            columns = _astropy_columns(cols, tabtype=tabtype)
            tbhdu = fits.new_table(columns)
            tbhdu.header.set('EXTNAME', 'TSYSINFO')
            hdulist += [tbhdu]

        hdulist += [fits.ImageHDU(self.Tsys, name='Tsys')]
        if verbose:
            print('\tCreated an extension for Tsys.')

        if self.vis_rms_freq is not None:
            hdulist += [fits.ImageHDU(self.vis_rms_freq, name='freq_channel_noise_rms_visibility')]
            if verbose:
                print('\tCreated an extension for simulated visibility noise rms per channel.')

        # Complex visibilities are stored as separate real/imag image HDUs
        if self.vis_freq is not None:
            hdulist += [fits.ImageHDU(self.vis_freq.real, name='real_freq_obs_visibility')]
            hdulist += [fits.ImageHDU(self.vis_freq.imag, name='imag_freq_obs_visibility')]
            if verbose:
                print('\tCreated extensions for real and imaginary parts of observed visibility frequency spectrum of size {0[0]} x {0[1]} x {0[2]}'.format(self.vis_freq.shape))

        if self.skyvis_freq is not None:
            hdulist += [fits.ImageHDU(self.skyvis_freq.real, name='real_freq_sky_visibility')]
            hdulist += [fits.ImageHDU(self.skyvis_freq.imag, name='imag_freq_sky_visibility')]
            if verbose:
                print('\tCreated extensions for real and imaginary parts of noiseless sky visibility frequency spectrum of size {0[0]} x {0[1]} x {0[2]}'.format(self.skyvis_freq.shape))

        if self.vis_noise_freq is not None:
            hdulist += [fits.ImageHDU(self.vis_noise_freq.real, name='real_freq_noise_visibility')]
            hdulist += [fits.ImageHDU(self.vis_noise_freq.imag, name='imag_freq_noise_visibility')]
            if verbose:
                print('\tCreated extensions for real and imaginary parts of visibility noise frequency spectrum of size {0[0]} x {0[1]} x {0[2]}'.format(self.vis_noise_freq.shape))

        if self.gradient_mode is not None:
            for gradkey in self.gradient:
                hdulist += [fits.ImageHDU(self.gradient[gradkey].real, name='real_freq_sky_visibility_gradient_wrt_{0}'.format(gradkey))]
                hdulist += [fits.ImageHDU(self.gradient[gradkey].imag, name='imag_freq_sky_visibility_gradient_wrt_{0}'.format(gradkey))]
                if verbose:
                    print('\tCreated extensions for real and imaginary parts of gradient of sky visibility frequency spectrum wrt {0} of size {1[0]} x {1[1]} x {1[2]} x {1[3]}'.format(gradkey, self.gradient[gradkey].shape))

        hdulist += [fits.ImageHDU(self.bp, name='bandpass')]
        if verbose:
            print('\tCreated an extension for bandpass functions of size {0[0]} x {0[1]} x {0[2]} as a function of baseline, frequency, and snapshot instance'.format(self.bp.shape))

        hdulist += [fits.ImageHDU(self.bp_wts, name='bandpass_weights')]
        if verbose:
            print('\tCreated an extension for bandpass weights of size {0[0]} x {0[1]} x {0[2]} as a function of baseline, frequency, and snapshot instance'.format(self.bp_wts.shape))

        # hdulist += [fits.ImageHDU(self.lag_kernel.real, name='lag_kernel_real')]
        # hdulist += [fits.ImageHDU(self.lag_kernel.imag, name='lag_kernel_imag')]
        # if verbose:
        #     print('\tCreated an extension for impulse response of frequency bandpass shape of size {0[0]} x {0[1]} x {0[2]} as a function of baseline, lags, and snapshot instance'.format(self.lag_kernel.shape))

        if self.vis_lag is not None:
            hdulist += [fits.ImageHDU(self.vis_lag.real, name='real_lag_visibility')]
            hdulist += [fits.ImageHDU(self.vis_lag.imag, name='imag_lag_visibility')]
            if verbose:
                print('\tCreated extensions for real and imaginary parts of observed visibility delay spectrum of size {0[0]} x {0[1]} x {0[2]}'.format(self.vis_lag.shape))

        if self.skyvis_lag is not None:
            hdulist += [fits.ImageHDU(self.skyvis_lag.real, name='real_lag_sky_visibility')]
            hdulist += [fits.ImageHDU(self.skyvis_lag.imag, name='imag_lag_sky_visibility')]
            if verbose:
                print('\tCreated extensions for real and imaginary parts of noiseless sky visibility delay spectrum of size {0[0]} x {0[1]} x {0[2]}'.format(self.skyvis_lag.shape))

        if self.vis_noise_lag is not None:
            hdulist += [fits.ImageHDU(self.vis_noise_lag.real, name='real_lag_noise_visibility')]
            hdulist += [fits.ImageHDU(self.vis_noise_lag.imag, name='imag_lag_noise_visibility')]
            if verbose:
                print('\tCreated extensions for real and imaginary parts of visibility noise delay spectrum of size {0[0]} x {0[1]} x {0[2]}'.format(self.vis_noise_lag.shape))

        if verbose:
            print('\tNow writing FITS file to disk...')

        hdu = fits.HDUList(hdulist)
        hdu.writeto(filename, overwrite=overwrite)

        if self.gaininfo is not None:
            self.gaininfo.write_gaintable(outfile+'.gains.hdf5')

    elif fmt.lower() == 'hdf5':
        if overwrite:
            write_str = 'w'
        else:
            write_str = 'w-'  # 'w-' fails if the file already exists
        with h5py.File(filename, write_str) as fileobj:
            # Header: package versions and flux unit
            hdr_group = fileobj.create_group('header')
            hdr_group['AstroUtils#'] = astroutils.__githash__
            hdr_group['PRISim#'] = prisim.__githash__
            hdr_group['flux_unit'] = self.flux_unit

            # Telescope location
            tlscp_group = fileobj.create_group('telescope_parms')
            tlscp_group['latitude'] = self.latitude
            tlscp_group['longitude'] = self.longitude
            tlscp_group['altitude'] = self.altitude
            tlscp_group['latitude'].attrs['units'] = 'deg'
            tlscp_group['longitude'].attrs['units'] = 'deg'
            tlscp_group['altitude'].attrs['units'] = 'm'
            if 'id' in self.telescope:
                tlscp_group['id'] = self.telescope['id']

            # Spectral axes and bandpass
            spec_group = fileobj.create_group('spectral_info')
            spec_group['freq_resolution'] = self.freq_resolution
            spec_group['freq_resolution'].attrs['units'] = 'Hz'
            spec_group['freqs'] = self.channels
            spec_group['freqs'].attrs['units'] = 'Hz'
            if self.lags is not None:
                spec_group['lags'] = self.lags
                spec_group['lags'].attrs['units'] = 's'
            spec_group['bp'] = self.bp
            spec_group['bp_wts'] = self.bp_wts

            if self.simparms_file is not None:
                sim_group = fileobj.create_group('simparms')
                sim_group['simfile'] = self.simparms_file

            # Antenna element description
            antelem_group = fileobj.create_group('antenna_element')
            antelem_group['shape'] = self.telescope['shape']
            antelem_group['size'] = self.telescope['size']
            antelem_group['size'].attrs['units'] = 'm'
            antelem_group['ocoords'] = self.telescope['ocoords']
            antelem_group['orientation'] = self.telescope['orientation']
            if self.telescope['ocoords'] != 'dircos':
                antelem_group['orientation'].attrs['units'] = 'deg'
            if 'groundplane' in self.telescope:
                if self.telescope['groundplane'] is not None:
                    antelem_group['groundplane'] = self.telescope['groundplane']

            if self.layout:
                layout_group = fileobj.create_group('layout')
                layout_group['positions'] = self.layout['positions']
                layout_group['positions'].attrs['units'] = 'm'
                layout_group['positions'].attrs['coords'] = self.layout['coords']
                layout_group['labels'] = self.layout['labels']
                layout_group['ids'] = self.layout['ids']

            timing_group = fileobj.create_group('timing')
            timing_group['t_obs'] = self.t_obs
            timing_group['n_acc'] = self.n_acc
            if self.t_acc:
                timing_group['t_acc'] = self.t_acc
            timing_group['timestamps'] = NP.asarray(self.timestamp)

            sky_group = fileobj.create_group('skyparms')
            sky_group['pointing_coords'] = self.pointing_coords
            sky_group['phase_center_coords'] = self.phase_center_coords
            sky_group['skycoords'] = self.skycoords
            sky_group['LST'] = NP.asarray(self.lst).ravel()
            sky_group['LST'].attrs['units'] = 'deg'
            sky_group['pointing_center'] = self.pointing_center
            sky_group['phase_center'] = self.phase_center

            array_group = fileobj.create_group('array')
            # label_lengths = [len(label[0]) for label in self.labels]
            # maxlen = max(label_lengths)
            # labels = NP.asarray(self.labels, dtype=[('A2', '|S{0:0d}'.format(maxlen)), ('A1', '|S{0:0d}'.format(maxlen))])
            # if isinstance(self.labels, list):
            #     str_dtype = str(NP.asarray(self.labels).dtype)
            # elif isinstance(self.labels, NP.ndarray):
            #     str_dtype = str(NP.asarray(self.labels.tolist()).dtype)
            str_dtype = self.layout['labels'].dtype.str
            labels = NP.asarray(self.labels, dtype=[('A2', str_dtype), ('A1', str_dtype)])
            array_group['labels'] = labels
            array_group['baselines'] = self.baselines
            array_group['baseline_coords'] = self.baseline_coords
            array_group['baselines'].attrs['coords'] = 'local-ENU'
            array_group['baselines'].attrs['units'] = 'm'
            array_group['projected_baselines'] = self.projected_baselines
            # Fix: these two attrs were set on 'baselines' in the original,
            # clobbering its 'local-ENU' coords tag instead of annotating
            # the projected baselines dataset
            array_group['projected_baselines'].attrs['coords'] = 'eq-XYZ'
            array_group['projected_baselines'].attrs['units'] = 'm'

            instr_group = fileobj.create_group('instrument')
            instr_group['effective_area'] = self.A_eff
            instr_group['effective_area'].attrs['units'] = 'm^2'
            instr_group['efficiency'] = self.eff_Q
            if self.Tsysinfo:
                # Fix: NP.float removed in NumPy >= 1.20; use builtin float
                instr_group['Trx'] = NP.asarray([elem['Trx'] for elem in self.Tsysinfo], dtype=float)
                instr_group['Tant0'] = NP.asarray([elem['Tant']['T0'] for elem in self.Tsysinfo], dtype=float)
                instr_group['f0'] = NP.asarray([elem['Tant']['f0'] for elem in self.Tsysinfo], dtype=float)
                instr_group['spindex'] = NP.asarray([elem['Tant']['spindex'] for elem in self.Tsysinfo], dtype=float)
                instr_group['Trx'].attrs['units'] = 'K'
                instr_group['Tant0'].attrs['units'] = 'K'
                instr_group['f0'].attrs['units'] = 'Hz'
                # Missing Tnet entries are stored as the sentinel -999
                instr_group['Tnet'] = NP.asarray([elem['Tnet'] if elem['Tnet'] is not None else -999 for elem in self.Tsysinfo], dtype=float)
                instr_group['Tnet'].attrs['units'] = 'K'
            instr_group['Tsys'] = self.Tsys
            instr_group['Tsys'].attrs['units'] = 'K'

            vis_group = fileobj.create_group('visibilities')
            visfreq_group = vis_group.create_group('freq_spectrum')
            if self.vis_rms_freq is not None:
                visfreq_group['rms'] = self.vis_rms_freq
                visfreq_group['rms'].attrs['units'] = 'Jy'
            if self.vis_freq is not None:
                visfreq_group['vis'] = self.vis_freq
                visfreq_group['vis'].attrs['units'] = 'Jy'
            if self.skyvis_freq is not None:
                visfreq_group['skyvis'] = self.skyvis_freq
                visfreq_group['skyvis'].attrs['units'] = 'Jy'
            if self.vis_noise_freq is not None:
                visfreq_group['noise'] = self.vis_noise_freq
                visfreq_group['noise'].attrs['units'] = 'Jy'

            vislags_group = vis_group.create_group('delay_spectrum')
            if self.vis_lag is not None:
                vislags_group['vis'] = self.vis_lag
                vislags_group['vis'].attrs['units'] = 'Jy Hz'
            if self.skyvis_lag is not None:
                vislags_group['skyvis'] = self.skyvis_lag
                vislags_group['skyvis'].attrs['units'] = 'Jy Hz'
            if self.vis_noise_lag is not None:
                vislags_group['noise'] = self.vis_noise_lag
                vislags_group['noise'].attrs['units'] = 'Jy Hz'

            if self.gradient_mode is not None:
                visgradient_group = fileobj.create_group('gradients')
                for gradkey in self.gradient:
                    visgradient_group[gradkey] = self.gradient[gradkey]

            if self.gaininfo is not None:
                gains_group = fileobj.create_group('gaininfo')
                gains_group['gainsfile'] = outfile+'.gains.hdf5'
                # Fix: use the path string directly instead of reading back
                # the dataset via Dataset.value, which was removed in h5py 3.x
                self.gaininfo.write_gaintable(outfile+'.gains.hdf5')

            if self.blgroups is not None:
                blinfo = fileobj.create_group('blgroupinfo')
                blgrp = blinfo.create_group('groups')
                for blkey in self.blgroups:
                    blgrp[str(blkey)] = self.blgroups[blkey]
                revmap = blinfo.create_group('reversemap')
                for blkey in self.bl_reversemap:
                    revmap[str(blkey)] = self.bl_reversemap[blkey]

    if verbose:
        print('\tInterferometer array information written successfully to file on disk:\n\t\t{0}\n'.format(filename))

    # Optional compact NPZ dump of the essential data products
    if npz:
        if (self.vis_freq is not None) and (self.vis_noise_freq is not None):
            NP.savez_compressed(outfile+'.npz', skyvis_freq=self.skyvis_freq, vis_freq=self.vis_freq, vis_noise_freq=self.vis_noise_freq, lst=self.lst, freq=self.channels, timestamp=self.timestamp, bl=self.baselines, bl_length=self.baseline_lengths)
        else:
            NP.savez_compressed(outfile+'.npz', skyvis_freq=self.skyvis_freq, lst=self.lst, freq=self.channels, timestamp=self.timestamp, bl=self.baselines, bl_length=self.baseline_lengths)
        if verbose:
            print('\tInterferometer array information written successfully to NPZ file on disk:\n\t\t{0}\n'.format(outfile+'.npz'))

    # Optional UVFITS export, delegated to write_uvfits()
    if uvfits_parms is not None:
        self.write_uvfits(outfile, uvfits_parms=uvfits_parms, overwrite=overwrite, verbose=verbose)
#############################################################################
def pyuvdata_write(self, outfile, formats=None, uvfits_parms=None,
                   datapool=None, overwrite=False, verbose=True):
    """
    -------------------------------------------------------------------------
    Saves the interferometer array information to disk in various formats
    through pyuvdata module

    Inputs:

    outfile      [string] Filename with full path to be saved to. Three
                 UVFITS files will also be created whose names will be
                 outfile+'-noiseless', outfile+'-noisy' and
                 outfile+'-noise' appended with '.uvfits'

    Keyword Input(s):

    formats      [list] List of formats for the data to be written in.
                 Accepted values include 'uvfits', and 'uvh5'. If 'uvfits'
                 is included in this list, then uvfits_parms must be
                 provided.

    uvfits_parms
                 [dictionary] specifies basic parameters required for
                 saving in UVFITS format. This will be used only if the
                 keyword input formats includes 'uvfits'. If set to None
                 (default), the data will not be saved in UVFITS format.
                 To save in UVFITS format, the following keys and
                 values are required:
                 'ref_point'  [dictionary] Contains information about the
                              reference position to which projected
                              baselines and rotated visibilities are to
                              be computed. Default=None (no additional
                              phasing will be performed). It must
                              contain the following keys with the
                              following values:
                              'coords'   [string] Refers to the
                                         coordinate system in which value
                                         in key 'location' is specified
                                         in. Accepted values are 'radec',
                                         'hadec', 'altaz' and 'dircos'
                              'location' [numpy array] Must be a Mx2 (if
                                         value in key 'coords' is set to
                                         'radec', 'hadec', 'altaz' or
                                         'dircos') or Mx3 (if value in
                                         key 'coords' is set to
                                         'dircos'). M can be 1 or equal
                                         to number of timestamps. If M=1,
                                         the same reference point in the
                                         same coordinate system will be
                                         repeated for all timestamps. If
                                         value under key 'coords' is set
                                         to 'radec', 'hadec' or 'altaz',
                                         the value under this key
                                         'location' must be in units of
                                         degrees.
                 'method'     [string] specifies method to be used in
                              saving in UVFITS format. Accepted values are
                              'uvdata', 'uvfits' or None (default). If set
                              to 'uvdata', the UVFITS writer in uvdata
                              module is used. If set to 'uvfits', the
                              in-house UVFITS writer is used. If set to
                              None, first uvdata module will be attempted
                              but if it fails then the in-house UVFITS
                              writer will be tried.

    'datapool'   [NoneType or list] Indicates which portion of the data
                 is to be written to the external file. If set to None
                 (default), all of skyvis_freq, vis_freq, and
                 vis_noise_freq attributes will be written. Otherwise,
                 accepted values are a list of strings that can include
                 'noiseless' (skyvis_freq attribute), 'noisy' (vis_freq
                 attribute), and 'noise' (vis_noise_freq attribute).

    overwrite    [boolean] True indicates overwrite even if a file already
                 exists. Default = False (does not overwrite). Beware this
                 may not work reliably if uvfits_method is set to None or
                 'uvdata' and hence always better to make sure the output
                 file does not exist already

    verbose      [boolean] If True (default), prints diagnostic and progress
                 messages. If False, suppress printing such messages.
    -------------------------------------------------------------------------
    """

    # Normalize and validate datapool: keep only recognized strings
    if datapool is None:
        datapool = ['noiseless', 'noisy', 'noise']
    if not isinstance(datapool, list):
        raise TypeError('Keyword input datapool must be a list')
    else:
        datapool_list = [dpool.lower() for dpool in datapool if (isinstance(dpool, str) and dpool.lower() in ['noiseless', 'noise', 'noisy'])]
        if len(datapool_list) == 0:
            raise ValueError('No valid datapool string found in input datapool')
        datapool = datapool_list

    # Normalize and validate formats the same way. The original iterated
    # formats unchecked: formats=None crashed with an opaque TypeError, and
    # an unrecognized entry either raised NameError (dataobj undefined) or
    # silently reused a stale dataobj from a previous iteration.
    if not isinstance(formats, list):
        raise TypeError('Keyword input formats must be a list')
    formats_list = [fmt.lower() for fmt in formats if (isinstance(fmt, str) and fmt.lower() in ['uvfits', 'uvh5'])]
    if len(formats_list) == 0:
        raise ValueError('No valid format string found in input formats')
    formats = formats_list

    for fmt in formats:
        if fmt == 'uvh5':
            # uvh5 needs no additional phasing parameters
            dataobj = InterferometerData(self, ref_point=None, datakeys=datapool)
            uvfits_method = None
        if fmt == 'uvfits':
            if uvfits_parms is not None:
                if not isinstance(uvfits_parms, dict):
                    raise TypeError('Input uvfits_parms must be a dictionary')
                if 'ref_point' not in uvfits_parms:
                    uvfits_parms['ref_point'] = None
                if 'method' not in uvfits_parms:
                    uvfits_parms['method'] = None
            else:
                uvfits_parms = {'ref_point': None, 'method': None}
            uvfits_method = uvfits_parms['method']
            dataobj = InterferometerData(self, ref_point=uvfits_parms['ref_point'], datakeys=datapool)
        # Write one file per requested data product that is present
        for datakey in dataobj.infodict['data_array']:
            if dataobj.infodict['data_array'][datakey] is not None:
                dataobj.write(outfile+'-{0}.{1}'.format(datakey, fmt), datatype=datakey, fmt=fmt.upper(), uvfits_method=uvfits_method, overwrite=overwrite)
#################################################################################
class ApertureSynthesis(object):

    """
    ----------------------------------------------------------------------------
    Class to manage aperture synthesis of visibility measurements of a
    multi-element interferometer array.

    Attributes:

    ia          [instance of class InterferometerArray] Instance of class
                InterferometerArray created at the time of instantiating an
                object of class ApertureSynthesis

    baselines   [M x 3 Numpy array] The baseline vectors associated with the
                M interferometers in SI units. The coordinate system of these
                vectors is local East, North, Up system

    blxyz       [M x 3 Numpy array] The baseline vectors associated with the
                M interferometers in SI units. The coordinate system of these
                vectors is X, Y, Z in equatorial coordinates

    uvw_lambda  [M x 3 x Nt numpy array] Baseline vectors phased to the phase
                center of each accumulation. M is the number of baselines, Nt
                is the number of accumulations and 3 denotes U, V and W
                components. This is in units of physical distance (usually in m)

    uvw         [M x 3 x Nch x Nt numpy array] Baseline vectors phased to the
                phase center of each accumulation at each frequency. M is the
                number of baselines, Nt is the number of accumulations, Nch is
                the number of frequency channels, and 3 denotes U, V and W
                components. This is uvw_lambda / wavelength and in units of
                number of wavelengths

    blc         [numpy array] 3-element numpy array specifying bottom left
                corner of the grid coincident with bottom left interferometer
                location in UVW coordinate system (same units as uvw)

    trc         [numpy array] 3-element numpy array specifying top right
                corner of the grid coincident with top right interferometer
                location in UVW coordinate system (same units as uvw)

    grid_blc    [numpy array] 3-element numpy array specifying bottom left
                corner of the grid in UVW coordinate system including any
                padding used (same units as uvw)

    grid_trc    [numpy array] 3-element numpy array specifying top right
                corner of the grid in UVW coordinate system including any
                padding used (same units as uvw)

    gridu       [numpy array] 3-dimensional numpy meshgrid array specifying
                grid u-locations in units of uvw in the UVW coordinate system
                whose corners are specified by attributes grid_blc and grid_trc

    gridv       [numpy array] 3-dimensional numpy meshgrid array specifying
                grid v-locations in units of uvw in the UVW coordinate system
                whose corners are specified by attributes grid_blc and grid_trc

    gridw       [numpy array] 3-dimensional numpy meshgrid array specifying
                grid w-locations in units of uvw in the UVW coordinate system
                whose corners are specified by attributes grid_blc and grid_trc

    grid_ready  [boolean] set to True if the gridding has been performed,
                False if grid is not available yet. Set to False in case
                blc, trc, grid_blc or grid_trc is updated indicating gridding
                is to be performed again

    f           [numpy vector] frequency channels in Hz

    df          [scalar] Frequency resolution (in Hz)

    latitude    [Scalar] Latitude of the interferometer's location. Default
                is 34.0790 degrees North corresponding to that of the VLA.

    lst         [list] List of LST (in degrees) for each timestamp

    n_acc       [scalar] Number of accumulations

    pointing_center
                [2-column numpy array] Pointing center (latitude and
                longitude) of the observation at a given timestamp. This is
                where the telescopes will be phased up to as reference.
                Coordinate system for the pointing_center is specified by
                another attribute pointing_coords.

    phase_center
                [2-column numpy array] Phase center (latitude and
                longitude) of the observation at a given timestamp. This is
                where the telescopes will be phased up to as reference.
                Coordinate system for the phase_center is specified by another
                attribute phase_center_coords.

    pointing_coords
                [string] Coordinate system for telescope pointing. Accepted
                values are 'radec' (RA-Dec), 'hadec' (HA-Dec) or 'altaz'
                (Altitude-Azimuth). Default = 'hadec'.

    phase_center_coords
                [string] Coordinate system for array phase center. Accepted
                values are 'radec' (RA-Dec), 'hadec' (HA-Dec) or 'altaz'
                (Altitude-Azimuth). Default = 'hadec'.

    timestamp   [list] List of timestamps during the observation

    Member functions:

    __init__()      Initialize an instance of class ApertureSynthesis which
                    manages information on a aperture synthesis with an
                    interferometer array.

    genUVW()        Generate U, V, W (in units of number of wavelengths) by
                    phasing the baseline vectors to the phase centers of each
                    pointing at all frequencies

    reorderUVW()    Reorder U, V, W (in units of number of wavelengths) of shape
                    nbl x 3 x nchan x n_acc to 3 x (nbl x nchan x n_acc)

    setUVWgrid()    Set up U, V, W grid (in units of number of wavelengths)
                    based on the synthesized U, V, W
    ----------------------------------------------------------------------------
    """

    def __init__(self, interferometer_array=None):

        """
        ------------------------------------------------------------------------
        Initialize the ApertureSynthesis class which manages information on an
        aperture synthesis with an interferometer array.

        Class attributes initialized are:
        ia, f, df, lst, timestamp, baselines, blxyz, phase_center, n_acc,
        phase_center_coords, pointing_center, pointing_coords, latitude, blc,
        trc, grid_blc, grid_trc, grid_ready, uvw, uvw_lambda, gridu, gridv,
        gridw

        Read docstring of class ApertureSynthesis for details on these
        attributes.

        Keyword input(s):

        interferometer_array
                     [instance of class InterferometerArray] Instance of class
                     InterferometerArray used to initialize an instance of
                     class ApertureSynthesis

        Raises:
        NameError if interferometer_array is not provided, TypeError if it is
        not an instance of class InterferometerArray
        ------------------------------------------------------------------------
        """

        if interferometer_array is None:
            raise NameError('No input interferometer_array provided')
        if not isinstance(interferometer_array, InterferometerArray):
            raise TypeError('Input interferometer_array must be an instance of class InterferometerArray')
        self.ia = interferometer_array

        self.f = self.ia.channels
        self.df = interferometer_array.freq_resolution
        self.n_acc = interferometer_array.n_acc
        self.lst = interferometer_array.lst
        self.phase_center = interferometer_array.phase_center
        self.pointing_center = interferometer_array.pointing_center
        self.phase_center_coords = interferometer_array.phase_center_coords
        self.pointing_coords = interferometer_array.pointing_coords
        self.baselines = interferometer_array.baselines
        self.timestamp = interferometer_array.timestamp
        self.latitude = interferometer_array.latitude
        # Convert local ENU baselines to equatorial XYZ
        self.blxyz = GEOM.enu2xyz(self.baselines, self.latitude, units='degrees')
        self.uvw_lambda = None
        self.uvw = None
        # UVW corners are 3-element (U, V, W) as documented; they are
        # overwritten by setUVWgrid() once gridding is performed
        self.blc = NP.zeros(3)
        self.trc = NP.zeros(3)
        self.grid_blc = NP.zeros(3)
        self.grid_trc = NP.zeros(3)
        self.gridu, self.gridv, self.gridw = None, None, None
        self.grid_ready = False

    #############################################################################

    def genUVW(self):

        """
        ------------------------------------------------------------------------
        Generate U, V, W (in units of number of wavelengths) by phasing the
        baseline vectors to the phase centers of each pointing at all
        frequencies

        Raises:
        ValueError if attribute phase_center_coords is not one of 'hadec',
        'radec' or 'altaz'
        ------------------------------------------------------------------------
        """

        # Express the phase centers in (HA, Dec) regardless of input system
        if self.phase_center_coords == 'hadec':
            pc_hadec = self.phase_center
        elif self.phase_center_coords == 'radec':
            # HA = LST - RA; for RA-Dec phase centers use HA=LST with Dec=0
            # placeholder column replaced below -- matches original behavior
            pc_hadec = NP.hstack((NP.asarray(self.lst).reshape(-1,1), NP.zeros(len(self.lst)).reshape(-1,1)))
        elif self.phase_center_coords == 'altaz':
            pc_altaz = self.phase_center
            pc_hadec = GEOM.altaz2hadec(pc_altaz, self.latitude, units='degrees')
        else:
            raise ValueError('Attribute phase_center_coords must be set to one of "hadec", "radec" or "altaz"')

        pc_hadec = NP.radians(pc_hadec)
        ha = pc_hadec[:,0]
        dec = pc_hadec[:,1]

        # Standard XYZ -> UVW rotation matrix, one 3x3 matrix per accumulation
        rotmat = NP.asarray([[NP.sin(ha), NP.cos(ha), NP.zeros_like(ha)],
                             [-NP.sin(dec)*NP.cos(ha), NP.sin(dec)*NP.sin(ha), NP.cos(dec)],
                             [NP.cos(dec)*NP.cos(ha), -NP.cos(dec)*NP.sin(ha), NP.sin(dec)]])
        # Contract XYZ component axis of baselines with rotmat: (M, 3, Nt)
        self.uvw_lambda = NP.tensordot(self.blxyz, rotmat, axes=[1,1])
        wl = FCNST.c / self.f
        # Scale physical-length UVW to number of wavelengths per channel
        self.uvw = self.uvw_lambda[:,:,NP.newaxis,:] / wl.reshape(1,1,-1,1)

    #############################################################################

    def reorderUVW(self):

        """
        ------------------------------------------------------------------------
        Reorder U, V, W (in units of number of wavelengths) of shape
        nbl x 3 x nchan x n_acc to 3 x (nbl x nchan x n_acc)

        Outputs:
        Reordered UVW array of shape 3 x (nbl x nchan x n_acc)
        ------------------------------------------------------------------------
        """

        reorderedUVW = NP.swapaxes(self.uvw, 0, 1) # now 3 x Nbl x nchan x n_acc
        reorderedUVW = reorderedUVW.reshape(3,-1) # now 3 x (Nbl x nchan x n_acc)
        return reorderedUVW

    #############################################################################

    def setUVWgrid(self, spacing=0.5, pad=None, pow2=True):

        """
        ------------------------------------------------------------------------
        Routine to produce a grid based on the UVW spacings of the
        interferometer array

        Inputs:

        spacing     [Scalar] Positive value indicating the upper limit on grid
                    spacing in uvw-coordinates desirable at the lowest
                    wavelength (max frequency). Default = 0.5

        pad         [List] Padding to be applied around the locations
                    before forming a grid. List elements should be positive. If
                    it is a one-element list, the element is applicable to all
                    x, y and z axes. If list contains four or more elements,
                    only the first three elements are considered one for each
                    axis. Default = None (no padding).

        pow2        [Boolean] If set to True, the grid is forced to have a size
                    a next power of 2 relative to the actual size required. If
                    False, gridding is done with the appropriate size as
                    determined by spacing. Default = True.
        ------------------------------------------------------------------------
        """

        if self.uvw is None:
            self.genUVW()
        uvw = self.reorderUVW()
        blc = NP.amin(uvw, axis=1)
        trc = NP.amax(uvw, axis=1)
        # Make the grid symmetric about the origin in all three axes
        self.trc = NP.amax(NP.abs(NP.vstack((blc, trc))), axis=0)
        self.blc = -1 * self.trc
        # Bug fix: honor the caller's pow2 keyword (was hard-coded to True)
        self.gridu, self.gridv, self.gridw = GRD.grid_3d([(self.blc[0], self.trc[0]), (self.blc[1], self.trc[1]), (self.blc[2], self.trc[2])], pad=pad, spacing=spacing, pow2=pow2)
        self.grid_blc = NP.asarray([self.gridu.min(), self.gridv.min(), self.gridw.min()])
        self.grid_trc = NP.asarray([self.gridu.max(), self.gridv.max(), self.gridw.max()])
        self.grid_ready = True
################################################################################
class InterferometerData(object):
"""
----------------------------------------------------------------------------
Class to act as an interface between PRISim object and external data
formats.
Attributes:
infodict [dictionary] Dictionary consisting of many attributes loaded
from the PRISim object. This will be used to convert to info
required in external data formats
Member functions:
__init__() Initialize an instance of class InterferometerData
createUVData()
Create an instance of class UVData
write() Write an instance of class InterferometerData into specified
formats. Currently writes in UVFITS format
----------------------------------------------------------------------------
"""
def __init__(self, prisim_object, ref_point=None, datakeys=None):
"""
------------------------------------------------------------------------
Initialize an instance of class InterferometerData.
Class attributes initialized are:
infodict
Inputs:
prisim_object
[instance of class InterferometerArray] Instance of
class InterferometerArray used to initialize an
instance of class InterferometerData.
ref_point [dictionary] Contains information about the reference
position to which projected baselines and rotated
visibilities are to be computed. Default=None (no additional
phasing will be performed). It must be contain the following
keys with the following values:
'coords' [string] Refers to the coordinate system in
which value in key 'location' is specified in.
Accepted values are 'radec', 'hadec', 'altaz'
and 'dircos'
'location' [numpy array] Must be a Mx2 (if value in key
'coords' is set to 'radec', 'hadec', 'altaz' or
'dircos') or Mx3 (if value in key 'coords' is
set to 'dircos'). M can be 1 or equal to number
of timestamps. If M=1, the same reference point
in the same coordinate system will be repeated
for all tiemstamps. If value under key 'coords'
is set to 'radec', 'hadec' or 'altaz', the
value under this key 'location' must be in
units of degrees.
datakeys [NoneType or list] Indicates which portion of the data
is to be written to the UVFITS file. If set to None
(default), all of skyvis_freq, vis_freq, and
vis_noise_freq attributes will be written. Otherwise,
accepted values are a list of strings that can include
'noiseless' (skyvis_freq attribute), 'noisy' (vis_freq
attribute), and 'noise' (vis_nosie_freq attribute).
------------------------------------------------------------------------
"""
try:
prisim_object
except NameError:
raise NameError('Input prisim_object not specified')
if ref_point is not None:
prisim_object.rotate_visibilities(ref_point)
if not isinstance(prisim_object, InterferometerArray):
raise TypeError('Inout prisim_object must be an instance of class InterferometerArray')
if datakeys is None:
datakeys = ['noiseless', 'noisy', 'noise']
if not isinstance(datakeys, list):
raise TypeError('Input datakeys must be a list')
else:
datapool_list = [dpool.lower() for dpool in datakeys if (isinstance(dpool, str) and dpool.lower() in ['noiseless', 'noise', 'noisy'])]
if len(datapool_list) == 0:
raise ValueError('No valid datapool string found in input uvfits_parms')
datakeys = datapool_list
# datatypes = ['noiseless', 'noisy', 'noise']
visibilities = {key: None for key in datakeys}
for key in visibilities:
# Conjugate visibilities for compatibility with UVFITS and CASA imager
if key == 'noiseless':
visibilities[key] = prisim_object.skyvis_freq.conj()
if key == 'noisy':
if prisim_object.vis_freq is not None:
visibilities[key] = prisim_object.vis_freq.conj()
if key == 'noise':
if prisim_object.vis_noise_freq is not None:
visibilities[key] = prisim_object.vis_noise_freq.conj()
self.infodict = {}
self.infodict['Ntimes'] = prisim_object.n_acc
self.infodict['Nbls'] = prisim_object.baselines.shape[0]
self.infodict['Nblts'] = self.infodict['Nbls'] * self.infodict['Ntimes']
self.infodict['Nfreqs'] = prisim_object.channels.size
self.infodict['Npols'] = 1
self.infodict['Nspws'] = 1
self.infodict['data_array'] = {'noiseless': None, 'noisy': None, 'noise': None}
for key in visibilities:
if visibilities[key] is not None:
self.infodict['data_array'][key] = NP.transpose(NP.transpose(visibilities[key], (2,0,1)).reshape(self.infodict['Nblts'], self.infodict['Nfreqs'], self.infodict['Nspws'], self.infodict['Npols']), (0,2,1,3)) # (Nbls, Nfreqs, Ntimes) -> (Ntimes, Nbls, Nfreqs) -> (Nblts, Nfreqs, Nspws=1, Npols=1) -> (Nblts, Nspws=1, Nfreqs, Npols=1)
self.infodict['vis_units'] = 'Jy'
self.infodict['nsample_array'] = NP.ones((self.infodict['Nblts'], self.infodict['Nspws'], self.infodict['Nfreqs'], self.infodict['Npols']))
self.infodict['flag_array'] = NP.zeros((self.infodict['Nblts'], self.infodict['Nspws'], self.infodict['Nfreqs'], self.infodict['Npols']), dtype=NP.bool)
self.infodict['spw_array'] = NP.arange(self.infodict['Nspws'])
self.infodict['uvw_array'] = NP.transpose(prisim_object.projected_baselines, (2,0,1)).reshape(self.infodict['Nblts'], 3)
time_array = NP.asarray(prisim_object.timestamp).reshape(-1,1) + NP.zeros(self.infodict['Nbls']).reshape(1,-1)
self.infodict['time_array'] = time_array.ravel()
lst_array = NP.radians(NP.asarray(prisim_object.lst).reshape(-1,1)) + NP.zeros(self.infodict['Nbls']).reshape(1,-1)
self.infodict['lst_array'] = lst_array.ravel()
labels_A1 = prisim_object.labels['A1']
labels_A2 = prisim_object.labels['A2']
if prisim_object.layout:
id_A1 = [prisim_object.layout['ids'][prisim_object.layout['labels'].tolist().index(albl)] for albl in labels_A1]
id_A2 = [prisim_object.layout['ids'][prisim_object.layout['labels'].tolist().index(albl)] for albl in labels_A2]
id_A1 = NP.asarray(id_A1, dtype=int)
id_A2 = NP.asarray(id_A2, dtype=int)
else:
try:
id_A1 = prisim_object.labels['A1'].astype(NP.int)
id_A2 = prisim_object.labels['A2'].astype(NP.int)
except ValueError:
raise ValueError('Could not convert antenna labels to numbers')
ant_1_array = id_A1
ant_2_array = id_A2
ant_1_array = ant_1_array.reshape(1,-1) + NP.zeros(self.infodict['Ntimes'], dtype=NP.int).reshape(-1,1)
ant_2_array = ant_2_array.reshape(1,-1) + NP.zeros(self.infodict['Ntimes'], dtype=NP.int).reshape(-1,1)
self.infodict['ant_1_array'] = ant_1_array.ravel()
self.infodict['ant_2_array'] = ant_2_array.ravel()
self.infodict['baseline_array'] = 2048 * (self.infodict['ant_2_array'] + 1) + (self.infodict['ant_1_array'] + 1) + 2**16
self.infodict['freq_array'] = prisim_object.channels.reshape(self.infodict['Nspws'],-1)
self.infodict['polarization_array'] = NP.asarray([-5]).reshape(self.infodict['Npols']) # stokes 1:4 (I,Q,U,V); circular -1:-4 (RR,LL,RL,LR); linear -5:-8 (XX,YY,XY,YX)
if uvdata_module_found:
if LooseVersion(pyuvdata.__version__)>=LooseVersion('1.3.2'):
self.infodict['integration_time'] = prisim_object.t_acc[0] + NP.zeros(self.infodict['Nblts']) # Replicate to be of shape (Nblts,) to be Baseline-Dependent-Averaging compliant with pyuvdata >= v1.3.2
else:
self.infodict['integration_time'] = prisim_object.t_acc[0]
else:
self.infodict['integration_time'] = prisim_object.t_acc[0] + NP.zeros(self.infodict['Nblts'])
self.infodict['channel_width'] = prisim_object.freq_resolution
# ----- Observation information ------
pointing_center = prisim_object.pointing_center
pointing_coords = prisim_object.pointing_coords
if pointing_coords == 'dircos':
pointing_center_dircos = pointing_center
pointing_center_altaz = GEOM.dircos2altaz(pointing_center_dircos, units='degrees')
pointing_center_hadec = GEOM.altaz2hadec(pointing_center_altaz, prisim_object.latitude, units='degrees')
pointing_center_ra = NP.asarray(prisim_object.lst) - pointing_center_hadec[:,0]
pointing_center_radec = NP.hstack((pointing_center_ra.reshape(-1,1), pointing_center_hadec[:,1].reshape(-1,1)))
pointing_coords = 'radec'
elif pointing_coords == 'altaz':
pointing_center_altaz = pointing_center
pointing_center_hadec = GEOM.altaz2hadec(pointing_center_altaz, prisim_object.latitude, units='degrees')
pointing_center_ra = NP.asarray(prisim_object.lst) - pointing_center_hadec[:,0]
pointing_center_radec = NP.hstack((pointing_center_ra.reshape(-1,1), pointing_center_hadec[:,1].reshape(-1,1)))
pointing_coords = 'radec'
elif pointing_coords == 'hadec':
pointing_center_hadec = pointing_center
pointing_center_ra = NP.asarray(prisim_object.lst) - pointing_center_hadec[:,0]
pointing_center_radec = NP.hstack((pointing_center_ra.reshape(-1,1), pointing_center_hadec[:,1].reshape(-1,1)))
pointing_coords = 'radec'
elif pointing_coords == 'radec':
pointing_center_radec = pointing_center
else:
raise ValueError('Invalid pointing center coordinates')
phase_center = prisim_object.phase_center
phase_center_coords = prisim_object.phase_center_coords
if phase_center_coords == 'dircos':
phase_center_dircos = phase_center
phase_center_altaz = GEOM.dircos2altaz(phase_center_dircos, units='degrees')
phase_center_hadec = GEOM.altaz2hadec(phase_center_altaz, prisim_object.latitude, units='degrees')
phase_center_ra = NP.asarray(prisim_object.lst) - phase_center_hadec[:,0]
phase_center_radec = NP.hstack((phase_center_ra.reshape(-1,1), phase_center_hadec[:,1].reshape(-1,1)))
phase_center_coords = 'radec'
elif phase_center_coords == 'altaz':
phase_center_altaz = phase_center
phase_center_hadec = GEOM.altaz2hadec(phase_center_altaz, prisim_object.latitude, units='degrees')
phase_center_ra = NP.asarray(prisim_object.lst) - phase_center_hadec[:,0]
phase_center_radec = NP.hstack((phase_center_ra.reshape(-1,1), phase_center_hadec[:,1].reshape(-1,1)))
phase_center_coords = 'radec'
elif phase_center_coords == 'hadec':
phase_center_hadec = phase_center
phase_center_ra = NP.asarray(prisim_object.lst) - phase_center_hadec[:,0]
phase_center_radec = NP.hstack((phase_center_ra.reshape(-1,1), phase_center_hadec[:,1].reshape(-1,1)))
phase_center_coords = 'radec'
elif phase_center_coords == 'radec':
phase_center_radec = phase_center
else:
raise ValueError('Invalid phase center coordinates')
pointing_centers = SkyCoord(ra=pointing_center_radec[:,0], dec=pointing_center_radec[:,1], frame='icrs', unit='deg')
phase_centers = SkyCoord(ra=phase_center_radec[:,0], dec=phase_center_radec[:,1], frame='icrs', unit='deg')
pointing_center_obscenter = pointing_centers[int(prisim_object.n_acc/2)]
phase_center_obscenter = phase_centers[int(prisim_object.n_acc/2)]
self.infodict['object_name'] = 'J{0}{1}'.format(pointing_center_obscenter.ra.to_string(sep='', precision=2, pad=True), pointing_center_obscenter.dec.to_string(sep='', precision=2, alwayssign=True, pad=True))
if 'id' not in prisim_object.telescope:
self.infodict['telescope_name'] = 'custom'
else:
self.infodict['telescope_name'] = prisim_object.telescope['id']
self.infodict['instrument'] = self.infodict['telescope_name']
self.infodict['telescope_location'] = NP.asarray([prisim_object.latitude, prisim_object.longitude, prisim_object.altitude])
self.infodict['history'] = 'PRISim'
self.infodict['phase_center_epoch'] = 2000.0
is_phased = NP.allclose(phase_centers.ra.value, phase_centers.ra.value[::-1]) and NP.allclose(phase_centers.dec.value, phase_centers.dec.value[::-1])
self.infodict['is_phased'] = is_phased
# ----- antenna information ------
self.infodict['Nants_data'] = len(set(prisim_object.labels['A1']) | set(prisim_object.labels['A2']))
if prisim_object.layout:
# self.infodict['Nants_telescope'] = len(set(prisim_object.labels['A1']) | set(prisim_object.labels['A2']))
self.infodict['Nants_telescope'] = prisim_object.layout['ids'].size
else:
self.infodict['Nants_telescope'] = self.infodict['Nants_data']
if prisim_object.layout:
self.infodict['antenna_names'] = prisim_object.layout['labels']
self.infodict['antenna_numbers'] = prisim_object.layout['ids']
else:
self.infodict['antenna_names'] = NP.asarray(list(set(prisim_object.labels['A1']) | set(prisim_object.labels['A2'])))
try:
self.infodict['antenna_numbers'] = NP.asarray(list(set(prisim_object.labels['A1']) | set(prisim_object.labels['A2']))).astype(NP.int)
except ValueError:
raise ValueError('Count not convert antenna labels to numbers')
# ----- Optional information ------
self.infodict['dateobs'] = Time(prisim_object.timestamp[0], format='jd', scale='utc').iso
self.infodict['phase_center_ra'] = NP.radians(phase_center_obscenter.ra.value)
self.infodict['phase_center_dec'] = NP.radians(phase_center_obscenter.dec.value)
self.infodict['antenna_positions'] = NP.zeros((self.infodict['Nants_telescope'],3), dtype=NP.float)
if hasattr(prisim_object, 'layout'):
if prisim_object.layout:
if not isinstance(prisim_object.layout['positions'], NP.ndarray):
warnings.warn('Antenna positions must be a numpy array. Proceeding with default values.')
else:
if prisim_object.layout['positions'].shape != (self.infodict['Nants_telescope'],3):
warnings.warn('Number of antennas in prisim_object found to be incompatible with number of unique antennas found. Proceeding with default values.')
else:
x, y, z = GEOM.lla2ecef(*self.infodict['telescope_location'], units='degrees')
telscp_loc = NP.asarray([x[0], y[0], z[0]])
self.infodict['antenna_positions'] = GEOM.enu2ecef(prisim_object.layout['positions'], {'lat': prisim_object.latitude, 'lon': prisim_object.longitude, 'alt': prisim_object.altitude, 'units': 'degrees'}) - telscp_loc.reshape(1,-1)
# self.infodict['antenna_positions'] = UVUtils.ECEF_from_ENU(prisim_object.layout['positions'], NP.radians(prisim_object.latitude), NP.radians(prisim_object.longitude), prisim_object.altitude) - telscp_loc.reshape(1,-1)
self.infodict['gst0'] = 0.0
self.infodict['rdate'] = ''
self.infodict['earth_omega'] = 360.985
self.infodict['dut1'] = 0.0
self.infodict['timesys'] = 'UTC'
#############################################################################
def createUVData(self, datatype='noiseless'):
"""
------------------------------------------------------------------------
Create an instance of class UVData.
Inputs:
datatype [string] Specifies which visibilities are to be used in
creating the UVData object. Accepted values are 'noiseless'
(default) for noiseless pure-sky visibilities, 'noisy' for
sky visibilities to which noise has been added, or 'noise'
for pure noise visibilities.
Outputs:
dataobj [instance of class UVData] an instance of class UVData
containing visibilities of type specified in datatype. This
object can be used to write to some common external formats
such as UVFITS, etc.
------------------------------------------------------------------------
"""
if not uvdata_module_found:
raise ImportError('uvdata module not found')
if datatype not in ['noiseless', 'noisy', 'noise']:
raise ValueError('Invalid input datatype specified')
attributes_of_uvdata = ['Ntimes', 'Nbls', 'Nblts', 'Nfreqs', 'Npols', 'Nspws', 'data_array', 'vis_units', 'nsample_array', 'flag_array', 'spw_array', 'uvw_array', 'time_array', 'lst_array', 'ant_1_array', 'ant_2_array', 'baseline_array', 'freq_array', 'polarization_array', 'integration_time', 'channel_width', 'object_name', 'telescope_name', 'instrument', 'telescope_location', 'history', 'phase_center_epoch', 'is_phased', 'phase_type', 'Nants_data', 'Nants_telescope', 'antenna_names', 'antenna_numbers', 'dateobs', 'phase_center_ra', 'phase_center_dec', 'antenna_positions']
dataobj = UVData()
for attrkey in attributes_of_uvdata:
if attrkey == 'telescope_location':
x, y, z = GEOM.lla2ecef(*self.infodict[attrkey], units='degrees')
setattr(dataobj, attrkey, NP.asarray([x[0],y[0],z[0]]))
elif attrkey == 'phase_type':
if self.infodict['is_phased']:
setattr(dataobj, attrkey, 'phased')
else:
setattr(dataobj, attrkey, 'drift')
elif attrkey != 'data_array':
setattr(dataobj, attrkey, self.infodict[attrkey])
else:
if datatype in self.infodict[attrkey]:
if self.infodict[attrkey][datatype] is not None:
setattr(dataobj, attrkey, self.infodict[attrkey][datatype])
else:
raise KeyError('Data of specified datatype not found in InterferometerData object')
else:
raise KeyError('Specified datatype not found in InterferometerData object')
return dataobj
#############################################################################
def _blnum_to_antnums(self, blnum):
if self.infodict['Nants_telescope'] > 2048:
raise StandardError('error Nants={Nants}>2048 not supported'.format(Nants=self.infodict['Nants_telescope']))
if NP.min(blnum) > 2**16:
i = (blnum - 2**16) % 2048 - 1
j = (blnum - 2**16 - (i + 1)) / 2048 - 1
else:
i = (blnum) % 256 - 1
j = (blnum - (i + 1)) / 256 - 1
return NP.int32(i), NP.int32(j)
#############################################################################
def _antnums_to_blnum(self, i, j, attempt256=False):
# set the attempt256 keyword to True to (try to) use the older
# 256 standard used in many uvfits files
# (will use 2048 standard if there are more than 256 antennas)
i, j = NP.int64((i, j))
if self.infodict['Nants_telescope'] > 2048:
raise StandardError('cannot convert i,j to a baseline index '
'with Nants={Nants}>2048.'
.format(Nants=self.infodict['Nants_telescope']))
if attempt256:
if (NP.max(i) < 255 and NP.max(j) < 255):
return 256 * (j + 1) + (i + 1)
else:
print('Max antnums are {} and {}'.format(NP.max(i), NP.max(j)))
message = 'antnums_to_baseline: found > 256 antennas, using ' \
'2048 baseline indexing. Beware compatibility ' \
'with CASA etc'
warnings.warn(message)
return NP.int64(2048 * (j + 1) + (i + 1) + 2**16)
#############################################################################
def write(self, outfile, datatype='noiseless', fmt='UVFITS',
          uvfits_method=None, overwrite=False):
    """
    ------------------------------------------------------------------------
    Write an instance of class InterferometerData into specified formats.
    Currently writes in UVFITS format

    Inputs:

    outfile     [string] Filename into which data will be written

    datatype    [string] Specifies which visibilities are to be used in
                creating the UVData object. Accepted values are 'noiseless'
                (default) for noiseless pure-sky visibilities, 'noisy' for
                sky visibilities to which noise has been added, or 'noise'
                for pure noise visibilities.

    fmt         [string] Output file format. Currently accepted values are
                'UVFITS' and 'UVH5'. Default='UVFITS'

    uvfits_method
                [string] Method using which UVFITS output is produced. It
                is only used if fmt is set to 'UVFITS'. Accepted values
                are 'uvdata', 'uvfits' or None (default). If set to
                'uvdata', the UVFITS writer in uvdata module is used. If
                set to 'uvfits', the in-house UVFITS writer is used. If
                set to None, first uvdata module will be attempted but if
                it fails then the in-house UVFITS writer will be tried.

    overwrite   [boolean] True indicates overwrite even if a file already
                exists. Default = False (does not overwrite). Beware this
                may not work reliably if uvfits_method is set to None or
                'uvdata' and hence always better to make sure the output
                file does not exist already
    ------------------------------------------------------------------------
    """
    # --- Validate inputs ---
    try:
        outfile
    except NameError:
        raise NameError('Output filename not specified')
    if not isinstance(outfile, str):
        raise TypeError('Output filename must be a string')
    if datatype not in ['noiseless', 'noisy', 'noise']:
        raise ValueError('Invalid input datatype specified')
    if fmt.lower() not in ['uvfits', 'uvh5']:
        raise ValueError('Output format not supported')

    # Build a UVData object carrying the requested visibility type
    uvdataobj = self.createUVData(datatype=datatype)
    if fmt.lower() == 'uvh5':
        uvdataobj.write_uvh5(outfile, clobber=overwrite)
    if fmt.lower() == 'uvfits':
        write_successful = False
        if uvfits_method not in [None, 'uvfits', 'uvdata']:
            # Unrecognized method: fall back to trying both writers
            uvfits_method = None
        if (uvfits_method is None) or (uvfits_method == 'uvdata'):
            # First attempt: delegate to the pyuvdata UVFITS writer
            try:
                uvdataobj.write_uvfits(outfile, spoof_nonessential=True)
            except Exception as xption1:
                write_successful = False
                if uvfits_method == 'uvdata':
                    # Caller explicitly requested the uvdata writer, so
                    # propagate the failure instead of falling back
                    warnings.warn('Output through UVData module did not work due to the following exception:')
                    raise xption1
                else:
                    # Fall through to the in-house writer below
                    warnings.warn('Output through UVData module did not work. Trying with built-in UVFITS writer')
            else:
                write_successful = True
                print('Data successfully written using uvdata module to {0}'.format(outfile))
                return

        # Try with in-house UVFITS writer
        try:
            # UVFITS convention: negative weight marks a flagged sample
            weights_array = self.infodict['nsample_array'] * NP.where(self.infodict['flag_array'], -1, 1)
            data_array = self.infodict['data_array'][datatype][:, NP.newaxis, NP.newaxis, :, :, :, NP.newaxis]
            weights_array = weights_array[:, NP.newaxis, NP.newaxis, :, :, :, NP.newaxis]
            # uvfits_array_data shape will be (Nblts,1,1,[Nspws],Nfreqs,Npols,3)
            uvfits_array_data = NP.concatenate([data_array.real, data_array.imag, weights_array], axis=6)
            # Convert uvw from meters to seconds (light travel time)
            uvw_array_sec = self.infodict['uvw_array'] / FCNST.c
            # jd_midnight = NP.floor(self.infodict['time_array'][0] - 0.5) + 0.5
            tzero = NP.float32(self.infodict['time_array'][0])
            # uvfits convention is that time_array + relevant PZERO = actual JD
            # We are setting PZERO4 = float32(first time of observation)
            time_array = NP.float32(self.infodict['time_array'] - NP.float64(tzero))
            # NOTE(review): NP.float is a deprecated alias of builtin float
            # and is removed in numpy >= 1.24 -- would need updating there
            int_time_array = (NP.zeros_like((time_array), dtype=NP.float) + self.infodict['integration_time'])
            baselines_use = self._antnums_to_blnum(self.infodict['ant_1_array'], self.infodict['ant_2_array'], attempt256=True)
            # Set up dictionaries for populating hdu
            # Note that uvfits antenna arrays are 1-indexed so we add 1
            # to our 0-indexed arrays
            group_parameter_dict = {'UU ': uvw_array_sec[:, 0],
                                    'VV ': uvw_array_sec[:, 1],
                                    'WW ': uvw_array_sec[:, 2],
                                    'DATE ': time_array,
                                    'BASELINE': baselines_use,
                                    'ANTENNA1': self.infodict['ant_1_array'] + 1,
                                    'ANTENNA2': self.infodict['ant_2_array'] + 1,
                                    'SUBARRAY': NP.ones_like(self.infodict['ant_1_array']),
                                    'INTTIM': int_time_array}
            # Per-parameter scale (PSCAL) and offset (PZERO) keywords
            pscal_dict = {'UU ': 1.0, 'VV ': 1.0, 'WW ': 1.0,
                          'DATE ': 1.0, 'BASELINE': 1.0, 'ANTENNA1': 1.0,
                          'ANTENNA2': 1.0, 'SUBARRAY': 1.0, 'INTTIM': 1.0}
            pzero_dict = {'UU ': 0.0, 'VV ': 0.0, 'WW ': 0.0,
                          'DATE ': tzero, 'BASELINE': 0.0, 'ANTENNA1': 0.0,
                          'ANTENNA2': 0.0, 'SUBARRAY': 0.0, 'INTTIM': 0.0}
            # list contains arrays of [u,v,w,date,baseline];
            # each array has shape (Nblts)
            if (NP.max(self.infodict['ant_1_array']) < 255 and
                    NP.max(self.infodict['ant_2_array']) < 255):
                # if the number of antennas is less than 256 then include both the
                # baseline array and the antenna arrays in the group parameters.
                # Otherwise just use the antenna arrays
                parnames_use = ['UU ', 'VV ', 'WW ',
                                'DATE ', 'BASELINE', 'ANTENNA1',
                                'ANTENNA2', 'SUBARRAY', 'INTTIM']
            else:
                parnames_use = ['UU ', 'VV ', 'WW ', 'DATE ',
                                'ANTENNA1', 'ANTENNA2', 'SUBARRAY', 'INTTIM']
            group_parameter_list = [group_parameter_dict[parname] for
                                    parname in parnames_use]
            hdu = fits.GroupData(uvfits_array_data, parnames=parnames_use,
                                 pardata=group_parameter_list, bitpix=-32)
            hdu = fits.GroupsHDU(hdu)
            for i, key in enumerate(parnames_use):
                hdu.header['PSCAL' + str(i + 1) + ' '] = pscal_dict[key]
                hdu.header['PZERO' + str(i + 1) + ' '] = pzero_dict[key]
            # ISO string of first time in self.infodict['time_array']
            # hdu.header['DATE-OBS'] = Time(self.infodict['time_array'][0], scale='utc', format='jd').iso
            hdu.header['DATE-OBS'] = self.infodict['dateobs']
            # Axis keywords: COMPLEX, STOKES, FREQ, IF, RA, DEC per the
            # UVFITS random-groups layout
            hdu.header['CTYPE2 '] = 'COMPLEX '
            hdu.header['CRVAL2 '] = 1.0
            hdu.header['CRPIX2 '] = 1.0
            hdu.header['CDELT2 '] = 1.0
            hdu.header['CTYPE3 '] = 'STOKES '
            hdu.header['CRVAL3 '] = self.infodict['polarization_array'][0]
            hdu.header['CRPIX3 '] = 1.0
            try:
                hdu.header['CDELT3 '] = NP.diff(self.infodict['polarization_array'])[0]
            except(IndexError):
                # Single polarization: no spacing to take a diff of
                hdu.header['CDELT3 '] = 1.0
            hdu.header['CTYPE4 '] = 'FREQ '
            hdu.header['CRVAL4 '] = self.infodict['freq_array'][0, 0]
            hdu.header['CRPIX4 '] = 1.0
            hdu.header['CDELT4 '] = NP.diff(self.infodict['freq_array'][0])[0]
            hdu.header['CTYPE5 '] = 'IF '
            hdu.header['CRVAL5 '] = 1.0
            hdu.header['CRPIX5 '] = 1.0
            hdu.header['CDELT5 '] = 1.0
            hdu.header['CTYPE6 '] = 'RA'
            hdu.header['CRVAL6 '] = NP.degrees(self.infodict['phase_center_ra'])
            hdu.header['CTYPE7 '] = 'DEC'
            hdu.header['CRVAL7 '] = NP.degrees(self.infodict['phase_center_dec'])
            hdu.header['BUNIT '] = self.infodict['vis_units']
            hdu.header['BSCALE '] = 1.0
            hdu.header['BZERO '] = 0.0
            hdu.header['OBJECT '] = self.infodict['object_name']
            hdu.header['TELESCOP'] = self.infodict['telescope_name']
            hdu.header['LAT '] = self.infodict['telescope_location'][0]
            hdu.header['LON '] = self.infodict['telescope_location'][1]
            hdu.header['ALT '] = self.infodict['telescope_location'][2]
            hdu.header['INSTRUME'] = self.infodict['instrument']
            hdu.header['EPOCH '] = float(self.infodict['phase_center_epoch'])
            for line in self.infodict['history'].splitlines():
                hdu.header.add_history(line)
            # ADD the ANTENNA table
            staxof = NP.zeros(self.infodict['Nants_telescope'])
            # 0 specifies alt-az, 6 would specify a phased array
            mntsta = NP.zeros(self.infodict['Nants_telescope'])
            # beware, X can mean just about anything
            poltya = NP.full((self.infodict['Nants_telescope']), 'X', dtype=NP.object_)
            polaa = [90.0] + NP.zeros(self.infodict['Nants_telescope'])
            poltyb = NP.full((self.infodict['Nants_telescope']), 'Y', dtype=NP.object_)
            polab = [0.0] + NP.zeros(self.infodict['Nants_telescope'])
            col1 = fits.Column(name='ANNAME', format='8A',
                               array=self.infodict['antenna_names'])
            col2 = fits.Column(name='STABXYZ', format='3D',
                               array=self.infodict['antenna_positions'])
            # convert to 1-indexed from 0-indexed indicies
            col3 = fits.Column(name='NOSTA', format='1J',
                               array=self.infodict['antenna_numbers'] + 1)
            col4 = fits.Column(name='MNTSTA', format='1J', array=mntsta)
            col5 = fits.Column(name='STAXOF', format='1E', array=staxof)
            col6 = fits.Column(name='POLTYA', format='1A', array=poltya)
            col7 = fits.Column(name='POLAA', format='1E', array=polaa)
            # col8 = fits.Column(name='POLCALA', format='3E', array=polcala)
            col9 = fits.Column(name='POLTYB', format='1A', array=poltyb)
            col10 = fits.Column(name='POLAB', format='1E', array=polab)
            # col11 = fits.Column(name='POLCALB', format='3E', array=polcalb)
            # note ORBPARM is technically required, but we didn't put it in
            cols = fits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col9, col10])
            ant_hdu = fits.BinTableHDU.from_columns(cols)
            ant_hdu.header['EXTNAME'] = 'AIPS AN'
            ant_hdu.header['EXTVER'] = 1
            # write XYZ coordinates if not already defined
            ant_hdu.header['ARRAYX'] = self.infodict['telescope_location'][0]
            ant_hdu.header['ARRAYY'] = self.infodict['telescope_location'][1]
            ant_hdu.header['ARRAYZ'] = self.infodict['telescope_location'][2]
            # ant_hdu.header['FRAME'] = 'ITRF'
            # NOTE(review): FRAME set to None writes a blank-valued card;
            # the commented line above suggests 'ITRF' was intended -- confirm
            ant_hdu.header['FRAME'] = None
            ant_hdu.header['GSTIA0'] = self.infodict['gst0']
            ant_hdu.header['FREQ'] = self.infodict['freq_array'][0, 0]
            ant_hdu.header['RDATE'] = self.infodict['rdate']
            ant_hdu.header['UT1UTC'] = self.infodict['dut1']
            ant_hdu.header['TIMSYS'] = self.infodict['timesys']
            if self.infodict['timesys'] == 'IAT':
                warnings.warn('This file has an "IAT" time system. Files of '
                              'this type are not properly supported')
            ant_hdu.header['ARRNAM'] = self.infodict['telescope_name']
            ant_hdu.header['NO_IF'] = self.infodict['Nspws']
            ant_hdu.header['DEGPDY'] = self.infodict['earth_omega']
            # ant_hdu.header['IATUTC'] = 35.
            # set mandatory parameters which are not supported by this object
            # (or that we just don't understand)
            ant_hdu.header['NUMORB'] = 0
            # note: Bart had this set to 3. We've set it 0 after aips 117. -jph
            ant_hdu.header['NOPCAL'] = 0
            ant_hdu.header['POLTYPE'] = 'X-Y LIN'
            # note: we do not support the concept of "frequency setups"
            # -- lists of spws given in a SU table.
            ant_hdu.header['FREQID'] = -1
            # if there are offsets in images, this could be the culprit
            ant_hdu.header['POLARX'] = 0.0
            ant_hdu.header['POLARY'] = 0.0
            ant_hdu.header['DATUTC'] = 0 # ONLY UTC SUPPORTED
            # we always output right handed coordinates
            ant_hdu.header['XYZHAND'] = 'RIGHT'
            # ADD the FQ table
            # skipping for now and limiting to a single spw
            # write the file
            hdulist = fits.HDUList(hdus=[hdu, ant_hdu])
            hdulist.writeto(outfile, overwrite=overwrite)
        except Exception as xption2:
            print(xption2)
            raise IOError('Could not write to UVFITS file')
        else:
            write_successful = True
            print('Data successfully written using in-house uvfits writer to {0}'.format(outfile))
            return
#################################################################################
| 579,057 | 57.526177 | 587 | py |
PRISim | PRISim-master/scripts/altsim_interface.py | #!python
import yaml, argparse, ast, warnings
import numpy as NP
from astropy.io import ascii
from astropy.time import Time
import prisim
prisim_path = prisim.__path__[0]+'/'
def simparms_from_pyuvsim_to_prisim(pyuvsim_parms, prisim_parms):
    """
    Translate a pyuvsim obsparam-style configuration dictionary into a
    PRISim parameter dictionary.

    Inputs:

    pyuvsim_parms   [dictionary] pyuvsim simulation configuration (as
                    loaded from its YAML obsparam file)

    prisim_parms    [dictionary] PRISim parameter template; updated in
                    place and also returned

    Output:

    The updated PRISim parameter dictionary. As a side effect, the pyuvsim
    source catalog is rewritten to disk in PRISim's fixed-width format
    (filename derived from the pyuvsim catalog name with suffix
    '_prisim.txt').
    """
    if not isinstance(pyuvsim_parms, dict):
        raise TypeError('Input pyuvsim_parms must be a dictionary')
    if not isinstance(prisim_parms, dict):
        raise TypeError('Input prisim_parms must be a dictionary')

    # I/O and directory structure: split pyuvsim's outdir into PRISim's
    # rootdir/project hierarchy
    pyuvsim_outpath = pyuvsim_parms['filing']['outdir']
    pyuvsim_outpath_hierarchy = pyuvsim_outpath.split('/')
    pyuvsim_outpath_hierarchy = [item for item in pyuvsim_outpath_hierarchy if item != '']
    prisim_parms['dirstruct']['rootdir'] = '/' + '/'.join(pyuvsim_outpath_hierarchy[:-1]) + '/'
    prisim_parms['dirstruct']['project'] = '/'.join(pyuvsim_outpath_hierarchy[-1:])
    prisim_parms['dirstruct']['simid'] = pyuvsim_parms['filing']['outfile_name']

    # Telescope parameters (location is stored as a string tuple in the
    # pyuvsim telescope config, hence the literal_eval)
    pyuvsim_telescope_parms = pyuvsim_parms['telescope']
    with open(pyuvsim_telescope_parms['telescope_config_name'], 'r') as pyuvsim_telescope_config_file:
        pyuvsim_telescope_config = yaml.safe_load(pyuvsim_telescope_config_file)
    telescope_location = ast.literal_eval(pyuvsim_telescope_config['telescope_location'])
    prisim_parms['telescope']['latitude'] = telescope_location[0]
    prisim_parms['telescope']['longitude'] = telescope_location[1]
    prisim_parms['telescope']['altitude'] = telescope_location[2]

    # Array parameters: reuse pyuvsim's antenna layout file directly
    prisim_parms['array']['redundant'] = True
    prisim_parms['array']['layout'] = None
    prisim_parms['array']['file'] = pyuvsim_telescope_parms['array_layout']
    prisim_parms['array']['filepathtype'] = 'custom'
    prisim_parms['array']['parser']['data_start'] = 1
    prisim_parms['array']['parser']['label'] = 'Name'
    prisim_parms['array']['parser']['east'] = 'E'
    prisim_parms['array']['parser']['north'] = 'N'
    prisim_parms['array']['parser']['up'] = 'U'

    # Antenna power pattern parameters: map pyuvsim's analytic beam names
    # onto PRISim element shapes
    if pyuvsim_telescope_config['beam_paths'][0].lower() == 'uniform':
        prisim_parms['antenna']['shape'] = 'delta'
    if pyuvsim_telescope_config['beam_paths'][0].lower() == 'gaussian':
        prisim_parms['antenna']['shape'] = 'gaussian'
        prisim_parms['antenna']['size'] = pyuvsim_telescope_config['diameter']
    if pyuvsim_telescope_config['beam_paths'][0].lower() == 'airy':
        prisim_parms['antenna']['shape'] = 'dish'
        prisim_parms['antenna']['size'] = pyuvsim_telescope_config['diameter']
    if pyuvsim_telescope_config['beam_paths'][0].lower() in ['uniform', 'airy', 'gaussian']:
        prisim_parms['beam']['use_external'] = False
        prisim_parms['beam']['file'] = None
    else:
        # Anything else is assumed to be a path to a UVBeam file
        prisim_parms['beam']['use_external'] = True
        prisim_parms['beam']['file'] = pyuvsim_telescope_config['beam_paths'][0]
        prisim_parms['beam']['filepathtype'] = 'custom'
        prisim_parms['beam']['filefmt'] = 'UVBeam'

    # Bandpass parameters
    prisim_parms['bandpass']['freq_resolution'] = pyuvsim_parms['freq']['channel_width']
    prisim_parms['bandpass']['nchan'] = pyuvsim_parms['freq']['Nfreqs']
    if prisim_parms['bandpass']['nchan'] == 1:
        warnings.warn('Single channel simulation is not supported currently in PRISim. Request at least two frequency channels.')
    pyuvsim_start_freq = pyuvsim_parms['freq']['start_freq']
    pyuvsim_freqs = pyuvsim_start_freq + prisim_parms['bandpass']['freq_resolution'] * NP.arange(prisim_parms['bandpass']['nchan'])
    # PRISim expects the band center frequency
    prisim_parms['bandpass']['freq'] = pyuvsim_start_freq + 0.5 * prisim_parms['bandpass']['nchan'] * prisim_parms['bandpass']['freq_resolution']

    # Observing parameters: zenith drift starting at pyuvsim's start time
    prisim_parms['obsparm']['n_acc'] = pyuvsim_parms['time']['Ntimes']
    prisim_parms['obsparm']['t_acc'] = pyuvsim_parms['time']['integration_time']
    prisim_parms['obsparm']['obs_mode'] = 'drift'
    prisim_parms['pointing']['jd_init'] = pyuvsim_parms['time']['start_time']
    prisim_parms['obsparm']['obs_date'] = Time(prisim_parms['pointing']['jd_init'], scale='utc', format='jd').iso.split(' ')[0].replace('-', '/')
    prisim_parms['pointing']['lst_init'] = None
    prisim_parms['pointing']['drift_init']['alt'] = 90.0
    prisim_parms['pointing']['drift_init']['az'] = 270.0
    prisim_parms['pointing']['drift_init']['ha'] = None
    prisim_parms['pointing']['drift_init']['dec'] = None

    # Sky model: convert the pyuvsim catalog into PRISim's catalog format
    prisim_parms['skyparm']['model'] = 'custom'
    prisim_parms['catalog']['filepathtype'] = 'custom'
    prisim_parms['catalog']['custom_file'] = pyuvsim_parms['sources']['catalog'].split('.txt')[0] + '_prisim.txt'
    pyuvsim_catalog = ascii.read(pyuvsim_parms['sources']['catalog'], comment='#', header_start=0, data_start=1)
    ra_colname = ''
    dec_colname = ''
    epoch = ''
    for colname in pyuvsim_catalog.colnames:
        if 'RA' in colname:
            ra_colname = colname
            ra_deg = pyuvsim_catalog[colname].data
            # Column names look like 'RA_J2000 [deg]'; extract the epoch digits
            epoch = ra_colname.split('_')[1].split()[0][1:]
        if 'Dec' in colname:
            dec_colname = colname
            dec_deg = pyuvsim_catalog[colname].data
        if 'Flux' in colname:
            # Fix: NP.float was a deprecated alias of the builtin float
            # (removed in numpy >= 1.24); builtin float is identical here
            fint = pyuvsim_catalog[colname].data.astype(float)
        if 'Frequency' in colname:
            ref_freq = pyuvsim_catalog[colname].data.astype(float)
    # Flat-spectrum point sources: zero spectral index and zero extent
    spindex = NP.zeros(fint.size, dtype=float)
    majax = NP.zeros(fint.size, dtype=float)
    minax = NP.zeros(fint.size, dtype=float)
    pa = NP.zeros(fint.size, dtype=float)
    prisim_parms['skyparm']['epoch'] = epoch
    prisim_parms['skyparm']['flux_unit'] = 'Jy'
    prisim_parms['skyparm']['flux_min'] = None
    prisim_parms['skyparm']['flux_max'] = None
    prisim_parms['skyparm']['custom_reffreq'] = float(ref_freq[0]) / 1e9
    ascii.write([ra_deg, dec_deg, fint, spindex, majax, minax, pa], prisim_parms['catalog']['custom_file'], names=['RA', 'DEC', 'F_INT', 'SPINDEX', 'MAJAX', 'MINAX', 'PA'], delimiter=' ', format='fixed_width', formats={'RA': '%11.7f', 'DEC': '%12.7f', 'F_INT': '%10.4f', 'SPINDEX': '%8.5f', 'MAJAX': '%8.5f', 'MINAX': '%8.5f', 'PA': '%8.5f'}, bookend=False, overwrite=True)

    # Save format parameters: PRISim writes UVH5 only for this workflow
    prisim_parms['save_formats']['npz'] = False
    prisim_parms['save_formats']['uvfits'] = False
    prisim_parms['save_formats']['uvh5'] = True

    return prisim_parms
if __name__ == '__main__':
    # Command-line driver: convert a simulation configuration file between
    # simulator formats (currently only pyuvsim -> prisim is implemented)
    parser = argparse.ArgumentParser(description='Program to convert simulation parameter configurations from one simulator to another')

    ## Parse input arguments
    io_group = parser.add_argument_group('Input/Output parameters', 'Input/output specifications')
    io_group.add_argument('-i', '--infile', dest='infile', default=None, type=str, required=False, help='Full path to file specifying input parameters')
    io_group.add_argument('-o', '--outfile', dest='outfile', default=None, type=str, required=True, help='Full path to file specifying output parameters')
    io_group.add_argument('--from', dest='from', default=None, type=str, required=True, help='String specifying origin simulation configuration. Accepts "prisim", "pyuvsim"')
    io_group.add_argument('--to', dest='to', default=None, type=str, required=True, help='String specifying destination simulation configuration. Accepts "prisim", "pyuvsim"')
    args = vars(parser.parse_args())

    if args['from'].lower() not in ['prisim', 'pyuvsim']:
        raise ValueError('Originating simulation must be set to "prisim" or "pyuvsim"')
    if args['to'].lower() not in ['prisim', 'pyuvsim']:
        raise ValueError('Destination simulation must be set to "prisim" or "pyuvsim"')
    if args['from'].lower() == args['to'].lower():
        raise ValueError('Origin and destination simulation types must not be equal')

    if args['to'].lower() == 'prisim':
        # pyuvsim -> prisim: overlay the pyuvsim config on PRISim's
        # default parameter template
        prisim_template_file = prisim_path+'examples/simparms/defaultparms.yaml'
        with open(prisim_template_file, 'r') as prisim_parms_file:
            prisim_parms = yaml.safe_load(prisim_parms_file)
        with open(args['infile'], 'r') as pyuvsim_parms_file:
            pyuvsim_parms = yaml.safe_load(pyuvsim_parms_file)
        outparms = simparms_from_pyuvsim_to_prisim(pyuvsim_parms, prisim_parms)
    elif args['from'].lower() == 'prisim':
        with open(args['infile'], 'r') as prisim_parms_file:
            # Fix: read from the file handle just opened; the original code
            # referenced prisim_template_file, which is only defined in the
            # other branch and would raise NameError here
            prisim_parms = yaml.safe_load(prisim_parms_file)
        # Fix: the original called simparms_from_pyuvsim_to_prisim() with a
        # single argument, which always raised TypeError. No prisim ->
        # pyuvsim converter exists yet, so fail with a clear message.
        raise NotImplementedError('Conversion from PRISim to pyuvsim configuration is not implemented yet')
    with open(args['outfile'], 'w') as outfile:
        yaml.dump(outparms, outfile, default_flow_style=False)
| 8,667 | 49.988235 | 376 | py |
PRISim | PRISim-master/scripts/run_prisim.py | #!python
import os, shutil, subprocess, pwd, errno, warnings
from mpi4py import MPI
import yaml
import h5py
import argparse
import copy
import numpy as NP
from astropy.io import fits, ascii
from astropy.coordinates import Galactic, FK5, ICRS, SkyCoord, AltAz, EarthLocation
from astropy import units as U
from astropy.time import Time
import scipy.constants as FCNST
from scipy import interpolate
import matplotlib.pyplot as PLT
import matplotlib.colors as PLTC
import matplotlib.animation as MOV
from scipy.interpolate import griddata
import datetime as DT
import time
import progressbar as PGB
import healpy as HP
import psutil
from astroutils import MPI_modules as my_MPI
from astroutils import geometry as GEOM
from astroutils import catalog as SM
from astroutils import constants as CNST
from astroutils import DSP_modules as DSP
from astroutils import lookup_operations as LKP
from astroutils import mathops as OPS
from astroutils import ephemeris_timing as ET
import prisim
from prisim import interferometry as RI
from prisim import primary_beams as PB
from prisim import baseline_delay_horizon as DLY
try:
from pyuvdata import UVBeam
except ImportError:
uvbeam_module_found = False
else:
uvbeam_module_found = True
import ipdb as PDB
## Set MPI parameters
comm = MPI.COMM_WORLD            # communicator spanning all launched processes
rank = comm.Get_rank()           # rank of this process within the communicator
nproc = comm.Get_size()          # total number of MPI processes
name = MPI.Get_processor_name()  # hostname of the node running this process

## global parameters
sday = CNST.sday                 # sidereal day (from astroutils constants); presumably in units of solar days -- confirm
sday_correction = 1 / sday       # NOTE(review): integer division under Python 2 if sday were an int; assumed float
prisim_path = prisim.__path__[0]+'/'  # installed package root, used to locate bundled data files
## Parse input arguments
parser = argparse.ArgumentParser(description='Program to simulate interferometer array data')

input_group = parser.add_argument_group('Input parameters', 'Input specifications')
# Fix: the Python-2-only builtin `file` used as the argparse type is replaced
# with argparse.FileType('r'), which behaves identically (opens the argument
# -- or the string default -- for reading) and is valid on both Python 2 and 3.
input_group.add_argument('-i', '--infile', dest='infile', default=prisim_path+'examples/simparms/defaultparms.yaml', type=argparse.FileType('r'), required=False, help='File specifying input parameters')

args = vars(parser.parse_args())
# Load the user-supplied ("custom") parameter file; if it names a template
# via preload/template, load that as the baseline ("default") parameters.
default_parms = {}
with args['infile'] as custom_parms_file:
    custom_parms = yaml.safe_load(custom_parms_file)
if custom_parms['preload']['template'] is not None:
    with open(custom_parms['preload']['template']) as default_parms_file:
        default_parms = yaml.safe_load(default_parms_file)
if not default_parms:
    # No template: use the custom parameters verbatim
    parms = custom_parms
else:
    # Overlay custom values onto the template, descending at most three
    # levels of dictionary nesting; any key absent from the template is
    # treated as an error rather than silently added.
    parms = default_parms
    if custom_parms['preload']['template'] is not None:
        for key in custom_parms:
            if key != 'preload':
                if key in default_parms:
                    if not isinstance(custom_parms[key], dict):
                        parms[key] = custom_parms[key]
                    else:
                        for subkey in custom_parms[key]:
                            if subkey in default_parms[key]:
                                if not isinstance(custom_parms[key][subkey], dict):
                                    parms[key][subkey] = custom_parms[key][subkey]
                                else:
                                    for subsubkey in custom_parms[key][subkey]:
                                        if subsubkey in default_parms[key][subkey]:
                                            if not isinstance(custom_parms[key][subkey][subsubkey], dict):
                                                parms[key][subkey][subsubkey] = custom_parms[key][subkey][subsubkey]
                                            else:
                                                # Deeper than three levels is unsupported
                                                raise TypeError('Parsing YAML simulation parameter files with this level of nesting is not supported')
                                        else:
                                            raise KeyError('Invalid parameter found in custom simulation parameters file')
                            else:
                                raise KeyError('Invalid parameter found in custom simulation parameters file')
                else:
                    raise KeyError('Invalid parameter found in custom simulation parameters file')
# ---- Directory structure parameters ----
rootdir = parms['dirstruct']['rootdir']
project = parms['dirstruct']['project']
simid = parms['dirstruct']['simid']
# ---- Telescope / system-temperature parameters ----
telescope_id = parms['telescope']['id']
label_prefix = parms['telescope']['label_prefix']
Trx = parms['telescope']['Trx']
Tant_freqref = parms['telescope']['Tant_freqref']
Tant_ref = parms['telescope']['Tant_ref']
Tant_spindex = parms['telescope']['Tant_spindex']
Tsys = parms['telescope']['Tsys']
# Bundle system-temperature inputs: receiver temperature, antenna
# temperature model (reference value/frequency + spectral index), and an
# optional net Tsys override
Tsysinfo = {'Trx': Trx, 'Tant':{'f0': Tant_freqref, 'spindex': Tant_spindex, 'T0': Tant_ref}, 'Tnet': Tsys}
A_eff = parms['telescope']['A_eff']
eff_aprtr = parms['telescope']['eff_aprtr']
A_eff *= eff_aprtr  # fold aperture efficiency into the effective area
eff_Q = parms['telescope']['eff_Q']
latitude = parms['telescope']['latitude']
longitude = parms['telescope']['longitude']
altitude = parms['telescope']['altitude']
if longitude is None:
    longitude = 0.0
if altitude is None:
    altitude = 0.0
# ---- Polyphase filter bank (PFB) shape ----
pfb_method = parms['bandpass']['pfb_method']
pfb_filepath = parms['bandpass']['pfb_filepath']
pfb_file = parms['bandpass']['pfb_file']
if pfb_method is not None:
    if pfb_method not in ['theoretical', 'empirical']:
        raise ValueError('Value specified for pfb_method is not one of accepted values')
    if not isinstance(pfb_file, str):
        raise TypeError('Filename containing PFB information must be a string')
    if pfb_filepath == 'default':
        # Resolve against the data shipped with the installed package
        pfb_file = prisim_path + 'data/bandpass/'+pfb_file
# ---- Antenna element parameters ----
element_shape = parms['antenna']['shape']
element_size = parms['antenna']['size']
element_ocoords = parms['antenna']['ocoords']
element_orientation = parms['antenna']['orientation']
ground_plane = parms['antenna']['ground_plane']
phased_array = parms['antenna']['phased_array']
phased_elements_file = parms['phasedarray']['file']
if phased_array:
    if not isinstance(phased_elements_file, str):
        raise TypeError('Filename containing phased array elements must be a string')
    if parms['phasedarray']['filepathtype'] == 'default':
        phased_elements_file = prisim_path+'data/phasedarray_layouts/'+phased_elements_file
phasedarray_delayerr = parms['phasedarray']['delayerr']
phasedarray_gainerr = parms['phasedarray']['gainerr']
nrand = parms['phasedarray']['nrand']
# ---- Observation parameters ----
obs_date = parms['obsparm']['obs_date']
obs_mode = parms['obsparm']['obs_mode']
n_acc = parms['obsparm']['n_acc']
t_acc = parms['obsparm']['t_acc']
t_obs = parms['obsparm']['t_obs']
freq = parms['bandpass']['freq']
freq_resolution = parms['bandpass']['freq_resolution']
nchan = parms['bandpass']['nchan']
# ---- Primary beam parameters ----
beam_info = parms['beam']
use_external_beam = beam_info['use_external']
if use_external_beam:
    if not isinstance(beam_info['file'], str):
        raise TypeError('Filename containing external beam information must be a string')
    external_beam_file = beam_info['file']
    if beam_info['filepathtype'] == 'default':
        external_beam_file = prisim_path+'data/beams/'+external_beam_file
    if beam_info['filefmt'].lower() in ['hdf5', 'fits', 'uvbeam']:
        beam_filefmt = beam_info['filefmt'].lower()
    else:
        raise ValueError('Invalid beam file format specified')
beam_pol = beam_info['pol']
beam_id = beam_info['identifier']
pbeam_spec_interp_method = beam_info['spec_interp']
beam_chromaticity = beam_info['chromatic']
select_beam_freq = beam_info['select_freq']
if select_beam_freq is None:
    # Default achromatic-beam reference frequency to the band center
    select_beam_freq = freq
# ---- Instrument gains ----
gainparms = parms['gains']
# gaintable = None
gaininfo = None
if gainparms['file'] is not None:
    gaintable = {}
    if not isinstance(gainparms['file'], str):
        raise TypeError('Filename of instrument gains must be a string')
    gainsfile = gainparms['file']
    if gainparms['filepathtype'] == 'default':
        gainsfile = prisim_path + 'data/gains/'+gainsfile
    gaininfo = RI.GainInfo(init_file=gainsfile, axes_order=['label', 'frequency', 'time'])
# ---- Snapshot selection parameters ----
avg_drifts = parms['snapshot']['avg_drifts']
beam_switch = parms['snapshot']['beam_switch']
pick_snapshots = parms['snapshot']['pick']
all_snapshots = parms['snapshot']['all']
snapshots_range = parms['snapshot']['range']
# ---- Pointing parameters ----
pointing_info = parms['pointing']
pointing_file = pointing_info['file']
pointing_drift_init = pointing_info['drift_init']
pointing_track_init = pointing_info['track_init']
# ---- Gradient mode (only 'baseline' gradients supported currently) ----
gradient_mode = parms['processing']['gradient_mode']
if gradient_mode is not None:
    if not isinstance(gradient_mode, str):
        raise TypeError('gradient_mode must be a string')
    # Fix: 'grequency' was a typo for 'frequency' in the list of recognized
    # gradient modes; a 'frequency' request now reaches the explicit
    # "not supported" error below instead of being rejected as invalid
    if gradient_mode.lower() not in ['baseline', 'skypos', 'frequency']:
        raise ValueError('Invalid value specified for gradient_mode')
    if gradient_mode.lower() != 'baseline':
        raise ValueError('Specified gradient_mode not supported currently')
# ---- Memory budgeting ----
memuse = parms['processing']['memuse']
memory_available = parms['processing']['memavail']
if memory_available is None:
    memory_available = psutil.virtual_memory().available # in Bytes
    pvmemavail = None # Let it be flexible if going by memory on single node
else:
    memory_available *= 2**30 # GB to bytes
    pvmemavail = 1.0 * memory_available / nproc  # per-process share in bytes
if memuse is None:
    memuse = 0.9 * memory_available  # default: use 90% of what is available
elif isinstance(memuse, (int,float)):
    memuse = NP.abs(float(memuse)) # now in GB
    if memuse * 2**30 > 0.9 * memory_available:
        memuse = 0.9 * memory_available # now converted to bytes
    else:
        memuse = memuse * 2**30 # now converted to bytes
else:
    raise TypeError('Usable memory must be specified as a scalar numeric value')
# ---- Miscellaneous processing parameters ----
n_bins_baseline_orientation = parms['processing']['n_bins_blo']
n_sky_sectors = parms['processing']['n_sky_sectors']
bpass_shape = parms['processing']['bpass_shape']
ant_bpass_file = parms['processing']['ant_bpass_file']
max_abs_delay = parms['processing']['max_abs_delay']
f_pad = parms['processing']['f_pad']
n_pad = parms['processing']['n_pad']
coarse_channel_width = parms['processing']['coarse_channel_width']
bandpass_correct = parms['processing']['bp_correct']
noise_bandpass_correct = parms['processing']['noise_bp_correct']
do_delay_transform = parms['processing']['delay_transform']
memsave = parms['processing']['memsave']
store_prev_sky = parms['processing']['store_prev_sky']
if not isinstance(store_prev_sky, (bool,int)):
    store_prev_sky = True
cleanup = parms['processing']['cleanup']
if not isinstance(cleanup, (bool,int)):
    raise TypeError('cleanup parameter must be an integer or boolean')
else:
    if isinstance(cleanup, bool):
        cleanup = int(cleanup)
    if (cleanup < 0) or (cleanup > 3):
        raise ValueError('Value of cleanup parameter outside bounds')
# ---- Channel flagging parameters ----
flag_chan = NP.asarray(parms['flags']['flag_chan']).reshape(-1)
bp_flag_repeat = parms['flags']['bp_flag_repeat']
n_edge_flag = NP.asarray(parms['flags']['n_edge_flag']).reshape(-1)
flag_repeat_edge_channels = parms['flags']['flag_repeat_edge_channels']
# ---- Sky model parameters ----
sky_str = parms['skyparm']['model']
fsky = parms['skyparm']['fsky']
skycat_epoch = parms['skyparm']['epoch']
nside = parms['skyparm']['nside']
flux_unit = parms['skyparm']['flux_unit']
fluxcut_min = parms['skyparm']['flux_min']
fluxcut_max = parms['skyparm']['flux_max']
fluxcut_freq = parms['skyparm']['fluxcut_reffreq']
if fluxcut_min is None:
    fluxcut_min = 0.0
spindex = parms['skyparm']['spindex']
spindex_rms = parms['skyparm']['spindex_rms']
spindex_seed = parms['skyparm']['spindex_seed']
roi_radius = parms['skyparm']['roi_radius']
if roi_radius is None:
    roi_radius = 90.0  # default: full visible hemisphere (degrees, presumably) -- confirm units
use_lidz = parms['skyparm']['lidz']
use_21cmfast = parms['skyparm']['21cmfast']
global_HI_parms = parms['skyparm']['global_EoR_parms']
# ---- Catalog file locations ----
catalog_filepathtype = parms['catalog']['filepathtype']
DSM_file_prefix = parms['catalog']['DSM_file_prefix']
spectrum_file = parms['catalog']['spectrum_file']
SUMSS_file = parms['catalog']['SUMSS_file']
NVSS_file = parms['catalog']['NVSS_file']
MWACS_file = parms['catalog']['MWACS_file']
GLEAM_file = parms['catalog']['GLEAM_file']
custom_catalog_file = parms['catalog']['custom_file']
skymod_file = parms['catalog']['skymod_file']
if catalog_filepathtype == 'default':
    # Resolve all catalog files against the data shipped with the package
    DSM_file_prefix = prisim_path + 'data/catalogs/' + DSM_file_prefix
    spectrum_file = prisim_path + 'data/catalogs/' + spectrum_file
    SUMSS_file = prisim_path + 'data/catalogs/' + SUMSS_file
    NVSS_file = prisim_path + 'data/catalogs/' + NVSS_file
    MWACS_file = prisim_path + 'data/catalogs/' + MWACS_file
    GLEAM_file = prisim_path + 'data/catalogs/' + GLEAM_file
    custom_catalog_file = prisim_path + 'data/catalogs/' + custom_catalog_file
    skymod_file = prisim_path + 'data/catalogs/' + skymod_file
# ---- Phasing and parallelization parameters ----
pc = parms['phasing']['center']
pc_coords = parms['phasing']['coords']
mpi_key = parms['pp']['key']
mpi_eqvol = parms['pp']['eqvol']
# ---- Output format parameters ----
save_redundant = parms['save_redundant']
save_formats = parms['save_formats']
save_to_npz = save_formats['npz']
save_to_uvfits = save_formats['uvfits']
save_to_uvh5 = save_formats['uvh5']
savefmt = save_formats['fmt']
if savefmt not in ['HDF5', 'hdf5', 'FITS', 'fits']:
    raise ValueError('Output format invalid')
if save_to_uvfits:
    if save_formats['uvfits_method'] not in [None, 'uvdata', 'uvfits']:
        raise ValueError('Invalid method specified for saving to UVFITS format')
# ---- Plotting and diagnostics ----
plots = parms['plots']
diagnosis_parms = parms['diagnosis']
display_resource_monitor = diagnosis_parms['resource_monitor']
tint = diagnosis_parms['refresh_interval']
if tint is None:
    tint = 2.0  # default monitor refresh interval in seconds
elif not isinstance(tint, (int, float)):
    raise TypeError('Refresh interval must be a scalar number')
else:
    if tint <= 0.0:
        tint = 2.0
pid = os.getpid()
pids = comm.gather(pid, root=0)  # collect all worker PIDs on rank 0
if display_resource_monitor:
    if rank == 0:
        # Launch an xterm running the PRISim resource monitor over all PIDs
        cmd = ' '.join(['xterm', '-e', 'prisim_resource_monitor.py', '-p', ' '.join(map(str, pids)), '-t', '{0:.1f}'.format(tint), '&'])
        subprocess.call([cmd], shell=True)

project_dir = project + '/'
try:
    # Fix: 0755 is Python-2-only octal syntax (a SyntaxError under Python 3);
    # 0o755 denotes the same mode bits and is valid on Python 2.6+ and 3
    os.makedirs(rootdir+project_dir, 0o755)
except OSError as exception:
    # Tolerate a pre-existing directory; re-raise any other failure
    if exception.errno == errno.EEXIST and os.path.isdir(rootdir+project_dir):
        pass
    else:
        raise

if rank == 0:
    if simid is None:
        # No simulation ID supplied: derive one from the current UTC time
        simid = time.strftime('%Y-%m-%d-%H-%M-%S', time.gmtime())
    elif not isinstance(simid, str):
        raise TypeError('simid must be a string')
else:
    simid = None
simid = comm.bcast(simid, root=0) # Broadcast simulation ID
simid = simid + '/'
try:
    os.makedirs(rootdir+project_dir+simid, 0o755)
except OSError as exception:
    if exception.errno == errno.EEXIST and os.path.isdir(rootdir+project_dir+simid):
        pass
    else:
        raise
# ---- Validate telescope and antenna element specifications ----
if telescope_id.lower() not in ['mwa', 'vla', 'gmrt', 'ugmrt', 'hera', 'mwa_dipole', 'custom', 'paper', 'mwa_tools', 'hirax', 'chime']:
    raise ValueError('Invalid telescope specified')
if element_shape is None:
    element_shape = 'delta'
elif element_shape not in ['dish', 'delta', 'dipole', 'gaussian']:
    raise ValueError('Invalid antenna element shape specified')
if element_shape != 'delta':
    # Every non-ideal element needs a positive physical size
    if element_size is None:
        raise ValueError('No antenna element size specified')
    elif element_size <= 0.0:
        raise ValueError('Antenna element size must be positive')
if not isinstance(phased_array, bool):
    raise TypeError('phased_array specification must be boolean')
# ---- Build filename tags encoding phased-array delay/gain errors ----
if phasedarray_delayerr is None:
    phasedarray_delayerr_str = ''
    phasedarray_delayerr = 0.0
elif phasedarray_delayerr < 0.0:
    raise ValueError('phasedarray_delayerr must be non-negative.')
else:
    phasedarray_delayerr_str = 'derr_{0:.3f}ns'.format(phasedarray_delayerr)
    phasedarray_delayerr *= 1e-9  # ns -> seconds
if phasedarray_gainerr is None:
    phasedarray_gainerr_str = ''
    phasedarray_gainerr = 0.0
elif phasedarray_gainerr < 0.0:
    raise ValueError('phasedarray_gainerr must be non-negative.')
else:
    phasedarray_gainerr_str = '_gerr_{0:.2f}dB'.format(phasedarray_gainerr)
if nrand is None:
    nrandom_str = ''
    nrand = 1
elif nrand < 1:
    raise ValueError('nrandom must be positive')
else:
    nrandom_str = '_nrand_{0:0d}_'.format(nrand)
if (phasedarray_delayerr_str == '') and (phasedarray_gainerr_str == ''):
    # With no errors requested, multiple random realizations are pointless
    nrand = 1
    nrandom_str = ''
phasedarray_delaygain_err_str = phasedarray_delayerr_str + phasedarray_gainerr_str + nrandom_str
if (telescope_id.lower() == 'mwa') or (telescope_id.lower() == 'mwa_dipole'):
element_size = 0.74
element_shape = 'dipole'
if telescope_id.lower() == 'mwa': phased_array = True
elif telescope_id.lower() == 'paper':
element_size = 2.0
element_shape = 'dipole'
elif telescope_id.lower() == 'vla':
element_size = 25.0
element_shape = 'dish'
elif 'gmrt' in telescope_id.lower():
element_size = 45.0
element_shape = 'dish'
elif telescope_id.lower() == 'hera':
element_size = 14.0
element_shape = 'dish'
elif telescope_id.lower() == 'hirax':
element_size = 6.0
element_shape = 'dish'
elif telescope_id.lower() == 'custom':
if element_shape != 'delta':
if (element_shape is None) or (element_size is None):
raise ValueError('Both antenna element shape and size must be specified for the custom telescope type.')
elif element_size <= 0.0:
raise ValueError('Antenna element size must be positive.')
elif telescope_id.lower() == 'mwa_tools':
pass
else:
raise ValueError('telescope ID must be specified.')
if telescope_id.lower() == 'custom':
if element_shape == 'delta':
telescope_id = 'delta'
else:
telescope_id = '{0:.1f}m_{1:}'.format(element_size, element_shape)
if phased_array:
telescope_id = telescope_id.lower() + '_array'
telescope_str = telescope_id.lower()+'_'
# --- Establish the antenna-element orientation (Alt-Az or direction-cosine
# --- coordinates), applying instrument-appropriate defaults, then validate the
# --- ground-plane height.
if element_ocoords not in ['altaz', 'dircos']:
    if element_ocoords is not None:
        raise ValueError('Antenna element orientation must be "altaz" or "dircos"')
if element_orientation is None:
    if element_ocoords is not None:
        if element_ocoords == 'altaz':
            # Dipoles (incl. MWA) default to [alt=0, az=90]; others zenith.
            if (telescope_id.lower() == 'mwa') or (telescope_id.lower() == 'mwa_dipole') or (element_shape == 'dipole'):
                element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)
            else:
                element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)
        elif element_ocoords == 'dircos':
            if (telescope_id.lower() == 'mwa') or (telescope_id.lower() == 'mwa_dipole') or (element_shape == 'dipole'):
                element_orientation = NP.asarray([1.0, 0.0, 0.0]).reshape(1,-1)
            else:
                element_orientation = NP.asarray([0.0, 0.0, 1.0]).reshape(1,-1)
        else:
            raise ValueError('Invalid value specified antenna element orientation coordinate system.')
    else:
        # Neither orientation nor coordinate system given: default to Alt-Az.
        if (telescope_id.lower() == 'mwa') or (telescope_id.lower() == 'mwa_dipole') or (element_shape == 'dipole'):
            element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)
        else:
            element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)
        element_ocoords = 'altaz'
else:
    # Explicit orientation requires an explicit coordinate system and a
    # 2-element (altaz) or 3-element (dircos) vector.
    if element_ocoords is None:
        raise ValueError('Antenna element orientation coordinate system must be specified to describe the specified antenna orientation.')
element_orientation = NP.asarray(element_orientation).reshape(1,-1)
if (element_orientation.size < 2) or (element_orientation.size > 3):
    raise ValueError('Antenna element orientation must be a two- or three-element vector.')
elif (element_ocoords == 'altaz') and (element_orientation.size != 2):
    raise ValueError('Antenna element orientation must be a two-element vector if using Alt-Az coordinates.')
if ground_plane is None:
    ground_plane_str = 'no_ground_'
else:
    if ground_plane > 0.0:
        ground_plane_str = '{0:.1f}m_ground_'.format(ground_plane)
    else:
        raise ValueError('Height of antenna element above ground plane must be positive.')
# --- Optionally load an externally-supplied primary beam (FITS, HDF5, or
# --- pyuvdata UVBeam), normalize it, convert from dB if needed, and build the
# --- beam-usage string for filenames.
if use_external_beam:
    if beam_filefmt.lower() == 'fits':
        external_beam = fits.getdata(external_beam_file, extname='BEAM_{0}'.format(beam_pol))
        external_beam_freqs = fits.getdata(external_beam_file, extname='FREQS_{0}'.format(beam_pol)) # in MHz
        external_beam = external_beam.reshape(-1,external_beam_freqs.size) # npix x nfreqs
        prihdr = fits.getheader(external_beam_file, 0)
        beamunit = prihdr['GAINUNIT']
    elif beam_filefmt.lower() == 'hdf5':
        # NOTE(review): `.value` is the legacy h5py dataset accessor (removed
        # in h5py >= 3.0); this code targets older h5py.
        with h5py.File(external_beam_file, 'r') as fileobj:
            external_beam = fileobj['gain_info'][beam_pol].value
            external_beam = external_beam.T
            external_beam_freqs = fileobj['spectral_info']['freqs'].value
            beamunit = fileobj['header']['gainunit'].value
    elif beam_filefmt == 'uvbeam':
        if uvbeam_module_found:
            uvbm = UVBeam()
            uvbm.read_beamfits(external_beam_file)
            axis_vec_ind = 0 # for power beam
            spw_ind = 0 # spectral window index
            if beam_pol.lower() in ['x', 'e']:
                beam_pol_ind = 0
            else:
                beam_pol_ind = 1
            external_beam = uvbm.data_array[axis_vec_ind,spw_ind,beam_pol_ind,:,:].T # npix x nfreqs
            external_beam_freqs = uvbm.freq_array.ravel() # nfreqs (in Hz)
        else:
            raise ImportError('uvbeam module not installed/found')
        # Peak-normalize the UVBeam power beam; unit left dimensionless.
        if NP.abs(NP.abs(external_beam).max() - 1.0) > 1e-10:
            external_beam /= NP.abs(external_beam).max()
        beamunit = ''
    else:
        raise ValueError('Specified beam file format not currently supported')
    if beamunit.lower() == 'db':
        # Convert from dB to linear power gain.
        external_beam = 10**(external_beam/10.0)
    beam_usage_str = 'extpb_'+beam_id
    if beam_chromaticity:
        # FFT-based spectral interpolation requires an even number of channels,
        # so the last frequency sample is dropped.
        if pbeam_spec_interp_method == 'fft':
            external_beam = external_beam[:,:-1]
            external_beam_freqs = external_beam_freqs[:-1]
        beam_usage_str = beam_usage_str + '_chromatic'
    else:
        beam_usage_str = beam_usage_str + '_{0:.1f}_MHz'.format(select_beam_freq/1e6)+'_achromatic'
else:
    # Analytic (functional) primary beam; always treated as chromatic.
    beam_usage_str = 'funcpb'
    beam_usage_str = beam_usage_str + '_chromatic'
# --- Assemble the telescope descriptor dictionary, derive the effective
# --- collecting area when not given, and load phased-array element positions.
telescope = {}
if telescope_id.lower() in ['mwa', 'vla', 'gmrt', 'ugmrt', 'hera', 'paper', 'mwa_dipole', 'mwa_tools', 'hirax', 'chime']:
    telescope['id'] = telescope_id.lower()
telescope['shape'] = element_shape
telescope['size'] = element_size
telescope['orientation'] = element_orientation
telescope['ocoords'] = element_ocoords
telescope['groundplane'] = ground_plane
telescope['latitude'] = latitude
telescope['longitude'] = longitude
telescope['altitude'] = altitude
if A_eff is None:
    # Dipole/delta: A_eff = (lambda/2)^2; MWA tile = 16 dipoles.
    if (telescope['shape'] == 'dipole') or (telescope['shape'] == 'delta'):
        A_eff = (0.5*FCNST.c/freq)**2
        if (telescope_id.lower() == 'mwa') or phased_array:
            A_eff *= 16
    # Dish/gaussian: geometric aperture pi*(D/2)^2.
    if (telescope['shape'] == 'dish') or (telescope['shape'] == 'gaussian'):
        A_eff = NP.pi * (0.5*element_size)**2
element_locs = None
if phased_array:
    try:
        element_locs = NP.loadtxt(phased_elements_file, skiprows=1, comments='#', usecols=(0,1,2))
    except IOError:
        raise IOError('Could not open the specified file for phased array of antenna elements.')
if telescope_id.lower() == 'mwa':
    # MWA tile: 4x4 dipole grid at 1.1 m spacing, overriding any file input.
    xlocs, ylocs = NP.meshgrid(1.1*NP.linspace(-1.5,1.5,4), 1.1*NP.linspace(1.5,-1.5,4))
    element_locs = NP.hstack((xlocs.reshape(-1,1), ylocs.reshape(-1,1), NP.zeros(xlocs.size).reshape(-1,1)))
if element_locs is not None:
    telescope['element_locs'] = element_locs
# --- Exactly one snapshot-selection mode may be active; booleans/None-checks
# --- are summed as integers (True==1) to enforce mutual exclusivity.
if avg_drifts + beam_switch + (pick_snapshots is not None) + (snapshots_range is not None) + all_snapshots != 1:
    raise ValueError('One and only one of avg_drifts, beam_switch, pick_snapshots, snapshots_range, all_snapshots must be set')
snapshot_type_str = ''
if avg_drifts and (obs_mode == 'dns'):
    snapshot_type_str = 'drift_averaged_'
if beam_switch and (obs_mode == 'dns'):
    snapshot_type_str = 'beam_switches_'
if (snapshots_range is not None) and ((obs_mode == 'dns') or (obs_mode == 'lstbin')):
    snapshot_type_str = 'snaps_{0[0]:0d}-{0[1]:0d}_'.format(snapshots_range)
duration_str = ''
# --- Determine the per-snapshot pointings, LSTs, and accumulation times,
# --- either from a pointing file or from an automated drift/track setup.
# --- Statement order here is load-bearing (arrays are filtered in lockstep);
# --- documented in place rather than restructured.
if pointing_file is not None:
    pointing_init = None
    # Columns 1-3: Alt, Az (deg), LST (hours); column 0: observation IDs.
    pointing_info_from_file = NP.loadtxt(pointing_file, comments='#', usecols=(1,2,3), delimiter=',')
    obs_id = NP.loadtxt(pointing_file, comments='#', usecols=(0,), delimiter=',', dtype=str)
    if (telescope_id.lower() == 'mwa') or (telescope_id.lower() == 'mwa_tools') or (phased_array):
        # Column 4 holds semicolon-separated beamformer delay settings, in
        # units of 435 ps steps (MWA delay-line quantum).
        delays_str = NP.loadtxt(pointing_file, comments='#', usecols=(4,), delimiter=',', dtype=str)
        delays_list = [NP.fromstring(delaystr, dtype=float, sep=';', count=-1) for delaystr in delays_str]
        delay_settings = NP.asarray(delays_list)
        delay_settings *= 435e-12
        delays = NP.copy(delay_settings)
    if n_acc is None:
        n_acc = pointing_info_from_file.shape[0]
    # Truncate all per-snapshot arrays consistently to n_acc entries.
    pointing_info_from_file = pointing_info_from_file[:min(n_acc, pointing_info_from_file.shape[0]),:]
    obs_id = obs_id[:min(n_acc, pointing_info_from_file.shape[0])]
    if (telescope_id.lower() == 'mwa') or (telescope_id.lower() == 'mwa_tools') or (phased_array):
        delays = delay_settings[:min(n_acc, pointing_info_from_file.shape[0]),:]
    n_acc = min(n_acc, pointing_info_from_file.shape[0])
    pointings_altaz = pointing_info_from_file[:,:2].reshape(-1,2)
    pointings_altaz_orig = pointing_info_from_file[:,:2].reshape(-1,2)
    # LST hours -> degrees, then wrap into (-180, 180].
    lst = 15.0 * pointing_info_from_file[:,2]
    lst_wrapped = lst + 0.0
    lst_wrapped[lst_wrapped > 180.0] = lst_wrapped[lst_wrapped > 180.0] - 360.0
    # Extrapolate one extra edge so every snapshot has a right boundary.
    lst_edges = NP.concatenate((lst_wrapped, [lst_wrapped[-1]+lst_wrapped[-1]-lst_wrapped[-2]]))
    if obs_mode is None:
        obs_mode = 'custom'
    if (obs_mode == 'dns') and (avg_drifts or beam_switch):
        # Collapse consecutive drift scans: keep only snapshots where the
        # pointing moved by more than shift_threshold degrees.
        angle_diff = GEOM.sphdist(pointings_altaz[1:,1], pointings_altaz[1:,0], pointings_altaz[:-1,1], pointings_altaz[:-1,0])
        angle_diff = NP.concatenate(([0.0], angle_diff))
        shift_threshold = 1.0 # in degrees
        lst_wrapped = NP.concatenate(([lst_wrapped[0]], lst_wrapped[angle_diff > shift_threshold], [lst_wrapped[-1]]))
        n_acc = lst_wrapped.size - 1
        pointings_altaz = NP.vstack((pointings_altaz[0,:].reshape(-1,2), pointings_altaz[angle_diff>shift_threshold,:].reshape(-1,2)))
        obs_id = NP.concatenate(([obs_id[0]], obs_id[angle_diff>shift_threshold]))
        if (telescope_id.lower() == 'mwa') or (telescope_id.lower() == 'mwa_tools') or (phased_array):
            delays = NP.vstack((delay_settings[0,:], delay_settings[angle_diff>shift_threshold,:]))
        obs_mode = 'custom'
        if avg_drifts:
            lst_edges = NP.concatenate(([lst_edges[0]], lst_edges[angle_diff > shift_threshold], [lst_edges[-1]]))
        else:
            lst_edges_left = lst_wrapped[:-1] + 0.0
            lst_edges_right = NP.concatenate(([lst_edges[1]], lst_edges[NP.asarray(NP.where(angle_diff > shift_threshold)).ravel()+1]))
    elif snapshots_range is not None:
        # Select an inclusive snapshot index range (negative max wraps mod n_acc).
        snapshots_range[1] = snapshots_range[1] % n_acc
        if snapshots_range[0] > snapshots_range[1]:
            raise IndexError('min snaphost # must be <= max snapshot #')
        lst_wrapped = lst_wrapped[snapshots_range[0]:snapshots_range[1]+2]
        lst_edges = NP.copy(lst_wrapped)
        pointings_altaz = pointings_altaz[snapshots_range[0]:snapshots_range[1]+1,:]
        obs_id = obs_id[snapshots_range[0]:snapshots_range[1]+1]
        if (telescope_id.lower() == 'mwa') or (telescope_id.lower() == 'mwa_tools') or (phased_array):
            delays = delay_settings[snapshots_range[0]:snapshots_range[1]+1,:]
        n_acc = snapshots_range[1]-snapshots_range[0]+1
    elif pick_snapshots is not None:
        # Select an explicit list of snapshot indices.
        pick_snapshots = NP.asarray(pick_snapshots)
        n_acc = pick_snapshots.size
        lst_begin = NP.asarray(lst_wrapped[pick_snapshots])
        pointings_altaz = pointings_altaz[pick_snapshots,:]
        obs_id = obs_id[pick_snapshots]
        if (telescope_id.lower() == 'mwa') or (phased_array) or (telescope_id.lower() == 'mwa_tools'):
            delays = delay_settings[pick_snapshots,:]
        if obs_mode != 'lstbin':
            # Accumulation time from the LST span, scaled from sidereal to
            # solar seconds via `sday`.
            lst_end = NP.asarray(lst_wrapped[pick_snapshots+1])
            t_acc = (lst_end - lst_begin) / 15.0 * 3.6e3 * sday
            lst = 0.5 * (lst_begin + lst_end)
            obs_mode = 'custom'
        else:
            t_acc = 112.0 + NP.zeros(n_acc) # in seconds (needs to be generalized)
            lst = lst_wrapped[pick_snapshots] + 0.5 * t_acc/3.6e3 * 15.0 / sday
    if pick_snapshots is None:
        # Default: mid-snapshot LSTs and durations from the edge arrays.
        if obs_mode != 'lstbin':
            if not beam_switch:
                lst = 0.5*(lst_edges[1:]+lst_edges[:-1])
                t_acc = (lst_edges[1:]-lst_edges[:-1]) / 15.0 * 3.6e3 * sday
            else:
                lst = 0.5*(lst_edges_left + lst_edges_right)
                t_acc = (lst_edges_right - lst_edges_left) / 15.0 * 3.6e3 * sday
        else:
            t_acc = 112.0 + NP.zeros(n_acc) # in seconds (needs to be generalized)
            lst = lst_wrapped + 0.5 * t_acc/3.6e3 * 15.0 / sday
    # Initialize time objects and LST from obs_date and chosen LST
    lst_init = lst[0]
    tobj0 = Time(obs_date.replace('/', '-'), format='iso', scale='utc', location=('{0:.6f}d'.format(telescope['longitude']), '{0:.6f}d'.format(telescope['latitude']), '{0:.6f}m'.format(telescope['altitude']))) # Time object at obs_date beginning
    jd_init = ET.julian_date_from_LAST(lst_init/15.0, tobj0.jd, telescope['longitude']/15.0) # Julian date at beginning of observation
    jd_init = jd_init[0]
    tobj_init = Time(jd_init, format='jd', scale='utc', location=('{0:.6f}d'.format(telescope['longitude']), '{0:.6f}d'.format(telescope['latitude']), '{0:.6f}m'.format(telescope['altitude']))) # Time object at beginning of observation
    lst_init = tobj_init.sidereal_time('apparent').deg # Update LST init
    tobjs = tobj_init + NP.arange(n_acc) * t_acc * U.s # Time objects for the observation
    lst = tobjs.sidereal_time('apparent').deg # Local Apparent Sidereal time (in degrees) for the observation
    pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
    pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
    pointings_radec = ET.hadec2radec(pointings_hadec, lst, obstime=tobjs[0], epoch_RA=tobjs[0], time_type=None)
    t_obs = NP.sum(t_acc)
elif (pointing_drift_init is not None) or (pointing_track_init is not None):
    # Automated run: either a fixed drift pointing or a tracked sky position.
    pointing_file = None
    if t_acc is None:
        raise NameError('t_acc must be provided for an automated observing run')
    if (n_acc is None) and (t_obs is None):
        raise NameError('n_acc or t_obs must be provided for an automated observing run')
    elif (n_acc is not None) and (t_obs is not None):
        raise ValueError('Only one of n_acc or t_obs must be provided for an automated observing run')
    elif n_acc is None:
        n_acc = int(t_obs/t_acc)
    else:
        t_obs = n_acc * t_acc
    if obs_mode is None:
        obs_mode = 'track'
    elif obs_mode not in ['track', 'drift']:
        raise ValueError('Invalid specification for obs_mode')
    # Initialize time objects and LST from obs_date and chosen LST
    if pointing_info['lst_init'] is not None:
        lst_init = pointing_info['lst_init'] * 15.0 # in deg
    else:
        lst_init = None
    jd_init = pointing_info['jd_init']
    if jd_init is None:
        if ((obs_date is not None) and (lst_init is not None)):
            tobj0 = Time(obs_date.replace('/', '-'), format='iso', scale='utc', location=('{0:.6f}d'.format(telescope['longitude']), '{0:.6f}d'.format(telescope['latitude']), '{0:.6f}m'.format(telescope['altitude']))) # Time object at obs_date beginning
            jd_init = ET.julian_date_from_LAST(lst_init/15.0, tobj0.jd, telescope['longitude']/15.0) # Julian date at beginning of observation
            jd_init = jd_init[0]
    tobj_init = Time(jd_init, format='jd', scale='utc', location=EarthLocation(lon=telescope['longitude']*U.deg, lat=telescope['latitude']*U.deg, height=telescope['altitude']*U.m)) # Time object at beginning of observation
    lst_init = tobj_init.sidereal_time('apparent').deg # Update LST init
    tobjs = tobj_init + NP.arange(n_acc) * t_acc * U.s # Time objects for the observation
    lst = tobjs.sidereal_time('apparent').deg # Local Apparent Sidereal time (in degrees) for the observation
    if obs_mode == 'drift':
        # Drift: one fixed HA-Dec pointing repeated for all snapshots.
        alt = pointing_drift_init['alt']
        az = pointing_drift_init['az']
        ha = pointing_drift_init['ha']
        dec = pointing_drift_init['dec']
        if (alt is None) or (az is None):
            if (ha is None) or (dec is None):
                raise ValueError('One of alt-az or ha-dec pairs must be specified')
            hadec_init = NP.asarray([ha, dec])
        else:
            altaz_init = NP.asarray([alt, az])
            hadec_init = GEOM.altaz2hadec(altaz_init.reshape(1,-1), latitude, units='degrees')
        pointings_hadec = NP.repeat(hadec_init.reshape(1,-1), n_acc, axis=0)
    if obs_mode == 'track':
        # Track: fixed RA-Dec target; HA advances by 15 deg/hr of accumulation.
        ra = pointing_track_init['ra']
        dec = pointing_track_init['dec']
        epoch = pointing_track_init['epoch']
        track_init_pointing_at_epoch = SkyCoord(ra=ra*U.deg, dec=dec*U.deg, frame='fk5', equinox='J{0}'.format(epoch))
        track_init_pointing_at_tinit = track_init_pointing_at_epoch.transform_to(FK5(equinox=tobj_init))
        ha = lst_init - track_init_pointing_at_tinit.ra.deg # Initial HA in degrees
        pointings_hadec = NP.hstack((ha + (t_acc/3.6e3)*15.0*NP.arange(n_acc).reshape(-1,1), track_init_pointing_at_tinit.dec.deg+NP.zeros(n_acc).reshape(-1,1)))
    t_acc = t_acc + NP.zeros(n_acc)
    pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
    pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
    pointings_radec = ET.hadec2radec(pointings_hadec, lst, obstime=tobjs[0], epoch_RA=tobjs[0], time_type=None)
    # pointings_radec_v2 = ET.altaz2radec(pointings_altaz, EarthLocation(lat=telescope['latitude']*U.deg, lon=telescope['longitude']*U.deg, height=telescope['altitude']*U.m), obstime=tobjs[0], epoch_RA=tobjs[0], time_type=None)
    # pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
duration_str = '_{0:0d}x{1:.1f}s'.format(n_acc, t_acc[0])
# Create organized directory structure
init_time = tobj_init
# NOTE(review): datetime components are not zero-padded here, so e.g.
# 2016/1/5 yields '201615_...' — ambiguous but preserved as-is.
obsdatetime_dir = '{0}{1}{2}_{3}{4}{5}/'.format(init_time.datetime.year, init_time.datetime.month, init_time.datetime.day, init_time.datetime.hour, init_time.datetime.minute, init_time.datetime.second)
sim_dir = 'simdata/'
meta_dir = 'metainfo/'
roi_dir = 'roi/'
skymod_dir = 'skymodel/'
# Create each output subdirectory, tolerating pre-existing ones (EEXIST).
try:
    os.makedirs(rootdir+project_dir+simid+sim_dir, 0755)
except OSError as exception:
    if exception.errno == errno.EEXIST and os.path.isdir(rootdir+project_dir+simid+sim_dir):
        pass
    else:
        raise
try:
    os.makedirs(rootdir+project_dir+simid+meta_dir, 0755)
except OSError as exception:
    if exception.errno == errno.EEXIST and os.path.isdir(rootdir+project_dir+simid+meta_dir):
        pass
    else:
        raise
try:
    os.makedirs(rootdir+project_dir+simid+roi_dir, 0755)
except OSError as exception:
    if exception.errno == errno.EEXIST and os.path.isdir(rootdir+project_dir+simid+roi_dir):
        pass
    else:
        raise
# Sky-model output is skipped at the most aggressive cleanup level.
if cleanup < 3:
    try:
        os.makedirs(rootdir+project_dir+simid+skymod_dir, 0755)
    except OSError as exception:
        if exception.errno == errno.EEXIST and os.path.isdir(rootdir+project_dir+simid+skymod_dir):
            pass
        else:
            raise
# Wrap all pointing angle representations into [0, 360) (sign-preserving fmod).
pointings_radec = NP.fmod(pointings_radec, 360.0)
pointings_hadec = NP.fmod(pointings_hadec, 360.0)
pointings_altaz = NP.fmod(pointings_altaz, 360.0)
# --- Sky-model selection: initialize every model flag to False, validate the
# --- requested model string, then set exactly one flag True.
# (Fix: removed a redundant duplicate `use_MSS = False` assignment that
# appeared twice in the original flag initialization.)
use_GSM = False
use_DSM = False
use_spectrum = False
use_pygsm = False
use_CSM = False
use_SUMSS = False
use_GLEAM = False
use_USM = False
use_noise = False
use_MSS = False
use_custom = False
use_skymod = False
use_NVSS = False
use_HI_monopole = False
use_HI_cube = False
use_HI_fluctuations = False
if sky_str not in ['asm', 'dsm', 'csm', 'nvss', 'sumss', 'gleam', 'mwacs', 'custom', 'usm', 'noise', 'mss', 'HI_cube', 'HI_monopole', 'HI_fluctuations', 'skymod_file', 'gsm2008', 'gsm2016']:
    raise ValueError('Invalid foreground model string specified.')
if sky_str == 'asm':
    use_GSM = True
elif sky_str == 'dsm':
    use_DSM = True
elif sky_str == 'fullspectrum':
    # NOTE(review): unreachable — 'fullspectrum' is not in the accepted list
    # above, so this branch can never run; kept for interface stability.
    use_spectrum = True
elif (sky_str == 'gsm2008') or (sky_str == 'gsm2016'):
    use_pygsm = True
elif sky_str == 'csm':
    use_CSM = True
elif sky_str == 'sumss':
    use_SUMSS = True
elif sky_str == 'gleam':
    use_GLEAM = True
elif sky_str == 'custom':
    use_custom = True
elif sky_str == 'skymod_file':
    use_skymod = True
elif sky_str == 'nvss':
    use_NVSS = True
elif sky_str == 'usm':
    use_USM = True
elif sky_str == 'noise':
    use_noise = True
elif sky_str == 'HI_monopole':
    use_HI_monopole = True
elif sky_str == 'HI_fluctuations':
    use_HI_fluctuations = True
elif sky_str == 'HI_cube':
    use_HI_cube = True
# --- Unpack the global (monopole) HI signal parameters and fetch the
# --- baseline/array layout information from the project module.
if global_HI_parms is not None:
    try:
        # Py2 map() returns a list; coerce each parameter to float.
        global_HI_parms = NP.asarray(map(float, global_HI_parms))
    except ValueError:
        raise ValueError('Values in global_EoR_parms must be convertible to float')
    T_xi0 = NP.float(global_HI_parms[0])    # brightness temperature amplitude
    freq_half = global_HI_parms[1]          # frequency of signal half-power
    dz_half = global_HI_parms[2]            # redshift width of transition
arrayinfo = RI.getBaselineInfo(parms)
layout_info = arrayinfo['layout_info']
bl = arrayinfo['bl']
bl_label = arrayinfo['label']
bl_id = arrayinfo['id']
blgroups = arrayinfo['groups']
bl_reversemap = arrayinfo['reversemap']
total_baselines = bl.shape[0]
# NOTE(review): bl_label/bl_id are assigned just above, so these NameError
# fallbacks appear vestigial — verify before removing.
try:
    labels = bl_label.tolist()
except NameError:
    labels = []
    labels += [label_prefix+'{0:0d}'.format(i+1) for i in xrange(bl.shape[0])]
try:
    ids = bl_id.tolist()
except NameError:
    ids = range(bl.shape[0])
# --- MPI work-distribution axis -------------------------------------------
# mpi_key selects which axis the computation is parallelized over across MPI
# ranks: sky sources ('src'), baselines ('bl'), or frequency channels ('freq').
# Exactly one of the three mpi_on_* flags ends up True.
if not isinstance(mpi_key, str):
    raise TypeError('MPI key must be a string')
if mpi_key not in ['src', 'bl', 'freq']:
    # Fix: message now lists every accepted value (previously omitted "freq").
    raise ValueError('MPI key must be set on "src", "bl" or "freq"')
if mpi_key == 'src':
    mpi_on_src = True
    # BUGFIX: was misspelled `mpi_ob_bl`, which left mpi_on_bl undefined in
    # this branch and would raise NameError wherever mpi_on_bl is later read.
    mpi_on_bl = False
    mpi_on_freq = False
elif mpi_key == 'bl':
    mpi_on_src = False
    mpi_on_bl = True
    mpi_on_freq = False
else:
    mpi_on_freq = True
    mpi_on_src = False
    mpi_on_bl = False
# mpi_eqvol toggles synchronous (equal-volume) vs asynchronous work division.
if not isinstance(mpi_eqvol, bool):
    raise TypeError('MPI equal volume parameter must be boolean')
if mpi_eqvol:
    mpi_sync = True
    mpi_async = False
else:
    mpi_sync = False
    mpi_async = True
# --- Frequency axis and baseline bandpass setup.
# NOTE(review): NP.float is a legacy alias for the builtin float (removed in
# NumPy >= 1.24); this code targets older NumPy.
freq = NP.float(freq)
freq_resolution = NP.float(freq_resolution)
base_bpass = 1.0*NP.ones(nchan)
bandpass_shape = 1.0*NP.ones(nchan)
# Channel centers, offset so `freq` sits at mid-band; stored in GHz.
chans = (freq + (NP.arange(nchan) - 0.5 * nchan) * freq_resolution)/ 1e9 # in GHz
oversampling_factor = 1.0 + f_pad
bandpass_str = '{0:0d}x{1:.1f}_kHz'.format(nchan, freq_resolution/1e3)
# Flux cuts default to being evaluated at the center frequency.
if fluxcut_freq is None:
    fluxcut_freq = freq
else:
    fluxcut_freq = NP.float(fluxcut_freq)
flagged_edge_channels = []
pfb_str = ''
pfb_str2 = ''
# --- Build the polyphase filter bank (PFB) bandpass shape (empirical or from
# --- a measured file), then apply edge/channel flagging and the antenna
# --- bandpass. Integer divisions below rely on Python 2 floor semantics.
if pfb_method is not None:
    if pfb_method == 'empirical':
        bandpass_shape = DSP.PFB_empirical(nchan, 32, 0.25, 0.25)
    elif pfb_method == 'theoretical':
        pfbhdulist = fits.open(pfb_file)
        pfbdata = pfbhdulist[0].data
        pfbfreq = pfbhdulist[1].data
        # Normalize each channel's response to its peak (data are in dB).
        pfb_norm = NP.amax(pfbdata, axis=0).reshape(1,-1)
        pfbdata_norm = pfbdata - pfb_norm
        pfbwin = 10 * NP.log10(NP.sum(10**(pfbdata_norm/10), axis=1))
        # Restrict to 10% beyond the simulated band before interpolating.
        freq_range = [0.9*chans.min(), 1.1*chans.max()]
        useful_freq_range = NP.logical_and(pfbfreq >= freq_range[0]*1e3, pfbfreq <=freq_range[1]*1e3)
        # pfb_interp_func = interpolate.interp1d(pfbfreq[useful_freq_range]/1e3, pfbwin[useful_freq_range])
        # pfbwin_interp = pfb_interp_func(chans)
        pfbwin_interp = NP.interp(chans, pfbfreq[useful_freq_range]/1e3, pfbwin[useful_freq_range])
        bandpass_shape = 10**(pfbwin_interp/10)
    if flag_repeat_edge_channels:
        if NP.any(n_edge_flag > 0):
            # Flag channels around each coarse-channel edge, anchored at the
            # bandpass minimum; Py2 integer division in nchan/coarse_channel_width.
            pfb_edge_channels = (bandpass_shape.argmin() + NP.arange(nchan/coarse_channel_width)*coarse_channel_width) % nchan
            # pfb_edge_channels = bandpass_shape.argsort()[:int(1.0*nchan/coarse_channel_width)]
            # wts = NP.exp(-0.5*((NP.arange(bandpass_shape.size)-0.5*bandpass_shape.size)/4.0)**2)/(4.0*NP.sqrt(2*NP.pi))
            # wts_shift = NP.fft.fftshift(wts)
            # freq_wts = NP.fft.fft(wts_shift)
            # pfb_filtered = DSP.fft_filter(bandpass_shape.ravel(), wts=freq_wts.ravel(), passband='high')
            # pfb_edge_channels = pfb_filtered.argsort()[:int(1.0*nchan/coarse_channel_width)]
            pfb_edge_channels = NP.hstack((pfb_edge_channels.ravel(), NP.asarray([pfb_edge_channels.min()-coarse_channel_width, pfb_edge_channels.max()+coarse_channel_width])))
            flagged_edge_channels += [range(max(0,pfb_edge-n_edge_flag[0]),min(nchan,pfb_edge+n_edge_flag[1])) for pfb_edge in pfb_edge_channels]
else:
    pfb_str = 'no_pfb_'
    pfb_str2 = '_no_pfb'
# Optional measured antenna bandpass; interpolated onto the channel grid by a
# nearest-neighbour lookup. Only the positive-frequency half of the stored
# band is used (Py2 integer division of .size/2).
if ant_bpass_file is not None:
    with NP.load(ant_bpass_file) as ant_bpass_fileobj:
        ant_bpass_freq = ant_bpass_fileobj['faxis']
        ant_bpass_ref = ant_bpass_fileobj['band']
    ant_bpass_ref /= NP.abs(ant_bpass_ref).max()
    ant_bpass_freq = ant_bpass_freq[ant_bpass_freq.size/2:]
    ant_bpass_ref = ant_bpass_ref[ant_bpass_ref.size/2:]
    chanind, ant_bpass, fdist = LKP.lookup_1NN_new(ant_bpass_freq.reshape(-1,1)/1e9, ant_bpass_ref.reshape(-1,1), chans.reshape(-1,1), distance_ULIM=freq_resolution/1e9, remove_oob=True)
else:
    ant_bpass = NP.ones(nchan)
window = nchan * DSP.windowing(nchan, shape=bpass_shape, pad_width=n_pad, centering=True, area_normalize=True)
if bandpass_correct:
    # Deconvolve the PFB shape from the data; correction stored for noise too.
    bpcorr = 1/bandpass_shape
    bandpass_shape = NP.ones(base_bpass.size)
else:
    bpcorr = 1.0*NP.ones(nchan)
noise_bpcorr = 1.0*NP.ones(nchan)
if noise_bandpass_correct:
    noise_bpcorr = NP.copy(bpcorr)
# Edge flagging applied directly when edges are not per-coarse-channel.
if not flag_repeat_edge_channels:
    flagged_edge_channels += [range(0,n_edge_flag[0])]
    flagged_edge_channels += [range(nchan-n_edge_flag[1],nchan)]
flagged_channels = flagged_edge_channels
if flag_chan[0] >= 0:
    flag_chan = flag_chan[flag_chan < nchan]
    if bp_flag_repeat:
        # Repeat user flags in every coarse channel.
        flag_chan = NP.mod(flag_chan, coarse_channel_width)
        flagged_channels += [[i*coarse_channel_width+flagchan for i in range(nchan/coarse_channel_width) for flagchan in flag_chan]]
    else:
        flagged_channels += [flag_chan.tolist()]
# Flatten, de-duplicate, and zero-out the flagged channels in the bandpass.
flagged_channels = [x for y in flagged_channels for x in y]
flagged_channels = list(set(flagged_channels))
bandpass_shape[flagged_channels] = 0.0
bpass = base_bpass * bandpass_shape
# --- Validate sky sectorization, choose which HI simulation set (Lidz vs
# --- 21cmFAST) supplies the EoR cube, and format spectral-index strings.
if not isinstance(n_sky_sectors, int):
    raise TypeError('n_sky_sectors must be an integer')
elif (n_sky_sectors < 1):
    n_sky_sectors = 1
if use_HI_cube:
    if not isinstance(use_lidz, bool):
        raise TypeError('Parameter specifying use of Lidz simulations must be Boolean')
    if not isinstance(use_21cmfast, bool):
        raise TypeError('Parameter specifying use of 21cmfast simulations must be Boolean')
if use_HI_monopole or use_HI_fluctuations or use_HI_cube:
    if use_lidz and use_21cmfast:
        raise ValueError('Only one of Adam Lidz or 21CMFAST simulations can be chosen')
    # Default to the Lidz cube when neither simulation set is selected.
    if not use_lidz and not use_21cmfast:
        use_lidz = True
        use_21cmfast = False
        eor_simfile = rootdir+'EoR_simulations/Adam_Lidz/Boom_tiles/hpxcube_138.915-195.235_MHz_80.0_kHz_nside_{0:0d}.fits'.format(nside)
    elif use_lidz:
        eor_simfile = rootdir+'EoR_simulations/Adam_Lidz/Boom_tiles/hpxcube_138.915-195.235_MHz_80.0_kHz_nside_{0:0d}.fits'.format(nside)
    elif use_21cmfast:
        # 21cmFAST input path not wired up here.
        pass
spindex_rms_str = ''
spindex_seed_str = ''
if not isinstance(spindex_rms, (int,float)):
    raise TypeError('Spectral Index rms must be a scalar')
if spindex_rms > 0.0:
    spindex_rms_str = '{0:.1f}'.format(spindex_rms)
else:
    spindex_rms = 0.0
if spindex_seed is not None:
    if not isinstance(spindex_seed, (int, float)):
        raise TypeError('Spectral index random seed must be a scalar')
    # NOTE(review): a float seed passes the isinstance check but the 'd'
    # format spec below raises on floats — verify seeds are always ints.
    spindex_seed_str = '{0:0d}_'.format(spindex_seed)
# --- Rank-0 only: construct the sky model for the selected foreground/EoR
# --- option. NOTE(review): this `if rank == 0:` branch continues beyond the
# --- end of this chunk (the GSM/NVSS catalog assembly is cut off mid-branch).
if rank == 0:
    if use_HI_fluctuations or use_HI_cube:
        # Load a HEALPix EoR brightness-temperature cube and match its
        # frequency planes to the simulated channels by nearest neighbour.
        hdulist = fits.open(eor_simfile)
        nexten = hdulist['PRIMARY'].header['NEXTEN']
        fitstype = hdulist['PRIMARY'].header['FITSTYPE']
        temperatures = None
        extnames = [hdulist[i].header['EXTNAME'] for i in xrange(1,nexten+1)]
        if fitstype == 'IMAGE':
            eor_simfreq = hdulist['FREQUENCY'].data['Frequency [MHz]']
        else:
            # Frequency encoded in each extension name, e.g. '150.0 MHz'.
            eor_simfreq = [float(extname.split(' ')[0]) for extname in extnames]
            eor_simfreq = NP.asarray(eor_simfreq)
        eor_freq_resolution = eor_simfreq[1] - eor_simfreq[0]
        ind_chans, ind_eor_simfreq, dfrequency = LKP.find_1NN(eor_simfreq.reshape(-1,1), 1e3*chans.reshape(-1,1), distance_ULIM=0.5*eor_freq_resolution, remove_oob=True)
        eor_simfreq = eor_simfreq[ind_eor_simfreq]
        if fitstype == 'IMAGE':
            temperatures = hdulist['TEMPERATURE'].data[:,ind_eor_simfreq]
        else:
            # Accumulate per-frequency temperature columns (npix x nfreq).
            for i in xrange(eor_simfreq.size):
                if i == 0:
                    temperatures = hdulist[ind_eor_simfreq[i]+1].data['Temperature'].reshape(-1,1)
                else:
                    temperatures = NP.hstack((temperatures, hdulist[ind_eor_simfreq[i]+1].data['Temperature'].reshape(-1,1)))
        if use_HI_fluctuations:
            # Keep only fluctuations: subtract the per-frequency sky mean.
            temperatures = temperatures - NP.mean(temperatures, axis=0, keepdims=True)
        pixres = hdulist['PRIMARY'].header['PIXAREA']
        coords_table = hdulist['COORDINATE'].data
        ra_deg_EoR = coords_table['RA']
        dec_deg_EoR = coords_table['DEC']
        # Rayleigh-Jeans conversion: T -> flux density per pixel, in Jy.
        fluxes_EoR = temperatures * (2.0* FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy
        freq_EoR = freq/1e9
        hdulist.close()
        flux_unit = 'Jy'
        catlabel = 'HI-cube'
        spec_type = 'spectrum'
        spec_parms = {}
        skymod_init_parms = {'name': catlabel, 'frequency': chans*1e9, 'location': NP.hstack((ra_deg_EoR.reshape(-1,1), dec_deg_EoR.reshape(-1,1))), 'spec_type': spec_type, 'spec_parms': spec_parms, 'spectrum': fluxes_EoR}
        skymod = SM.SkyModel(init_parms=skymod_init_parms, init_file=None)
    elif use_HI_monopole:
        # Uniform-sky tanh-model monopole: one identical spectrum per pixel.
        theta, phi = HP.pix2ang(nside, NP.arange(HP.nside2npix(nside)))
        gc = Galactic(l=NP.degrees(phi), b=90.0-NP.degrees(theta), unit=(U.degree, U.degree))
        radec = gc.fk5
        ra_deg_EoR = radec.ra.degree
        dec_deg_EoR = radec.dec.degree
        pixres = HP.nside2pixarea(nside) # pixel solid angle (steradians)
        catlabel = 'HI-monopole'
        spec_type = 'func'
        spec_parms = {}
        spec_parms['name'] = NP.repeat('tanh', ra_deg_EoR.size)
        spec_parms['freq-ref'] = freq_half + NP.zeros(ra_deg_EoR.size)
        spec_parms['flux-scale'] = T_xi0 * (2.0* FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy
        spec_parms['flux-offset'] = 0.5*spec_parms['flux-scale'] + NP.zeros(ra_deg_EoR.size)
        spec_parms['z-width'] = dz_half + NP.zeros(ra_deg_EoR.size)
        flux_unit = 'Jy'
        skymod_init_parms = {'name': catlabel, 'frequency': chans*1e9, 'location': NP.hstack((ra_deg_EoR.reshape(-1,1), dec_deg_EoR.reshape(-1,1))), 'spec_type': spec_type, 'spec_parms': spec_parms}
        skymod = SM.SkyModel(init_parms=skymod_init_parms, init_file=None)
        spectrum = skymod.generate_spectrum()
    elif use_GSM:
        # All-sky model: diffuse (DSM at 150 MHz) + SUMSS + NVSS point sources.
        dsm_file = DSM_file_prefix+'_150.0_MHz_nside_{0:0d}.fits'.format(nside)
        # dsm_file = DSM_file_prefix+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)
        hdulist = fits.open(dsm_file)
        pixres = hdulist[0].header['PIXAREA']
        dsm_table = hdulist[1].data
        ra_deg_DSM = dsm_table['RA']
        dec_deg_DSM = dsm_table['DEC']
        temperatures = dsm_table['T_{0:.0f}'.format(150.0)]
        # temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
        fluxes_DSM = temperatures * 2.0 * FCNST.k * (150e6/FCNST.c)**2 * pixres / CNST.Jy
        # fluxes_DSM = temperatures * (2.0* FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy
        # +2.0 converts temperature spectral index to flux-density convention.
        spindex = dsm_table['spindex'] + 2.0
        freq_DSM = 0.150 # in GHz
        # freq_DSM = freq/1e9 # in GHz
        freq_catalog = freq_DSM * 1e9 + NP.zeros(fluxes_DSM.size)
        catlabel = NP.repeat('DSM', fluxes_DSM.size)
        ra_deg = ra_deg_DSM + 0.0
        dec_deg = dec_deg_DSM + 0.0
        majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
        minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
        fluxes = fluxes_DSM + 0.0
        # SUMSS catalog: sexagesimal RA/Dec parsed from fixed columns; Dec
        # sign recovered from the string form of the degrees field.
        freq_SUMSS = 0.843 # in GHz
        catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
        ra_deg_SUMSS = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
        dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
        sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
        sgn_dec = 1.0*NP.ones(dec_dd.size)
        sgn_dec[sgn_dec_str == '-'] = -1.0
        dec_deg_SUMSS = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
        fmajax = catalog[:,7]
        fminax = catalog[:,8]
        fpa = catalog[:,9]
        dmajax = catalog[:,10]
        dminax = catalog[:,11]
        # Point sources: zero deconvolved major and minor axes.
        PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
        ra_deg_SUMSS = ra_deg_SUMSS[PS_ind]
        dec_deg_SUMSS = dec_deg_SUMSS[PS_ind]
        fint = catalog[PS_ind,6] * 1e-3
        if spindex_seed is None:
            spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
        else:
            NP.random.seed(spindex_seed)
            spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
        fmajax = fmajax[PS_ind]
        fminax = fminax[PS_ind]
        fpa = fpa[PS_ind]
        dmajax = dmajax[PS_ind]
        dminax = dminax[PS_ind]
        # Keep sources brighter than 10 Jy when extrapolated to `freq`.
        bright_source_ind = fint >= 10.0 * (freq_SUMSS*1e9/freq)**spindex_SUMSS
        ra_deg_SUMSS = ra_deg_SUMSS[bright_source_ind]
        dec_deg_SUMSS = dec_deg_SUMSS[bright_source_ind]
        fint = fint[bright_source_ind]
        fmajax = fmajax[bright_source_ind]
        fminax = fminax[bright_source_ind]
        fpa = fpa[bright_source_ind]
        dmajax = dmajax[bright_source_ind]
        dminax = dminax[bright_source_ind]
        spindex_SUMSS = spindex_SUMSS[bright_source_ind]
        valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
        ra_deg_SUMSS = ra_deg_SUMSS[valid_ind]
        dec_deg_SUMSS = dec_deg_SUMSS[valid_ind]
        fint = fint[valid_ind]
        fmajax = fmajax[valid_ind]
        fminax = fminax[valid_ind]
        fpa = fpa[valid_ind]
        spindex_SUMSS = spindex_SUMSS[valid_ind]
        # Append SUMSS sources to the running combined catalog arrays.
        freq_catalog = NP.concatenate((freq_catalog, freq_SUMSS*1e9 + NP.zeros(fint.size)))
        catlabel = NP.concatenate((catlabel, NP.repeat('SUMSS', fint.size)))
        ra_deg = NP.concatenate((ra_deg, ra_deg_SUMSS))
        dec_deg = NP.concatenate((dec_deg, dec_deg_SUMSS))
        spindex = NP.concatenate((spindex, spindex_SUMSS))
        majax = NP.concatenate((majax, fmajax/3.6e3))
        minax = NP.concatenate((minax, fminax/3.6e3))
        fluxes = NP.concatenate((fluxes, fint))
        # NVSS catalog: covers dec > -30 where SUMSS does not.
        freq_NVSS = 1.4 # in GHz
        hdulist = fits.open(NVSS_file)
        ra_deg_NVSS = hdulist[1].data['RA(2000)']
        dec_deg_NVSS = hdulist[1].data['DEC(2000)']
        nvss_fpeak = hdulist[1].data['PEAK INT']
        nvss_majax = hdulist[1].data['MAJOR AX']
        nvss_minax = hdulist[1].data['MINOR AX']
        hdulist.close()
        if spindex_seed is None:
            spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
        else:
            # Distinct seed (2x) so NVSS draws differ from SUMSS draws.
            NP.random.seed(2*spindex_seed)
            spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
        not_in_SUMSS_ind = NP.logical_and(dec_deg_NVSS > -30.0, dec_deg_NVSS <= min(90.0, latitude+90.0))
        bright_source_ind = nvss_fpeak >= 10.0 * (freq_NVSS*1e9/freq)**(spindex_NVSS)
        # NOTE(review): the sqrt argument can be negative (NaN), which then
        # compares False and silently drops those sources — verify intended.
        PS_ind = NP.sqrt(nvss_majax**2-(0.75/60.0)**2) < 14.0/3.6e3
        count_valid = NP.sum(NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind))
        nvss_fpeak = nvss_fpeak[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]
        freq_catalog = NP.concatenate((freq_catalog, freq_NVSS*1e9 + NP.zeros(count_valid)))
        catlabel = NP.concatenate((catlabel, NP.repeat('NVSS',count_valid)))
        ra_deg = NP.concatenate((ra_deg, ra_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
        dec_deg = NP.concatenate((dec_deg, dec_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
        spindex = NP.concatenate((spindex, spindex_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
        majax = NP.concatenate((majax, nvss_majax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
minax = NP.concatenate((minax, nvss_minax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))
fluxes = NP.concatenate((fluxes, nvss_fpeak))
spec_type = 'func'
spec_parms = {}
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
flux_unit = 'Jy'
skymod_init_parms = {'name': catlabel, 'frequency': chans*1e9, 'location': NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'spec_type': spec_type, 'spec_parms': spec_parms, 'src_shape': NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), 'src_shape_units': ['degree','degree','degree']}
skymod = SM.SkyModel(init_parms=skymod_init_parms, init_file=None)
elif use_DSM:
    # Diffuse sky model (DSM): load the precomputed 150 MHz HEALPix map from
    # FITS and convert per-pixel brightness temperature to flux density.
    dsm_file = DSM_file_prefix+'_150.0_MHz_nside_{0:0d}.fits'.format(nside)
    # dsm_file = DSM_file_prefix+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)
    hdulist = fits.open(dsm_file)
    pixres = hdulist[0].header['PIXAREA']  # pixel solid angle -- presumably steradians, TODO confirm
    dsm_table = hdulist[1].data
    ra_deg_DSM = dsm_table['RA']
    dec_deg_DSM = dsm_table['DEC']
    temperatures = dsm_table['T_{0:.0f}'.format(150.0)]
    # temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
    # Rayleigh-Jeans conversion at 150 MHz: S [Jy] = 2 k T (nu/c)^2 * pixres / Jy
    fluxes_DSM = temperatures * 2.0 * FCNST.k * (150e6/FCNST.c)**2 * pixres / CNST.Jy
    # fluxes_DSM = temperatures * (2.0 * FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy
    flux_unit = 'Jy'
    # +2.0 converts a temperature spectral index to a flux-density spectral index
    spindex = dsm_table['spindex'] + 2.0
    freq_DSM = 0.150 # in GHz
    # freq_DSM = freq/1e9 # in GHz
    freq_catalog = freq_DSM * 1e9 + NP.zeros(fluxes_DSM.size)
    catlabel = NP.repeat('DSM', fluxes_DSM.size)
    ra_deg = ra_deg_DSM
    dec_deg = dec_deg_DSM
    # Each "source" is one HEALPix pixel across (degrees)
    majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
    minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)
    # majax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
    # minax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))
    fluxes = fluxes_DSM
    hdulist.close()
    # Per-pixel power-law spectral model
    spec_type = 'func'
    spec_parms = {}
    spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
    spec_parms['power-law-index'] = spindex
    spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
    spec_parms['flux-scale'] = fluxes
    spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
    spec_parms['freq-width'] = NP.zeros(ra_deg.size)
    skymod_init_parms = {'name': catlabel, 'frequency': chans*1e9, 'location': NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'spec_type': spec_type, 'spec_parms': spec_parms, 'src_shape': NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), 'src_shape_units': ['degree','degree','degree']}
    skymod = SM.SkyModel(init_parms=skymod_init_parms, init_file=None)
elif use_spectrum:
    # Initialize the sky model from a previously saved spectrum file;
    # the spectrum itself is loaded lazily later.
    skymod = SM.SkyModel(init_parms=None, init_file=spectrum_file, load_spectrum=False)
elif use_pygsm:
    # Diffuse sky from the PyGSM global sky model.
    if not SM.pygsm_found:
        print('PyGSM module not found to be installed.')
        PDB.set_trace()  # drops into the debugger instead of raising -- NOTE(review): consider raising ImportError
    skymod_parallel = parms['skyparm']['parallel']
    if not isinstance(skymod_parallel, bool):
        warnings.warn('Input parallel for determining sky model must be boolean. Setting it to False.')
        skymod_parallel = False
    # Frequencies at which to evaluate the model
    n_mdl_freqs = parms['skyparm']['n_mdl_freqs']
    if n_mdl_freqs is None:
        mdl_freqs = 1e9 * chans
    elif not isinstance(n_mdl_freqs, int):
        raise TypeError('Input n_mdl_freqs must be an integer')
    else:
        if n_mdl_freqs < 2:
            n_mdl_freqs = 8
        # Pad 1% beyond the observing band at both ends
        mdl_freqs = 1e9 * NP.linspace(0.99 * chans.min(), 1.01 * chans.max(), n_mdl_freqs)
    if nside is None:
        # Pick the smallest power-of-two nside whose resolution oversamples
        # the finest angular scale (1/u_max) by at least a factor of 2.
        bl_length = NP.sqrt(NP.sum(arrayinfo['bl']**2, axis=1))
        u_max = bl_length.max() * 1e9 * chans.max() / FCNST.c
        angres = 1 / u_max # radians
        nside = 1
        hpxres = HP.nside2resol(nside)
        while hpxres > 0.5 * angres:
            nside *= 2
            hpxres = HP.nside2resol(nside)
    skymod = SM.diffuse_radio_sky_model(mdl_freqs, gsmversion=sky_str, nside=nside, ind=None, outfile=None, parallel=skymod_parallel)
elif use_USM:
dsm_file = DSM_file_prefix+'_150.0_MHz_nside_{0:0d}.fits'.format(nside)
# dsm_file = DSM_file_prefix+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)
hdulist = fits.open(dsm_file)
pixres = hdulist[0].header['PIXAREA']
dsm_table = hdulist[1].data
ra_deg = dsm_table['RA']
dec_deg = dsm_table['DEC']
temperatures = dsm_table['T_{0:.0f}'.format(150.0)]
# temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
avg_temperature = NP.mean(temperatures)
fluxes_DSM = temperatures * 2.0 * FCNST.k * (150e6/FCNST.c)**2 * pixres / CNST.Jy
# fluxes_USM = avg_temperature * (2.0 * FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy * NP.ones(temperatures.size)
spindex = NP.zeros(fluxes_USM.size)
freq_USM = 0.150 # in GHz
# freq_USM = 0.185 # in GHz
freq_catalog = freq_USM * 1e9 + NP.zeros(fluxes_USM.size)
catlabel = NP.repeat('USM', fluxes_USM.size)
majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_USM.size)
minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_USM.size)
hdulist.close()
flux_unit = 'Jy'
spec_type = 'func'
spec_parms = {}
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes_USM
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
skymod_init_parms = {'name': catlabel, 'frequency': chans*1e9, 'location': NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'spec_type': spec_type, 'spec_parms': spec_parms, 'src_shape': NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), 'src_shape_units': ['degree','degree','degree']}
skymod = SM.SkyModel(init_parms=skymod_init_parms, init_file=None)
elif use_noise:
    # Noise-like sky: an independent Gaussian random value in every HEALPix
    # pixel and channel, scaled like a unit-temperature Rayleigh-Jeans
    # brightness. NOTE(review): the RNG is not seeded here, so the sky
    # differs on every run (and potentially per MPI rank) -- confirm intended.
    pixres = HP.nside2pixarea(nside)
    npix = HP.nside2npix(nside)
    theta, phi = HP.pix2ang(nside, NP.arange(npix))
    dec = NP.pi/2 - theta  # colatitude -> declination (radians)
    flux_unit = 'Jy'
    spec_type = 'spectrum'  # tabulated spectrum rather than a functional form
    majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(npix)
    minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(npix)
    skyspec = NP.random.randn(npix,chans.size) * (2.0 * FCNST.k * (1e9*chans.reshape(1,-1) / FCNST.c)**2) * pixres / CNST.Jy
    spec_parms = {}
    catlabel = 'noise-sky'
    skymod_init_parms = {'name': catlabel, 'frequency': chans*1e9, 'location': NP.hstack((NP.degrees(phi).reshape(-1,1), NP.degrees(dec).reshape(-1,1))), 'spec_type': spec_type, 'spec_parms': spec_parms, 'spectrum': skyspec, 'src_shape': NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(npix).reshape(-1,1))), 'src_shape_units': ['degree','degree','degree']}
    skymod = SM.SkyModel(init_parms=skymod_init_parms, init_file=None)
elif use_CSM:
    # Combined point-source model: SUMSS (south of dec -30 deg) merged with
    # NVSS (north of dec -30 deg), both cut at the requested flux thresholds.
    freq_SUMSS = 0.843 # in GHz
    catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
    ra_deg_SUMSS = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
    # Declination sign must be read from the string field so "-00 .." is handled
    dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
    sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
    sgn_dec = 1.0*NP.ones(dec_dd.size)
    sgn_dec[sgn_dec_str == '-'] = -1.0
    dec_deg_SUMSS = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
    fmajax = catalog[:,7]
    fminax = catalog[:,8]
    fpa = catalog[:,9]
    dmajax = catalog[:,10]
    dminax = catalog[:,11]
    # Unresolved (point) sources only: both deconvolved axes are zero
    PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
    ra_deg_SUMSS = ra_deg_SUMSS[PS_ind]
    dec_deg_SUMSS = dec_deg_SUMSS[PS_ind]
    fint = catalog[PS_ind,6] * 1e-3  # 1e-3 scaling -- presumably mJy -> Jy, TODO confirm
    # Random spectral indices about the canonical mean of -0.83
    if spindex_seed is None:
        spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
    else:
        NP.random.seed(spindex_seed)
        spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
    fmajax = fmajax[PS_ind]
    fminax = fminax[PS_ind]
    fpa = fpa[PS_ind]
    dmajax = dmajax[PS_ind]
    dminax = dminax[PS_ind]
    # Flux selection extrapolated to fluxcut_freq using per-source spindex
    if fluxcut_max is None:
        select_SUMSS_source_ind = fint >= fluxcut_min * (freq_SUMSS*1e9/fluxcut_freq)**spindex_SUMSS
    else:
        select_SUMSS_source_ind = NP.logical_and(fint >= fluxcut_min * (freq_SUMSS*1e9/fluxcut_freq)**spindex_SUMSS, fint <= fluxcut_max * (freq_SUMSS*1e9/fluxcut_freq)**spindex_SUMSS)
    if NP.sum(select_SUMSS_source_ind) > 0:
        ra_deg_SUMSS = ra_deg_SUMSS[select_SUMSS_source_ind]
        dec_deg_SUMSS = dec_deg_SUMSS[select_SUMSS_source_ind]
        fint = fint[select_SUMSS_source_ind]
        fmajax = fmajax[select_SUMSS_source_ind]
        fminax = fminax[select_SUMSS_source_ind]
        fpa = fpa[select_SUMSS_source_ind]
        dmajax = dmajax[select_SUMSS_source_ind]
        dminax = dminax[select_SUMSS_source_ind]
        spindex_SUMSS = spindex_SUMSS[select_SUMSS_source_ind]
        # Require measured (fitted) shapes
        valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
        ra_deg_SUMSS = ra_deg_SUMSS[valid_ind]
        dec_deg_SUMSS = dec_deg_SUMSS[valid_ind]
        fint = fint[valid_ind]
        fmajax = fmajax[valid_ind]
        fminax = fminax[valid_ind]
        fpa = fpa[valid_ind]
        spindex_SUMSS = spindex_SUMSS[valid_ind]
        freq_catalog = freq_SUMSS*1e9 + NP.zeros(fint.size)
        catlabel = NP.repeat('SUMSS', fint.size)
        ra_deg = ra_deg_SUMSS + 0.0
        dec_deg = dec_deg_SUMSS
        spindex = spindex_SUMSS
        majax = fmajax/3.6e3  # arcsec -> degrees -- presumably, TODO confirm
        minax = fminax/3.6e3
        fluxes = fint + 0.0
    freq_NVSS = 1.4 # in GHz
    hdulist = fits.open(NVSS_file)
    ra_deg_NVSS = hdulist[1].data['RA(2000)']
    dec_deg_NVSS = hdulist[1].data['DEC(2000)']
    nvss_fpeak = hdulist[1].data['PEAK INT']
    nvss_majax = hdulist[1].data['MAJOR AX']
    nvss_minax = hdulist[1].data['MINOR AX']
    hdulist.close()
    if spindex_seed is None:
        spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
    else:
        # 2*seed keeps NVSS draws distinct from the SUMSS draws above
        NP.random.seed(2*spindex_seed)
        spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
    not_in_SUMSS_ind = dec_deg_NVSS > -30.0
    # not_in_SUMSS_ind = NP.logical_and(dec_deg_NVSS > -30.0, dec_deg_NVSS <= min(90.0, latitude+90.0))
    if fluxcut_max is None:
        select_source_ind = nvss_fpeak >= fluxcut_min * (freq_NVSS*1e9/fluxcut_freq)**spindex_NVSS
    else:
        select_source_ind = NP.logical_and(nvss_fpeak >= fluxcut_min * (freq_NVSS*1e9/fluxcut_freq)**spindex_NVSS, nvss_fpeak <= fluxcut_max * (freq_NVSS*1e9/fluxcut_freq)**spindex_NVSS)
    if NP.sum(select_source_ind) == 0:
        raise IndexError('No sources in the catalog found satisfying flux threshold criteria')
    # select_source_ind = nvss_fpeak >= 10.0 * (freq_NVSS*1e9/freq)**(spindex_NVSS)
    # Deconvolve the beam from the fitted major axis and keep compact sources
    # -- NOTE(review): mixed arcmin/degree factors here look suspicious; confirm units
    PS_ind = NP.sqrt(nvss_majax**2-(0.75/60.0)**2) < 14.0/3.6e3
    count_valid = NP.sum(NP.logical_and(NP.logical_and(not_in_SUMSS_ind, select_source_ind), PS_ind))
    if count_valid > 0:
        nvss_fpeak = nvss_fpeak[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, select_source_ind), PS_ind)]
        if NP.sum(select_SUMSS_source_ind) > 0:
            # Append NVSS selections to the SUMSS catalogue built above
            freq_catalog = NP.concatenate((freq_catalog, freq_NVSS*1e9 + NP.zeros(count_valid)))
            catlabel = NP.concatenate((catlabel, NP.repeat('NVSS',count_valid)))
            ra_deg = NP.concatenate((ra_deg, ra_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, select_source_ind), PS_ind)]))
            dec_deg = NP.concatenate((dec_deg, dec_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, select_source_ind), PS_ind)]))
            spindex = NP.concatenate((spindex, spindex_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, select_source_ind), PS_ind)]))
            majax = NP.concatenate((majax, nvss_majax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, select_source_ind), PS_ind)]))
            minax = NP.concatenate((minax, nvss_minax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, select_source_ind), PS_ind)]))
            fluxes = NP.concatenate((fluxes, nvss_fpeak))
        else:
            # No SUMSS sources survived the cuts: NVSS alone defines the model
            freq_catalog = freq_NVSS*1e9 + NP.zeros(count_valid)
            catlabel = NP.repeat('NVSS',count_valid)
            ra_deg = ra_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, select_source_ind), PS_ind)]
            dec_deg = dec_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, select_source_ind), PS_ind)]
            spindex = spindex_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, select_source_ind), PS_ind)]
            majax = nvss_majax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, select_source_ind), PS_ind)]
            minax = nvss_minax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, select_source_ind), PS_ind)]
            fluxes = nvss_fpeak
    elif NP.sum(select_SUMSS_source_ind) == 0:
        # Neither catalogue contributed any sources
        raise IndexError('No sources in the catalog found satisfying flux threshold criteria')
    spec_type = 'func'
    spec_parms = {}
    spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
    spec_parms['power-law-index'] = spindex
    spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
    spec_parms['flux-scale'] = fluxes
    spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
    spec_parms['freq-width'] = NP.zeros(ra_deg.size)
    flux_unit = 'Jy'
    skymod_init_parms = {'name': catlabel, 'frequency': chans*1e9, 'location': NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'spec_type': spec_type, 'spec_parms': spec_parms, 'src_shape': NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), 'src_shape_units': ['degree','degree','degree']}
    skymod = SM.SkyModel(init_parms=skymod_init_parms, init_file=None)
elif use_SUMSS:
    # SUMSS-only point-source catalogue (843 MHz).
    freq_SUMSS = 0.843 # in GHz
    catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
    ra_deg = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
    # Declination sign must be read from the string field so "-00 .." is handled
    dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
    sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
    sgn_dec = 1.0*NP.ones(dec_dd.size)
    sgn_dec[sgn_dec_str == '-'] = -1.0
    dec_deg = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
    fmajax = catalog[:,7]
    fminax = catalog[:,8]
    fpa = catalog[:,9]
    dmajax = catalog[:,10]
    dminax = catalog[:,11]
    # Unresolved (point) sources only: both deconvolved axes are zero
    PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
    ra_deg = ra_deg[PS_ind]
    dec_deg = dec_deg[PS_ind]
    fint = catalog[PS_ind,6] * 1e-3  # 1e-3 scaling -- presumably mJy -> Jy, TODO confirm
    # Random spectral indices about the canonical mean of -0.83
    if spindex_seed is None:
        spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
    else:
        NP.random.seed(spindex_seed)
        spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
    fmajax = fmajax[PS_ind]
    fminax = fminax[PS_ind]
    fpa = fpa[PS_ind]
    dmajax = dmajax[PS_ind]
    dminax = dminax[PS_ind]
    # Flux selection extrapolated to fluxcut_freq using per-source spindex
    if fluxcut_max is None:
        select_source_ind = fint >= fluxcut_min * (freq_SUMSS*1e9/fluxcut_freq)**spindex_SUMSS
    else:
        select_source_ind = NP.logical_and(fint >= fluxcut_min * (freq_SUMSS*1e9/fluxcut_freq)**spindex_SUMSS, fint <= fluxcut_max * (freq_SUMSS*1e9/fluxcut_freq)**spindex_SUMSS)
    if NP.sum(select_source_ind) == 0:
        raise IndexError('No sources in the catalog found satisfying flux threshold criteria')
    ra_deg = ra_deg[select_source_ind]
    dec_deg = dec_deg[select_source_ind]
    fint = fint[select_source_ind]
    fmajax = fmajax[select_source_ind]
    fminax = fminax[select_source_ind]
    fpa = fpa[select_source_ind]
    dmajax = dmajax[select_source_ind]
    dminax = dminax[select_source_ind]
    spindex_SUMSS = spindex_SUMSS[select_source_ind]
    # Require measured (fitted) shapes
    valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
    ra_deg = ra_deg[valid_ind]
    dec_deg = dec_deg[valid_ind]
    fint = fint[valid_ind]
    fmajax = fmajax[valid_ind]
    fminax = fminax[valid_ind]
    fpa = fpa[valid_ind]
    spindex_SUMSS = spindex_SUMSS[valid_ind]
    freq_catalog = freq_SUMSS*1e9 + NP.zeros(fint.size)
    catlabel = NP.repeat('SUMSS', fint.size)
    spindex = spindex_SUMSS
    majax = fmajax/3.6e3  # arcsec -> degrees -- presumably, TODO confirm
    minax = fminax/3.6e3
    fluxes = fint + 0.0
    spec_type = 'func'
    spec_parms = {}
    spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
    spec_parms['power-law-index'] = spindex
    spec_parms['freq-ref'] = freq_catalog
    spec_parms['flux-scale'] = fint
    spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
    spec_parms['freq-width'] = 1.0e-3 + NP.zeros(ra_deg.size)
    flux_unit = 'Jy'
    skymod_init_parms = {'name': catlabel, 'frequency': chans*1e9, 'location': NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'spec_type': spec_type, 'spec_parms': spec_parms, 'src_shape': NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), 'src_shape_units': ['degree','degree','degree']}
    skymod = SM.SkyModel(init_parms=skymod_init_parms, init_file=None)
elif use_NVSS:
    # NVSS-only point-source catalogue (1.4 GHz).
    freq_NVSS = 1.4 # in GHz
    hdulist = fits.open(NVSS_file)
    ra_deg_NVSS = hdulist[1].data['RA(2000)']
    dec_deg_NVSS = hdulist[1].data['DEC(2000)']
    nvss_fpeak = hdulist[1].data['PEAK INT']
    nvss_majax = hdulist[1].data['MAJOR AX']
    nvss_minax = hdulist[1].data['MINOR AX']
    hdulist.close()
    # Random spectral indices about the canonical mean of -0.83
    if spindex_seed is None:
        spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
    else:
        NP.random.seed(2*spindex_seed)
        spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
    # Flux selection extrapolated to fluxcut_freq using per-source spindex
    if fluxcut_max is None:
        select_source_ind = nvss_fpeak >= fluxcut_min * (freq_NVSS*1e9/fluxcut_freq)**spindex_NVSS
    else:
        select_source_ind = NP.logical_and(nvss_fpeak >= fluxcut_min * (freq_NVSS*1e9/fluxcut_freq)**spindex_NVSS, nvss_fpeak <= fluxcut_max * (freq_NVSS*1e9/fluxcut_freq)**spindex_NVSS)
    if NP.sum(select_source_ind) == 0:
        raise IndexError('No sources in the catalog found satisfying flux threshold criteria')
    # select_source_ind = nvss_fpeak >= 10.0 * (freq_NVSS*1e9/freq)**(spindex_NVSS)
    # Deconvolve the beam from the fitted major axis and keep compact sources
    # -- NOTE(review): mixed arcmin/degree factors here look suspicious; confirm units
    PS_ind = NP.sqrt(nvss_majax**2-(0.75/60.0)**2) < 14.0/3.6e3
    count_valid = NP.sum(NP.logical_and(select_source_ind, PS_ind))
    if count_valid > 0:
        nvss_fpeak = nvss_fpeak[NP.logical_and(select_source_ind, PS_ind)]
        freq_catalog = freq_NVSS*1e9 + NP.zeros(count_valid)
        catlabel = NP.repeat('NVSS',count_valid)
        ra_deg = ra_deg_NVSS[NP.logical_and(select_source_ind, PS_ind)]
        dec_deg = dec_deg_NVSS[NP.logical_and(select_source_ind, PS_ind)]
        spindex = spindex_NVSS[NP.logical_and(select_source_ind, PS_ind)]
        majax = nvss_majax[NP.logical_and(select_source_ind, PS_ind)]
        minax = nvss_minax[NP.logical_and(select_source_ind, PS_ind)]
        fluxes = nvss_fpeak
    else:
        raise IndexError('No sources in the catalog found satisfying flux threshold and point source criteria')
    spec_type = 'func'
    spec_parms = {}
    spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
    spec_parms['power-law-index'] = spindex
    spec_parms['freq-ref'] = freq_catalog
    spec_parms['flux-scale'] = fluxes
    spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
    spec_parms['freq-width'] = NP.zeros(ra_deg.size)
    flux_unit = 'Jy'
    skymod_init_parms = {'name': catlabel, 'frequency': chans*1e9, 'location': NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'spec_type': spec_type, 'spec_parms': spec_parms, 'src_shape': NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), 'src_shape_units': ['degree','degree','degree']}
    skymod = SM.SkyModel(init_parms=skymod_init_parms, init_file=None)
elif use_MSS:
    # MSS catalogue not implemented -- placeholder branch
    pass
elif use_GLEAM:
reffreq = parms['skyparm']['custom_reffreq']
hdulist = fits.open(GLEAM_file)
colnames = [col.name for col in hdulist[1].columns if ('int_flux_' in col.name and 'err' not in col.name and 'fit' not in col.name and 'wide' not in col.name)]
colfreqs = NP.char.lstrip(colnames, 'int_flux_').astype(NP.float)
nearest_freq_ind = NP.argmin(NP.abs(colfreqs - reffreq*1e3))
freq_GLEAM = colfreqs[nearest_freq_ind] / 1e3 # in GHz
ra_deg_GLEAM = hdulist[1].data['RAJ2000']
dec_deg_GLEAM = hdulist[1].data['DEJ2000']
gleam_fint = hdulist[1].data[colnames[nearest_freq_ind]]
gleam_majax = 2 * hdulist[1].data['a_wide'] # Factor 2 to convert from semi-major axis to FWHM
gleam_minax = 2 * hdulist[1].data['b_wide'] # Factor 2 to convert from semi-minor axis to FWHM
gleam_pa = hdulist[1].data['pa_wide']
gleam_psf_majax = 2 * hdulist[1].data['psf_a_wide'] # Factor 2 to convert from semi-major axis to FWHM
gleam_psf_minax = 2 * hdulist[1].data['psf_b_wide'] # Factor 2 to convert from semi-minor axis to FWHM
spindex_GLEAM = hdulist[1].data['alpha']
hdulist.close()
nanind = NP.where(NP.isnan(spindex_GLEAM))[0]
if nanind.size > 0:
if spindex_seed is not None:
NP.random.seed(2*spindex_seed)
spindex_GLEAM = spindex + spindex_rms * NP.random.randn(gleam_fint.size)
if fluxcut_max is None:
select_source_ind = gleam_fint >= fluxcut_min * (freq_GLEAM*1e9/fluxcut_freq)**spindex_GLEAM
else:
select_source_ind = NP.logical_and(gleam_fint >= fluxcut_min * (freq_GLEAM*1e9/fluxcut_freq)**spindex_GLEAM, gleam_fint <= fluxcut_max * (freq_GLEAM*1e9/fluxcut_freq)**spindex_GLEAM)
if NP.sum(select_source_ind) == 0:
raise IndexError('No sources in the catalog found satisfying flux threshold criteria')
# bright_source_ind = gleam_fint >= 10.0 * (freq_GLEAM*1e9/freq)**spindex_GLEAM
PS_ind = NP.ones(gleam_fint.size, dtype=NP.bool)
# PS_ind = gleam_majax * gleam_minax <= 1.1 * gleam_psf_majax * gleam_psf_minax
valid_ind = NP.logical_and(select_source_ind, PS_ind)
ra_deg_GLEAM = ra_deg_GLEAM[valid_ind]
dec_deg_GLEAM = dec_deg_GLEAM[valid_ind]
gleam_fint = gleam_fint[valid_ind]
spindex_GLEAM = spindex_GLEAM[valid_ind]
gleam_majax = gleam_majax[valid_ind]
gleam_minax = gleam_minax[valid_ind]
gleam_pa = gleam_pa[valid_ind]
fluxes = gleam_fint + 0.0
catlabel = NP.repeat('GLEAM', gleam_fint.size)
ra_deg = ra_deg_GLEAM + 0.0
dec_deg = dec_deg_GLEAM + 0.0
freq_catalog = freq_GLEAM*1e9 + NP.zeros(gleam_fint.size)
majax = gleam_majax / 3.6e3
minax = gleam_minax / 3.6e3
spindex = spindex_GLEAM + 0.0
spec_type = 'func'
spec_parms = {}
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
spec_parms['power-law-index'] = spindex
spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
spec_parms['flux-scale'] = fluxes
spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
spec_parms['freq-width'] = NP.zeros(ra_deg.size)
flux_unit = 'Jy'
skymod_init_parms = {'name': catlabel, 'frequency': chans*1e9, 'location': NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'spec_type': spec_type, 'spec_parms': spec_parms, 'src_shape': NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), 'src_shape_units': ['degree','degree','degree']}
skymod = SM.SkyModel(init_parms=skymod_init_parms, init_file=None)
elif use_skymod:
    # Initialize directly from a previously saved sky model file
    skymod = SM.SkyModel(init_parms=None, init_file=skymod_file)
elif use_custom:
    # Custom ASCII point-source catalogue with columns RA, DEC, F_INT,
    # SPINDEX, MAJAX, MINAX, PA.
    catdata = ascii.read(custom_catalog_file, comment='#', header_start=0, data_start=1)
    ra_deg = catdata['RA'].data
    dec_deg = catdata['DEC'].data
    fint = catdata['F_INT'].data
    spindex = catdata['SPINDEX'].data
    majax = catdata['MAJAX'].data
    minax = catdata['MINAX'].data
    pa = catdata['PA'].data
    freq_custom = parms['skyparm']['custom_reffreq']  # multiplied by 1e9 below, so expected in GHz
    freq_catalog = freq_custom * 1e9 + NP.zeros(fint.size)
    catlabel = NP.repeat('custom', fint.size)
    # Flux selection extrapolated to fluxcut_freq using per-source spindex
    if fluxcut_max is None:
        select_source_ind = fint >= fluxcut_min * (freq_custom*1e9/fluxcut_freq)**spindex
    else:
        select_source_ind = NP.logical_and(fint >= fluxcut_min * (freq_custom*1e9/fluxcut_freq)**spindex, fint <= fluxcut_max * (freq_custom*1e9/fluxcut_freq)**spindex)
    if NP.sum(select_source_ind) == 0:
        raise IndexError('No sources in the catalog found satisfying flux threshold criteria')
    ra_deg = ra_deg[select_source_ind]
    dec_deg = dec_deg[select_source_ind]
    fint = fint[select_source_ind]
    spindex = spindex[select_source_ind]
    majax = majax[select_source_ind]
    minax = minax[select_source_ind]
    pa = pa[select_source_ind]
    freq_catalog = freq_catalog[select_source_ind]
    catlabel = catlabel[select_source_ind]
    spec_type = 'func'
    spec_parms = {}
    spec_parms['name'] = NP.repeat('power-law', ra_deg.size)
    spec_parms['power-law-index'] = spindex
    spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)
    spec_parms['flux-scale'] = fint
    spec_parms['flux-offset'] = NP.zeros(ra_deg.size)
    spec_parms['freq-width'] = NP.zeros(ra_deg.size)
    flux_unit = 'Jy'
    skymod_init_parms = {'name': catlabel, 'frequency': chans*1e9, 'location': NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'spec_type': spec_type, 'spec_parms': spec_parms, 'src_shape': NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fint.size).reshape(-1,1))), 'src_shape_units': ['degree','degree','degree']}
    skymod = SM.SkyModel(init_parms=skymod_init_parms, init_file=None)
# Precess Sky model to observing epoch
# Transform catalogue positions from the sky model's FK5 epoch to the epoch of
# the first observation timestamp, then persist the model for other ranks.
skycoords = SkyCoord(ra=skymod.location[:,0]*U.deg, dec=skymod.location[:,1]*U.deg, frame='fk5', equinox=Time(skymod.epoch, format='jyear_str', scale='utc')).transform_to(FK5(equinox=tobjs[0]))
skymod.location = NP.hstack((skycoords.ra.deg.reshape(-1,1), skycoords.dec.deg.reshape(-1,1)))
skymod.epoch = 'J{0:.12f}'.format(skycoords.equinox.jyear)
try:
    # NOTE: 0755 is a Python-2 octal literal (would be 0o755 under Python 3)
    os.makedirs(rootdir+project_dir+simid+skymod_dir, 0755)
except OSError as exception:
    # Tolerate a pre-existing directory; re-raise any other OS error
    if exception.errno == errno.EEXIST and os.path.isdir(rootdir+project_dir+simid+skymod_dir):
        pass
    else:
        raise
skymod_extfile = rootdir+project_dir+simid+skymod_dir+'skymodel'
skymod.save(skymod_extfile, fileformat='hdf5', extspec_action='unload')
else:
    # Non-root ranks: placeholders overwritten by the broadcasts below
    skymod_extfile = None
    skycoords = None
skymod_extfile = comm.bcast(skymod_extfile, root=0)
skycoords = comm.bcast(skycoords, root=0)
if rank != 0:
    # Each non-root rank re-reads the sky model saved by rank 0
    # (spectrum loading deferred)
    skymod = SM.SkyModel(init_parms=None, init_file=skymod_extfile+'.hdf5', load_spectrum=False)
# Set up chunking for parallelization
if rank == 0:
    # Match every pointing against catalogue positions within roi_radius;
    # m1 indexes pointings, m2 indexes matched sources.
    m1, m2, d12 = GEOM.spherematch(pointings_radec[:,0], pointings_radec[:,1], skycoords.ra.deg, skycoords.dec.deg, matchrad=roi_radius, nnearest=0, maxmatches=0)
    m1 = NP.asarray(m1)
    m2 = NP.asarray(m2)
    d12 = NP.asarray(d12)
    # One list of in-ROI source indices per accumulation/snapshot
    m2_lol = [m2[NP.where(m1==j)[0]] for j in range(n_acc)]
    nsrc_used = max([listitem.size for listitem in m2_lol])  # worst-case ROI size
else:
    m2_lol = None
    nsrc_used = None
m2_lol = comm.bcast(m2_lol, root=0)
nsrc_used = comm.bcast(nsrc_used, root=0)
nsrc = skymod.location.shape[0]
npol = 1
nbl = total_baselines
# Estimate the DFT matrix size (sources x channels x baselines x pols);
# tripled when per-baseline gradients are also computed.
if gradient_mode is not None:
    if gradient_mode.lower() == 'baseline':
        size_DFT_matrix = 1.0 * max([nsrc_used, 1]) * nchan * nbl * npol * 3
    else:
        raise ValueError('Specified gradient_mode is currently not supported')
else:
    size_DFT_matrix = 1.0 * max([nsrc_used, 1]) * nchan * nbl * npol
if memsave: # 64 bits per complex sample (single precision)
    nbytes_per_complex_sample = 8.0
else: # 128 bits per complex sample (double precision)
    nbytes_per_complex_sample = 16.0
memory_DFT_matrix = size_DFT_matrix * nbytes_per_complex_sample
memory_DFT_matrix_per_process = memory_DFT_matrix / nproc
memory_use_per_process = float(memuse) / nproc
# Number of chunks so the full matrix fits in the usable memory budget
n_chunks_per_process = NP.ceil(memory_DFT_matrix/memuse)
n_chunks = NP.ceil(nproc * n_chunks_per_process)
if mpi_on_src:
    # Chunk along the source axis.
    # BUGFIX: src_chunk_size was computed from nchan (copy-paste from the
    # frequency branch) while the bin edges below span nsrc sources.
    src_chunk_size = int(NP.floor(1.0 * nsrc / n_chunks))
    if src_chunk_size == 0:
        raise MemoryError('Too many chunks to fit in usable memory. Try changing number of parallel processes and amount of usable memory. Usually reducing the former or increasing the latter should help avoid this problem.')
    src_bin_indices = range(0, nsrc, src_chunk_size)
    src_chunk = range(len(src_bin_indices))
    n_src_chunks = len(src_bin_indices)
elif mpi_on_freq:
    # Chunk along the frequency axis.
    frequency_chunk_size = int(NP.floor(1.0 * nchan / n_chunks))
    if frequency_chunk_size <= 1:
        raise MemoryError('Too many chunks to fit in usable memory. Try changing number of parallel processes and amount of usable memory. Usually reducing the former or increasing the latter should help avoid this problem.')
    frequency_bin_indices = range(0, nchan, frequency_chunk_size)
    # Avoid a degenerate trailing single-channel chunk. (range() returns a
    # mutable list under Python 2; this in-place edit relies on that.)
    if frequency_bin_indices[-1] == nchan-1:
        if frequency_chunk_size > 2:
            frequency_bin_indices[-1] -= 1
        else:
            warnings.warn('Chunking has run into a weird indexing problem. Rechunking is necessaray. Try changing number of parallel processes and amount of usable memory. Usually reducing either one of these should help avoid this problem.')
            PDB.set_trace()
    freq_chunk = range(len(frequency_bin_indices))
    n_freq_chunks = len(frequency_bin_indices)
    # Distribute chunks over processes as evenly as possible
    n_freq_chunk_per_rank = NP.zeros(nproc, dtype=int) + len(freq_chunk)/nproc
    if len(freq_chunk) % nproc > 0:
        n_freq_chunk_per_rank[:len(freq_chunk)%nproc] += 1
    n_freq_chunk_per_rank = n_freq_chunk_per_rank[::-1] # Reverse for more equal distribution of chunk sizes over processes
    cumm_freq_chunks = NP.concatenate(([0], NP.cumsum(n_freq_chunk_per_rank)))
else:
    # Default: chunk along the baseline axis.
    baseline_chunk_size = int(NP.floor(1.0 * nbl / n_chunks))
    if baseline_chunk_size == 0:
        raise MemoryError('Too many chunks to fit in usable given memory. Try changing number of parallel processes and amount of usable memory. Usually reducing the former or increasing the latter should help avoid this problem.')
    baseline_bin_indices = range(0, nbl, baseline_chunk_size)
    # BUGFIX: the trailing-chunk guard compared against nchan-1 (copy-paste
    # from the frequency branch); the baseline axis has nbl elements.
    if baseline_bin_indices[-1] == nbl-1:
        if baseline_chunk_size > 2:
            baseline_bin_indices[-1] -= 1
        else:
            warnings.warn('Chunking has run into a weird indexing problem. Rechunking is necessaray. Try changing number of parallel processes and amount of usable memory. Usually reducing either one of these should help avoind this problem.')
            PDB.set_trace()
    bl_chunk = range(len(baseline_bin_indices))
    n_bl_chunks = len(baseline_bin_indices)
    # Distribute chunks over processes as evenly as possible
    n_bl_chunk_per_rank = NP.zeros(nproc, dtype=int) + len(bl_chunk)/nproc
    if len(bl_chunk) % nproc > 0:
        n_bl_chunk_per_rank[:len(bl_chunk)%nproc] += 1
    n_bl_chunk_per_rank = n_bl_chunk_per_rank[::-1] # Reverse for more equal distribution of chunk sizes over processes
    cumm_bl_chunks = NP.concatenate(([0], NP.cumsum(n_bl_chunk_per_rank)))
if rank == 0:
    # Persist chunking metadata for this run.
    # NOTE(review): if neither mpi_on_freq nor mpi_on_bl is set (e.g. the
    # mpi_on_src path), 'chunkinfo' is never assigned and the line below
    # raises NameError -- confirm intended usage.
    if mpi_on_freq:
        chunkinfo = {'mpi_axis': 'frequency', 'naxis': nchan, 'nchunks': n_freq_chunks, 'chunk_size': frequency_chunk_size, 'nchunk_per_proc': float(NP.mean(n_freq_chunk_per_rank))}
    if mpi_on_bl:
        chunkinfo = {'mpi_axis': 'baseline', 'naxis': nbl, 'nchunks': n_bl_chunks, 'chunk_size': baseline_chunk_size, 'nchunk_per_proc': float(NP.mean(n_bl_chunk_per_rank))}
    chunkinfo['nproc'] = nproc
    chunkfile = rootdir+project_dir+simid+meta_dir+'chunkinfo.yaml'
    with open(chunkfile, 'w') as cfile:
        yaml.dump(chunkinfo, cfile, default_flow_style=False)
## Set up the observing run
if rank == 0:
    pbinfo = None  # primary-beam info; populated per snapshot below
    process_complete = False
if mpi_on_src: # MPI based on source multiplexing
for i in range(len(bl_chunk)):
print('Working on baseline chunk # {0:0d} ...'.format(bl_chunk[i]))
ia = RI.InterferometerArray(labels[baseline_bin_indices[bl_chunk[i]]:min(baseline_bin_indices[bl_chunk[i]]+baseline_chunk_size,total_baselines)], bl[baseline_bin_indices[bl_chunk[i]]:min(baseline_bin_indices[bl_chunk[i]]+baseline_chunk_size,total_baselines),:], chans, telescope=telescope, eff_Q=eff_Q, latitude=latitude, longitude=longitude, altitude=altitude, A_eff=A_eff, layout=layout_info, freq_scale='GHz', pointing_coords='hadec', gaininfo=gaininfo, blgroupinfo={'groups': blgroups, 'reversemap': bl_reversemap})
if store_prev_sky:
store_prev_skymodel_file=rootdir+project_dir+simid+roi_dir+'_{0:0d}.hdf5'.format(i)
else:
store_prev_skymodel_file = None
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(), PGB.ETA()], maxval=n_acc).start()
for j in range(n_acc):
src_altaz = skycoords[m2_lol[j]].transform_to(AltAz(obstime=tobjs[j], location=EarthLocation(lon=telescope['longitude']*U.deg, lat=telescope['latitude']*U.deg, height=telescope['altitude']*U.m)))
src_altaz_current = NP.hstack((src_altaz.alt.deg.reshape(-1,1), src_altaz.az.deg.reshape(-1,1)))
roi_ind = NP.where(src_altaz_current[:,0] >= 0.0)[0]
n_src_per_rank = NP.zeros(nproc, dtype=int) + roi_ind.size/nproc
if roi_ind.size % nproc > 0:
n_src_per_rank[:roi_ind.size % nproc] += 1
cumm_src_count = NP.concatenate(([0], NP.cumsum(n_src_per_rank)))
pbinfo = None
if (telescope_id.lower() == 'mwa') or (telescope_id.lower() == 'mwa_tools') or (phased_array):
pbinfo = {}
pbinfo['delays'] = delays[j,:]
if (telescope_id.lower() == 'mwa') or (phased_array):
pbinfo['delayerr'] = phasedarray_delayerr
pbinfo['gainerr'] = phasedarray_gainerr
pbinfo['nrand'] = nrand
ts = time.time()
if j == 0:
ts0 = ts
# Fix: the snapshot/accumulation index here is j (loop over n_acc), not i (the
# baseline-chunk index). The mpi_on_freq and mpi_on_bl branches consistently
# pass tobjs[j] to observe(); using tobjs[i] observed every snapshot at the
# wrong timestamp (or raised IndexError when n_bl_chunks < n_acc).
ia.observe(tobjs[j], Tsysinfo, bpass, pointings_hadec[j,:], skymod.subset(m2_lol[j][roi_ind[cumm_src_count[rank]:cumm_src_count[rank+1]]].tolist(), axis='position'), t_acc[j], pb_info=pbinfo, brightness_units=flux_unit, bpcorrect=noise_bpcorr, roi_radius=roi_radius, roi_center=None, gradient_mode=gradient_mode, memsave=memsave, vmemavail=pvmemavail, store_prev_skymodel_file=store_prev_skymodel_file)
te = time.time()
progress.update(j+1)
progress.finish()
if rank == 0:
for k in range(1,nproc):
print('receiving from process {0}'.format(k))
ia.skyvis_freq = ia.skyvis_freq + comm.recv(source=k)
te0 = time.time()
print('Time on process 0 was {0:.1f} seconds'.format(te0-ts0))
ia.t_obs = t_obs
ia.delay_transform(oversampling_factor-1.0, freq_wts=window)
outfile = rootdir+project_dir+simid+sim_dir+'_part_{0:0d}'.format(i)
ia.save(outfile, fmt=savefmt, verbose=True, tabtype='BinTableHDU', npz=False, overwrite=True, uvfits_parms=None)
else:
comm.send(ia.skyvis_freq, dest=0)
elif mpi_on_freq: # MPI based on frequency multiplexing
for k in range(n_sky_sectors):
if n_sky_sectors == 1:
sky_sector_str = '_all_sky_'
else:
sky_sector_str = '_sky_sector_{0:0d}_'.format(k)
if rank == 0: # Compute ROI parameters for only one process and broadcast to all
roi = RI.ROI_parameters()
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} Snapshots '.format(n_acc), PGB.ETA()], maxval=n_acc).start()
for j in range(n_acc):
if m2_lol[j].size > 0:
src_altaz = skycoords[m2_lol[j]].transform_to(AltAz(obstime=tobjs[j], location=EarthLocation(lon=telescope['longitude']*U.deg, lat=telescope['latitude']*U.deg, height=telescope['altitude']*U.m)))
src_altaz_current = NP.hstack((src_altaz.alt.deg.reshape(-1,1), src_altaz.az.deg.reshape(-1,1)))
hemisphere_current = src_altaz_current[:,0] >= 0.0
src_az_current = NP.copy(src_altaz_current[:,1])
src_az_current[src_az_current > 360.0 - 0.5*180.0/n_sky_sectors] -= 360.0
roi_ind = NP.logical_or(NP.logical_and(src_az_current >= -0.5*180.0/n_sky_sectors + k*180.0/n_sky_sectors, src_az_current < -0.5*180.0/n_sky_sectors + (k+1)*180.0/n_sky_sectors), NP.logical_and(src_az_current >= 180.0 - 0.5*180.0/n_sky_sectors + k*180.0/n_sky_sectors, src_az_current < 180.0 - 0.5*180.0/n_sky_sectors + (k+1)*180.0/n_sky_sectors))
roi_subset = NP.where(NP.logical_and(hemisphere_current, roi_ind))[0].tolist()
# src_dircos_current_subset = GEOM.altaz2dircos(src_altaz_current[roi_subset,:], units='degrees')
pbinfo = {}
if (telescope_id.lower() == 'mwa') or (phased_array) or (telescope_id.lower() == 'mwa_tools'):
if pointing_file is not None:
pbinfo['delays'] = delays[j,:]
else:
pbinfo['pointing_center'] = pointings_altaz[j,:]
pbinfo['pointing_coords'] = 'altaz'
if (telescope_id.lower() == 'mwa') or (phased_array):
pbinfo['delayerr'] = phasedarray_delayerr
pbinfo['gainerr'] = phasedarray_gainerr
pbinfo['nrand'] = nrand
else:
pbinfo['pointing_center'] = pointings_altaz[j,:]
pbinfo['pointing_coords'] = 'altaz'
roiinfo = {}
roiinfo['ind'] = NP.asarray(m2_lol[j][roi_subset])
if use_external_beam:
theta_phi = NP.hstack((NP.pi/2-NP.radians(src_altaz_current[roi_subset,0]).reshape(-1,1), NP.radians(src_altaz_current[roi_subset,1]).reshape(-1,1)))
if beam_chromaticity:
interp_logbeam = OPS.healpix_interp_along_axis(NP.log10(external_beam), theta_phi=theta_phi, inloc_axis=external_beam_freqs, outloc_axis=chans*1e9, axis=1, kind=pbeam_spec_interp_method, assume_sorted=True)
else:
nearest_freq_ind = NP.argmin(NP.abs(external_beam_freqs - select_beam_freq))
interp_logbeam = OPS.healpix_interp_along_axis(NP.log10(NP.repeat(external_beam[:,nearest_freq_ind].reshape(-1,1), chans.size, axis=1)), theta_phi=theta_phi, inloc_axis=chans*1e9, outloc_axis=chans*1e9, axis=1, assume_sorted=True)
interp_logbeam_max = NP.nanmax(interp_logbeam, axis=0)
interp_logbeam_max[interp_logbeam_max <= 0.0] = 0.0
interp_logbeam_max = interp_logbeam_max.reshape(1,-1)
interp_logbeam = interp_logbeam - interp_logbeam_max
roiinfo['pbeam'] = 10**interp_logbeam
else:
roiinfo['pbeam'] = None
roiinfo['pbeam_chromaticity'] = beam_chromaticity
roiinfo['pbeam_reffreq'] = select_beam_freq
roiinfo['radius'] = roi_radius
# roiinfo_center_altaz = AltAz(alt=NP.asarray([90.0])*U.deg, az=NP.asarray([270.0])*U.deg, obstime=tobjs[j], location=EarthLocation(lon=telescope['longitude']*U.deg, lat=telescope['latitude']*U.deg, height=telescope['altitude']*U.m))
roiinfo_center_hadec = GEOM.altaz2hadec(NP.asarray([90.0, 270.0]).reshape(1,-1), latitude, units='degrees').ravel() # Seems to be a hard-coding of ROI center to zenith, but that's only to determine the sources in the upper hemisphere
roiinfo_center_radec = [lst[j]-roiinfo_center_hadec[0], roiinfo_center_hadec[1]]
# roiinfo_center_radec = ET.altaz2radec(NP.asarray([90.0, 270.0]).reshape(1,-1), EarthLocation(lon=telescope['longitude']*U.deg, lat=telescope['latitude']*U.deg, height=telescope['altitude']*U.m), obstime=tobjs[j], epoch_RA=tobjs[j])
roiinfo['center'] = NP.asarray(roiinfo_center_radec).reshape(1,-1)
roiinfo['center_coords'] = 'radec'
roi.append_settings(skymod, chans, pinfo=pbinfo, lst=lst[j], time_jd=tobjs[j].jd, roi_info=roiinfo, telescope=telescope, freq_scale='GHz')
else: # Empty sky
roi.append_settings(None, chans, telescope=telescope, freq_scale='GHz')
progress.update(j+1)
progress.finish()
roifile = rootdir+project_dir+simid+roi_dir+'roiinfo'
roi.save(roifile, tabtype='BinTableHDU', overwrite=True, verbose=True)
del roi # to save memory if primary beam arrays or n_acc are large
else:
roi = None
pbinfo = None
roifile = None
roifile = comm.bcast(roifile, root=0) # Broadcast saved RoI filename
pbinfo = comm.bcast(pbinfo, root=0) # Broadcast PB synthesis info
frequency_bin_indices_bounds = frequency_bin_indices + [nchan]
for i in range(cumm_freq_chunks[rank], cumm_freq_chunks[rank+1]):
print('Process {0:0d} working on frequency chunk # {1:0d} ... ({2:0d}/{3:0d})'.format(rank, freq_chunk[i], i-cumm_freq_chunks[rank]+1, n_freq_chunk_per_rank[rank]))
chans_chunk_indices = NP.arange(frequency_bin_indices_bounds[i], frequency_bin_indices_bounds[i+1])
chans_chunk = NP.asarray(chans[chans_chunk_indices]).reshape(-1)
nchan_chunk = chans_chunk.size
f0_chunk = NP.mean(chans_chunk)
bw_chunk_str = '{0:0d}x{1:.1f}_kHz'.format(nchan_chunk, freq_resolution/1e3)
outfile = rootdir+project_dir+simid+sim_dir+'_part_{0:0d}'.format(i)
ia = RI.InterferometerArray(labels, bl, chans_chunk, telescope=telescope, eff_Q=eff_Q, latitude=latitude, longitude=longitude, altitude=altitude, A_eff=A_eff, layout=layout_info, freq_scale='GHz', pointing_coords='hadec', gaininfo=gaininfo, blgroupinfo={'groups': blgroups, 'reversemap': bl_reversemap})
if store_prev_sky:
store_prev_skymodel_file=rootdir+project_dir+simid+roi_dir+'_{0:0d}.hdf5'.format(i)
else:
store_prev_skymodel_file = None
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} Snapshots '.format(n_acc), PGB.ETA()], maxval=n_acc).start()
for j in range(n_acc):
if m2_lol[j].size > 0:
roi_ind_snap = fits.getdata(roifile+'.fits', extname='IND_{0:0d}'.format(j), memmap=False)
roi_pbeam_snap = fits.getdata(roifile+'.fits', extname='PB_{0:0d}'.format(j), memmap=False)
roi_pbeam_snap = roi_pbeam_snap[:,chans_chunk_indices]
else:
roi_ind_snap = NP.asarray([])
roi_pbeam_snap = NP.asarray([])
roi_snap_info = {'ind': roi_ind_snap, 'pbeam': roi_pbeam_snap}
ts = time.time()
if j == 0:
ts0 = ts
ia.observe(tobjs[j], Tsysinfo, bpass[chans_chunk_indices], pointings_hadec[j,:], skymod, t_acc[j], pb_info=pbinfo, brightness_units=flux_unit, bpcorrect=noise_bpcorr[chans_chunk_indices], roi_info=roi_snap_info, roi_radius=roi_radius, roi_center=None, gradient_mode=gradient_mode, memsave=memsave, vmemavail=pvmemavail, store_prev_skymodel_file=store_prev_skymodel_file)
te = time.time()
del roi_ind_snap
del roi_pbeam_snap
progress.update(j+1)
# Diagnostic: tally the memory held by numpy arrays currently in local scope.
numbytes = []
variables = []
# Pre-bind the loop targets so 'var' and 'obj' already exist as keys in
# locals() before iteration starts (avoids a dict-changed-size error).
var = None
obj = None
# Snapshot with list(...): dict.iteritems() is Python-2-only, and iterating
# the live locals() mapping while the loop rebinds var/obj is fragile
# (a previous iteration's array could be double counted through 'obj').
for var, obj in list(locals().items()):
    if isinstance(obj, NP.ndarray):
        variables += [var]
        numbytes += [obj.nbytes]
nGB = NP.asarray(numbytes) / 2.0**30  # bytes -> GiB
totalmemGB = NP.sum(nGB)
progress.finish()
te0 = time.time()
print('Process {0:0d} took {1:.1f} minutes to complete frequency chunk # {2:0d} ({3:0d}/{4:0d})'.format(rank, (te0-ts0)/60.0, freq_chunk[i], i-cumm_freq_chunks[rank]+1, n_freq_chunk_per_rank[rank]))
# store_prev_skymodel_file is None when store_prev_sky is disabled, and
# os.path.exists(None) raises TypeError on both Python 2 and 3 -- guard first.
if store_prev_skymodel_file is not None and os.path.exists(store_prev_skymodel_file):
    os.remove(store_prev_skymodel_file) # Remove the temporary skymodel file
ia.project_baselines(ref_point={'location': ia.pointing_center, 'coords': ia.pointing_coords})
ia.save(outfile, fmt=savefmt, verbose=True, tabtype='BinTableHDU', npz=False, overwrite=True, uvfits_parms=None)
else: # MPI based on baseline multiplexing
if mpi_async: # does not impose equal volume per process
print('Processing next baseline chunk asynchronously...')
processed_chunks = []
process_sequence = []
counter = my_MPI.Counter(comm)
count = -1
ptb = time.time()
ptb_str = str(DT.datetime.now())
while (count+1 < len(bl_chunk)):
count = counter.next()
if count < len(bl_chunk):
processed_chunks.append(count)
process_sequence.append(rank)
print('Process {0:0d} working on baseline chunk # {1:0d} ...'.format(rank, count))
outfile = rootdir+project_dir+simid+sim_dir+'_part_{0:0d}'.format(count)
ia = RI.InterferometerArray(labels[baseline_bin_indices[count]:min(baseline_bin_indices[count]+baseline_chunk_size,total_baselines)], bl[baseline_bin_indices[count]:min(baseline_bin_indices[count]+baseline_chunk_size,total_baselines),:], chans, telescope=telescope, eff_Q=eff_Q, latitude=latitude, longitude=longitude, altitude=altitude, A_eff=A_eff, layout=layout_info, freq_scale='GHz', pointing_coords='hadec', gaininfo=gaininfo, blgroupinfo={'groups': blgroups, 'reversemap': bl_reversemap})
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(), PGB.ETA()], maxval=n_acc).start()
for j in range(n_acc):
pbinfo = None
if (telescope_id.lower() == 'mwa') or (telescope_id.lower() == 'mwa_tools') or (phased_array):
pbinfo = {}
pbinfo['delays'] = delays[j,:]
if (telescope_id.lower() == 'mwa') or (phased_array):
pbinfo['delayerr'] = phasedarray_delayerr
pbinfo['gainerr'] = phasedarray_gainerr
pbinfo['nrand'] = nrand
ts = time.time()
if j == 0:
ts0 = ts
ia.observe(tobjs[j], Tsysinfo, bpass, pointings_hadec[j,:], skymod, t_acc[j], pb_info=pbinfo, brightness_units=flux_unit, bpcorrect=noise_bpcorr, roi_radius=roi_radius, roi_center=None, gradient_mode=gradient_mode, memsave=memsave, vmemavail=pvmemavail)
te = time.time()
progress.update(j+1)
progress.finish()
te0 = time.time()
print('Process {0:0d} took {1:.1f} minutes to complete baseline chunk # {2:0d}'.format(rank, (te0-ts0)/60.0, count))
ia.t_obs = t_obs
ia.delay_transform(oversampling_factor-1.0, freq_wts=window)
ia.save(outfile, fmt=savefmt, verbose=True, tabtype='BinTableHDU', npz=False, overwrite=True, uvfits_parms=None)
counter.free()
pte = time.time()
pte_str = str(DT.datetime.now())
pt = pte - ptb
processed_chunks = comm.allreduce(processed_chunks)
process_sequence = comm.allreduce(process_sequence)
else: # impose equal volume per process
ptb_str = str(DT.datetime.now())
for k in range(n_sky_sectors):
if n_sky_sectors == 1:
sky_sector_str = '_all_sky_'
else:
sky_sector_str = '_sky_sector_{0:0d}_'.format(k)
if rank == 0: # Compute ROI parameters for only one process and broadcast to all
roi = RI.ROI_parameters()
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} Snapshots '.format(n_acc), PGB.ETA()], maxval=n_acc).start()
for j in range(n_acc):
src_altaz = skycoords[m2_lol[j]].transform_to(AltAz(obstime=tobjs[j], location=EarthLocation(lon=telescope['longitude']*U.deg, lat=telescope['latitude']*U.deg, height=telescope['altitude']*U.m)))
src_altaz_current = NP.hstack((src_altaz.alt.deg.reshape(-1,1), src_altaz.az.deg.reshape(-1,1)))
hemisphere_current = src_altaz_current[:,0] >= 0.0
# hemisphere_src_altaz_current = src_altaz_current[hemisphere_current,:]
src_az_current = NP.copy(src_altaz_current[:,1])
src_az_current[src_az_current > 360.0 - 0.5*180.0/n_sky_sectors] -= 360.0
roi_ind = NP.logical_or(NP.logical_and(src_az_current >= -0.5*180.0/n_sky_sectors + k*180.0/n_sky_sectors, src_az_current < -0.5*180.0/n_sky_sectors + (k+1)*180.0/n_sky_sectors), NP.logical_and(src_az_current >= 180.0 - 0.5*180.0/n_sky_sectors + k*180.0/n_sky_sectors, src_az_current < 180.0 - 0.5*180.0/n_sky_sectors + (k+1)*180.0/n_sky_sectors))
roi_subset = NP.where(NP.logical_and(hemisphere_current, roi_ind))[0].tolist()
# src_dircos_current_subset = GEOM.altaz2dircos(src_altaz_current[roi_subset,:], units='degrees')
pbinfo = {}
if (telescope_id.lower() == 'mwa') or (phased_array) or (telescope_id.lower() == 'mwa_tools'):
if pointing_file is not None:
pbinfo['delays'] = delays[j,:]
else:
pbinfo['pointing_center'] = pointings_altaz[j,:]
pbinfo['pointing_coords'] = 'altaz'
if (telescope_id.lower() == 'mwa') or (phased_array):
# pbinfo['element_locs'] = element_locs
pbinfo['delayerr'] = phasedarray_delayerr
pbinfo['gainerr'] = phasedarray_gainerr
pbinfo['nrand'] = nrand
else:
pbinfo['pointing_center'] = pointings_altaz[j,:]
pbinfo['pointing_coords'] = 'altaz'
roiinfo = {}
roiinfo['ind'] = NP.asarray(m2_lol[j][roi_subset])
if use_external_beam:
theta_phi = NP.hstack((NP.pi/2-NP.radians(src_altaz_current[roi_subset,0]).reshape(-1,1), NP.radians(src_altaz_current[roi_subset,1]).reshape(-1,1)))
if beam_chromaticity:
interp_logbeam = OPS.healpix_interp_along_axis(NP.log10(external_beam), theta_phi=theta_phi, inloc_axis=external_beam_freqs, outloc_axis=chans*1e9, axis=1, kind=pbeam_spec_interp_method, assume_sorted=True)
else:
nearest_freq_ind = NP.argmin(NP.abs(external_beam_freqs - select_beam_freq))
interp_logbeam = OPS.healpix_interp_along_axis(NP.log10(NP.repeat(external_beam[:,nearest_freq_ind].reshape(-1,1), chans.size, axis=1)), theta_phi=theta_phi, inloc_axis=chans*1e9, outloc_axis=chans*1e9, axis=1, assume_sorted=True)
interp_logbeam_max = NP.nanmax(interp_logbeam, axis=0)
interp_logbeam_max[interp_logbeam_max <= 0.0] = 0.0
interp_logbeam_max = interp_logbeam_max.reshape(1,-1)
interp_logbeam = interp_logbeam - interp_logbeam_max
roiinfo['pbeam'] = 10**interp_logbeam
else:
roiinfo['pbeam'] = None
roiinfo['pbeam_chromaticity'] = beam_chromaticity
roiinfo['pbeam_reffreq'] = select_beam_freq
roiinfo['radius'] = roi_radius
# roiinfo_center_altaz = AltAz(alt=NP.asarray([90.0])*U.deg, az=NP.asarray([270.0])*U.deg, obstime=tobjs[j], location=EarthLocation(lon=telescope['longitude']*U.deg, lat=telescope['latitude']*U.deg, height=telescope['altitude']*U.m))
roiinfo_center_hadec = GEOM.altaz2hadec(NP.asarray([90.0, 270.0]).reshape(1,-1), latitude, units='degrees').ravel() # Seems to be a hard-coding of ROI center to zenith, but that's only to determine the sources in the upper hemisphere
roiinfo_center_radec = [lst[j]-roiinfo_center_hadec[0], roiinfo_center_hadec[1]]
# roiinfo_center_radec = ET.altaz2radec(NP.asarray([90.0, 270.0]).reshape(1,-1), EarthLocation(lon=telescope['longitude']*U.deg, lat=telescope['latitude']*U.deg, height=telescope['altitude']*U.m), obstime=tobjs[j], epoch_RA=tobjs[j])
roiinfo['center'] = NP.asarray(roiinfo_center_radec).reshape(1,-1)
roiinfo['center_coords'] = 'radec'
roi.append_settings(skymod, chans, pinfo=pbinfo, lst=lst[j], roi_info=roiinfo, telescope=telescope, freq_scale='GHz')
progress.update(j+1)
progress.finish()
roifile = rootdir+project_dir+simid+roi_dir+'roiinfo'
roi.save(roifile, tabtype='BinTableHDU', overwrite=True, verbose=True)
del roi # to save memory if primary beam arrays or n_acc are large
else:
roi = None
pbinfo = None
roifile = None
roifile = comm.bcast(roifile, root=0) # Broadcast saved RoI filename
pbinfo = comm.bcast(pbinfo, root=0) # Broadcast PB synthesis info
if rank == 0:
if plots:
for j in xrange(n_acc):
src_ra = roi.skymodel.location[roi.info['ind'][j],0]
src_dec = roi.skymodel.location[roi.info['ind'][j],1]
src_ra[src_ra > 180.0] = src_ra[src_ra > 180.0] - 360.0
fig, axs = PLT.subplots(2, sharex=True, sharey=True, figsize=(6,6))
modelsky = axs[0].scatter(src_ra, src_dec, c=roi.skymod.spec_parms['flux-scale'][roi.info['ind'][j]], norm=PLTC.LogNorm(vmin=roi.skymod.spec_parms['flux-scale'].min(), vmax=roi.skymod.spec_parms['flux-scale'].max()), edgecolor='none', s=20)
axs[0].set_xlim(180.0, -180.0)
axs[0].set_ylim(-90.0, 90.0)
pbsky = axs[1].scatter(src_ra, src_dec, c=roi.info['pbeam'][j][:,NP.argmax(NP.abs(chans-freq))], norm=PLTC.LogNorm(vmin=roi.info['pbeam'][j].min(), vmax=1.0), edgecolor='none', s=20)
axs[1].set_xlim(180.0, -180.0)
axs[1].set_ylim(-90.0, 90.0)
cbax0 = fig.add_axes([0.88, 0.5, 0.02, 0.35])
cbar0 = fig.colorbar(modelsky, cax=cbax0, orientation='vertical')
cbax0.set_ylabel('Flux Density [Jy]', labelpad=0, fontsize=14)
cbax1 = fig.add_axes([0.88, 0.1, 0.02, 0.35])
cbar1 = fig.colorbar(pbsky, cax=cbax1, orientation='vertical')
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r'$\delta$ [degrees]', fontsize=16, weight='medium', labelpad=30)
big_ax.set_xlabel(r'$\alpha$ [degrees]', fontsize=16, weight='medium', labelpad=20)
fig.subplots_adjust(right=0.88)
baseline_bin_indices_bounds = baseline_bin_indices + [nbl]
for i in range(cumm_bl_chunks[rank], cumm_bl_chunks[rank+1]):
print('Process {0:0d} working on baseline chunk # {1:0d} ... ({2:0d}/{3:0d})'.format(rank, bl_chunk[i], i-cumm_bl_chunks[rank]+1, n_bl_chunk_per_rank[rank]))
bls_chunk_indices = NP.arange(baseline_bin_indices_bounds[i], baseline_bin_indices_bounds[i+1])
bls_chunk = NP.asarray(bl[bls_chunk_indices,:]).reshape(-1,3)
nbl_chunk = bls_chunk.shape[0]
outfile = rootdir+project_dir+simid+sim_dir+'_part_{0:0d}'.format(i)
ia = RI.InterferometerArray(labels[bls_chunk_indices], bls_chunk, chans, telescope=telescope, eff_Q=eff_Q, latitude=latitude, longitude=longitude, altitude=altitude, A_eff=A_eff, layout=layout_info, freq_scale='GHz', pointing_coords='hadec', gaininfo=gaininfo, blgroupinfo={'groups': blgroups, 'reversemap': bl_reversemap})
# ia = RI.InterferometerArray(labels[baseline_bin_indices[bl_chunk[i]]:min(baseline_bin_indices[bl_chunk[i]]+baseline_chunk_size,total_baselines)], bl[baseline_bin_indices[bl_chunk[i]]:min(baseline_bin_indices[bl_chunk[i]]+baseline_chunk_size,total_baselines),:], chans, telescope=telescope, eff_Q=eff_Q, latitude=latitude, longitude=longitude, altitude=altitude, A_eff=A_eff, layout=layout_info, freq_scale='GHz', pointing_coords='hadec', gaininfo=gaininfo, blgroupinfo={'groups': blgroups, 'reversemap': bl_reversemap})
if store_prev_sky:
store_prev_skymodel_file=rootdir+project_dir+simid+roi_dir+'_{0:0d}.hdf5'.format(i)
else:
store_prev_skymodel_file = None
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} Snapshots '.format(n_acc), PGB.ETA()], maxval=n_acc).start()
for j in range(n_acc):
roi_ind_snap = fits.getdata(roifile+'.fits', extname='IND_{0:0d}'.format(j), memmap=False)
roi_pbeam_snap = fits.getdata(roifile+'.fits', extname='PB_{0:0d}'.format(j), memmap=False)
if obs_mode in ['custom', 'dns', 'lstbin']:
timestamp = obs_id[j]
else:
# timestamp = lst[j]
timestamp = timestamps[j]
ts = time.time()
if j == 0:
ts0 = ts
ia.observe(tobjs[j], Tsysinfo, bpass, pointings_hadec[j,:], skymod, t_acc[j], pb_info=pbinfo, brightness_units=flux_unit, bpcorrect=noise_bpcorr, roi_info={'ind': roi_ind_snap, 'pbeam': roi_pbeam_snap}, roi_radius=roi_radius, roi_center=None, gradient_mode=gradient_mode, memsave=memsave, vmemavail=pvmemavail, store_prev_skymodel_file=store_prev_skymodel_file)
te = time.time()
del roi_ind_snap
del roi_pbeam_snap
progress.update(j+1)
progress.finish()
te0 = time.time()
print('Process {0:0d} took {1:.1f} minutes to complete baseline chunk # {2:0d}'.format(rank, (te0-ts0)/60, bl_chunk[i]))
ia.t_obs = t_obs
# ia.generate_noise()
# ia.add_noise()
# ia.delay_transform(oversampling_factor-1.0, freq_wts=window*NP.abs(ant_bpass)**2)
ia.project_baselines(ref_point={'location': ia.pointing_center, 'coords': ia.pointing_coords})
ia.save(outfile, fmt=savefmt, verbose=True, tabtype='BinTableHDU', npz=False, overwrite=True, uvfits_parms=None)
# store_prev_skymodel_file is None when store_prev_sky is disabled, and
# os.path.exists(None) raises TypeError on both Python 2 and 3 -- guard first.
if store_prev_skymodel_file is not None and os.path.exists(store_prev_skymodel_file):
    os.remove(store_prev_skymodel_file) # Remove the temporary skymodel file
pte_str = str(DT.datetime.now())
if rank == 0:
parmsfile = rootdir+project_dir+simid+meta_dir+'simparms.yaml'
with open(parmsfile, 'w') as pfile:
yaml.dump(parms, pfile, default_flow_style=False)
minfo = {'user': pwd.getpwuid(os.getuid())[0], 'git#': prisim.__githash__, 'PRISim': prisim.__version__}
metafile = rootdir+project_dir+simid+meta_dir+'meta.yaml'
with open(metafile, 'w') as mfile:
yaml.dump(minfo, mfile, default_flow_style=False)
process_complete = True
all_process_complete = comm.gather(process_complete, root=0)
if rank == 0:
for k in range(n_sky_sectors):
if n_sky_sectors == 1:
sky_sector_str = '_all_sky_'
else:
sky_sector_str = '_sky_sector_{0:0d}_'.format(k)
if mpi_on_bl:
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} Baseline chunks '.format(n_bl_chunks), PGB.ETA()], maxval=n_bl_chunks).start()
for i in range(0, n_bl_chunks):
bls_chunk_indices = NP.arange(baseline_bin_indices_bounds[i], baseline_bin_indices_bounds[i+1])
bls_chunk = NP.asarray(bl[bls_chunk_indices,:]).reshape(-1)
nbls_chunk = bls_chunk.shape[0]
blchunk_infile = rootdir+project_dir+simid+sim_dir+'_part_{0:0d}'.format(i)
if i == 0:
simvis = RI.InterferometerArray(None, None, None, init_file=blchunk_infile)
else:
simvis_next = RI.InterferometerArray(None, None, None, init_file=blchunk_infile)
simvis.concatenate(simvis_next, axis=0)
if cleanup >= 1:
if os.path.isfile(blchunk_infile+'.'+savefmt.lower()):
os.remove(blchunk_infile+'.'+savefmt.lower())
if os.path.isfile(blchunk_infile+'.gains.hdf5'):
os.remove(blchunk_infile+'.gains.hdf5')
progress.update(i+1)
progress.finish()
elif mpi_on_freq:
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} Frequency chunks '.format(n_freq_chunks), PGB.ETA()], maxval=n_freq_chunks).start()
frequency_bin_indices_bounds = frequency_bin_indices + [nchan]
for i in range(0, n_freq_chunks):
chans_chunk_indices = NP.arange(frequency_bin_indices_bounds[i], frequency_bin_indices_bounds[i+1])
chans_chunk = NP.asarray(chans[chans_chunk_indices]).reshape(-1)
nchan_chunk = chans_chunk.size
f0_chunk = NP.mean(chans_chunk)
bw_chunk_str = '{0:0d}x{1:.1f}_kHz'.format(nchan_chunk, freq_resolution/1e3)
freqchunk_infile = rootdir+project_dir+simid+sim_dir+'_part_{0:0d}'.format(i)
if i == 0:
simvis = RI.InterferometerArray(None, None, None, init_file=freqchunk_infile)
else:
simvis_next = RI.InterferometerArray(None, None, None, init_file=freqchunk_infile)
simvis.concatenate(simvis_next, axis=1)
if cleanup > 1:
if os.path.isfile(freqchunk_infile+'.'+savefmt.lower()):
os.remove(freqchunk_infile+'.'+savefmt.lower())
if os.path.isfile(freqchunk_infile+'.gains.hdf5'):
os.remove(freqchunk_infile+'.gains.hdf5')
progress.update(i+1)
progress.finish()
simvis.generate_noise()
simvis.add_noise()
simvis.simparms_file = parmsfile
ref_point = {'coords': pc_coords, 'location': NP.asarray(pc).reshape(1,-1)}
simvis.rotate_visibilities(ref_point, do_delay_transform=do_delay_transform, verbose=True)
if do_delay_transform:
simvis.delay_transform(oversampling_factor-1.0, freq_wts=window*NP.abs(ant_bpass)**2)
consolidated_outfile = rootdir+project_dir+simid+sim_dir+'simvis'
simvis.save(consolidated_outfile, fmt=savefmt, verbose=True, tabtype='BinTableHDU', npz=save_to_npz, overwrite=True, uvfits_parms=None)
pyuvdata_formats = []
if save_to_uvh5:
pyuvdata_formats += ['uvh5']
if save_to_uvfits:
pyuvdata_formats += ['uvfits']
if len(pyuvdata_formats) > 0:
simvis_orig = copy.deepcopy(simvis)
if save_redundant: # Duplicate the redundant visibilities
consolidated_outfile = rootdir+project_dir+simid+sim_dir+'all-simvis'
for pyuvdata_fmt in pyuvdata_formats:
simvis = copy.deepcopy(simvis_orig)
uvfits_parms = None
if pyuvdata_fmt == 'uvfits':
if save_formats['phase_center'] is None:
phase_center = simvis.pointing_center[0,:].reshape(1,-1)
phase_center_coords = simvis.pointing_coords
if phase_center_coords == 'dircos':
phase_center = GEOM.dircos2altaz(phase_center, units='degrees')
phase_center_coords = 'altaz'
if phase_center_coords == 'altaz':
phase_center = GEOM.altaz2hadec(phase_center, simvis.latitude, units='degrees')
phase_center_coords = 'hadec'
if phase_center_coords == 'hadec':
phase_center = NP.hstack((simvis.lst[0]-phase_center[0,0], phase_center[0,1]))
phase_center_coords = 'radec'
if phase_center_coords != 'radec':
raise ValueError('Invalid phase center coordinate system')
uvfits_ref_point = {'location': phase_center.reshape(1,-1), 'coords': 'radec'}
else:
uvfits_ref_point = {'location': NP.asarray(save_formats['phase_center']).reshape(1,-1), 'coords': 'radec'}
# Phase the visibilities to a phase reference point
simvis.rotate_visibilities(uvfits_ref_point)
uvfits_parms = {'ref_point': None, 'datapool': None, 'method': save_formats['uvfits_method']}
if save_redundant: # Duplicate the redundant visibilities
simvis.duplicate_measurements(blgroups=blgroups)
simvis.pyuvdata_write(consolidated_outfile, formats=[pyuvdata_fmt], uvfits_parms=uvfits_parms, overwrite=True)
if cleanup >= 3:
dir_to_be_removed = rootdir+project_dir+simid+skymod_dir
shutil.rmtree(dir_to_be_removed, ignore_errors=True)
if cleanup >= 2:
dir_to_be_removed = rootdir+project_dir+simid+roi_dir
shutil.rmtree(dir_to_be_removed, ignore_errors=True)
print('Process {0} has completed.'.format(rank))
if diagnosis_parms['wait_after_run']:
PDB.set_trace()
| 122,758 | 51.461111 | 537 | py |
dstqa | dstqa-master/multiwoz_format.py | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
// Licensed under the Amazon Software License http://aws.amazon.com/asl/
import sys
import os
import json
import pdb
import copy
import random
assert(len(sys.argv) == 4)
ontology_path = "ontology/domain_slot_list_sp.txt"
data_ratio = 100
if sys.argv[1] == "all":
domains_keep = set(["restaurant", "hotel", "train", "attraction", "taxi"])
else:
domains_keep = set([sys.argv[1]])
input_file_path = sys.argv[2]
output_file_path = sys.argv[3]
train_file_path = input_file_path + "/train_dials.json"
dev_file_path = input_file_path + "/dev_dials.json"
test_file_path = input_file_path + "/test_dials.json"
def read_ds():
    """Parse the ontology file into a list of "domain-slot" strings.

    Lines starting with '#' are comments and are skipped; every other line
    is tab-separated with the domain in the first field and the slot name
    in the second. Reads the module-level ``ontology_path``.
    """
    names = []
    with open(ontology_path) as handle:
        for raw_line in handle:
            if raw_line[0] == "#":
                continue
            fields = raw_line.split("\t")
            names.append(fields[0] + "-" + fields[1])
    return names
ds = read_ds()
# the following function is from https://raw.githubusercontent.com/jasonwu0731/trade-dst/master/utils/fix_label.py
def fix_general_label_error(labels, type):
    """Normalize noisy MultiWOZ slot-value annotations for one dialogue turn.

    Parameters
    ----------
    labels : when ``type`` is truthy, a list of ``(slot, value)`` pairs;
        otherwise a list of dicts whose ``"slots"`` field holds
        ``[[slot, value], ...]`` (the two raw-annotation layouts).
    type : flag selecting which of the two layouts above to parse.
        (Shadows the ``type`` builtin; name kept for interface compatibility.)

    Returns
    -------
    dict mapping slot name -> cleaned value: common typos are rewritten via
    GENERAL_TYPO, slot/value mismatches are repaired, and a few
    out-of-ontology values are mapped to "none".

    NOTE(review): relies on the module-level ``ds`` list of "domain-slot"
    names read from the ontology file.
    """
    # Canonical slot names: lowercased; spaces stripped except in "book ..." slots.
    slots = [k.replace(" ","").lower() if ("book" not in k) else k.lower() for k in ds]
    label_dict = dict([ (l[0], l[1]) for l in labels]) if type else dict([ (l["slots"][0][0], l["slots"][0][1]) for l in labels])
    # Flat value-for-value typo corrections, applied before the slot-aware fixes.
    GENERAL_TYPO = {
        # type
        "guesthouse":"guest house", "guesthouses":"guest house", "guest":"guest house", "mutiple sports":"multiple sports",
        "sports":"multiple sports", "mutliple sports":"multiple sports","swimmingpool":"swimming pool", "concerthall":"concert hall",
        "concert":"concert hall", "pool":"swimming pool", "night club":"nightclub", "mus":"museum", "ol":"architecture",
        "colleges":"college", "coll":"college", "architectural":"architecture", "musuem":"museum", "churches":"church",
        # area
        "center":"centre", "center of town":"centre", "near city center":"centre", "in the north":"north", "cen":"centre", "east side":"east",
        "east area":"east", "west part of town":"west", "ce":"centre", "town center":"centre", "centre of cambridge":"centre",
        "city center":"centre", "the south":"south", "scentre":"centre", "town centre":"centre", "in town":"centre", "north part of town":"north",
        "centre of town":"centre", "cb30aq": "none",
        # price
        "mode":"moderate", "moderate -ly": "moderate", "mo":"moderate",
        # day
        "next friday":"friday", "monda": "monday", "thur": "thursday", "not given": "none",
        # parking
        "free parking":"free",
        # internet
        "free internet":"yes",
        # star
        "4 star":"4", "4 stars":"4", "0 star rarting":"none",
        # others
        "y":"yes", "any":"dontcare", "n":"no", "does not care":"dontcare", "not men":"none", "not":"none", "not mentioned":"none",
        '':"none", "not mendtioned":"none", "3 .":"3", "does not":"no", "fun":"none", "art":"none", "no mentioned": "none",
        }
    for slot in slots:
        if slot in label_dict.keys():
            # general typos
            if label_dict[slot] in GENERAL_TYPO.keys():
                label_dict[slot] = label_dict[slot].replace(label_dict[slot], GENERAL_TYPO[label_dict[slot]])
            # miss match slot and value
            if slot == "hotel-type" and label_dict[slot] in ["nigh", "moderate -ly priced", "bed and breakfast", "centre", "venetian", "intern", "a cheap -er hotel"] or \
                slot == "hotel-internet" and label_dict[slot] == "4" or \
                slot == "hotel-pricerange" and label_dict[slot] == "2" or \
                slot == "attraction-type" and label_dict[slot] in ["gastropub", "la raza", "galleria", "gallery", "science", "m"] or \
                "area" in slot and label_dict[slot] in ["moderate"] or \
                "day" in slot and label_dict[slot] == "t":
                label_dict[slot] = "none"
            elif slot == "hotel-type" and label_dict[slot] in ["hotel with free parking and free wifi", "4", "3 star hotel"]:
                label_dict[slot] = "hotel"
            elif slot == "hotel-star" and label_dict[slot] == "3 star hotel":
                label_dict[slot] = "3"
            elif "area" in slot:
                if label_dict[slot] == "no": label_dict[slot] = "north"
                elif label_dict[slot] == "we": label_dict[slot] = "west"
                elif label_dict[slot] == "cent": label_dict[slot] = "centre"
            elif "day" in slot:
                if label_dict[slot] == "we": label_dict[slot] = "wednesday"
                elif label_dict[slot] == "no": label_dict[slot] = "none"
            elif "price" in slot and label_dict[slot] == "ch":
                label_dict[slot] = "cheap"
            elif "internet" in slot and label_dict[slot] == "free":
                label_dict[slot] = "yes"
            # some out-of-define classification slot values
            if slot == "restaurant-area" and label_dict[slot] in ["stansted airport", "cambridge", "silver street"] or \
                slot == "attraction-area" and label_dict[slot] in ["norwich", "ely", "museum", "same area as hotel"]:
                label_dict[slot] = "none"
    return label_dict
def bs_format(bs):
  """Convert a flat {"domain-slot": value} belief state into the nested
  {domain: {"semi": {slot: value}}} structure, canonicalizing slot names
  and values along the way.

  The long chains below are deliberately plain `if` (not `elif`) statements
  and their ORDER MATTERS: a value rewritten by one rule may be rewritten
  again by a later rule (e.g. in the attraction branch,
  "cambridge temporary art" -> "contemporary art museum" ->
  "cambridge contemporary art"). Do not reorder or collapse into a dict.

  Only domains listed in the module-level `domains_keep` are emitted.
  NOTE(review): the loop variable `ds` shadows the module-level ontology
  list `ds`; rename one of them if the ontology is ever needed here.
  """
  res = {"restaurant": {"semi": {}},
         "hotel": {"semi": {}},
         "train": {"semi": {}},
         "attraction": {"semi": {}},
         "taxi": {"semi": {}},
        }
  for ds, v in bs.items():
    d = ds.split("-")[0]
    s = ds.split("-")[1]
    # --- domain-independent value/slot-name fixes -------------------------
    if v == "dontcare":
      v = "dont care"
    if v == "does not care":
      v = "dont care"
    if v == "corsican":
      v = "corsica"
    if v == "barbeque":
      v = "barbecue"
    if v == "center":
      v = "centre"
    if v == "east side":
      v = "east"
    if s == "pricerange":
      s = "price range"
    if s == "price range" and v == "mode":
      v = "moderate"
    if v == "not mentioned":
      v = ""
    if v == "thai and chinese": # only one such type, throw away
      v = "chinese"
    if s == "area" and v == "n":
      v = "north"
    if s == "price range" and v == "ch":
      v = "cheap"
    if v == "moderate -ly":
      v = "moderate"
    if s == "area" and v == "city center":
      v = "centre"
    if s == "food" and v == "sushi": # sushi only appear once in the training dataset. doesnt matter throw it away or not
      v = "japanese"
    # --- venue-name aliases (mostly restaurants/hotels) -------------------
    if v == "oak bistro":
      v = "the oak bistro"
    if v == "golden curry":
      v = "the golden curry"
    if v == "meze bar restaurant":
      v = "meze bar"
    if v == "golden house golden house":
      v = "golden house"
    if v == "missing sock":
      v = "the missing sock"
    if v == "the yippee noodle bar":
      v = "yippee noodle bar"
    if v == "fitzbillies":
      v = "fitzbillies restaurant"
    if v == "slug and lettuce":
      v = "the slug and lettuce"
    if v == "copper kettle":
      v = "the copper kettle"
    if v == "city stop":
      v = "city stop restaurant"
    if v == "cambridge lodge":
      v = "cambridge lodge restaurant"
    if v == "ian hong house":
      v = "lan hong house"
    if v == "lan hong":
      v = "lan hong house"
    if v == "hotpot":
      v = "the hotpot"
    if v == "the dojo noodle bar":
      v = "dojo noodle bar"
    if v == "cambridge chop house":
      v = "the cambridge chop house"
    if v == "nirala":
      v = "the nirala"
    if v == "gardenia":
      v = "the gardenia"
    if v == "the americas":
      v = "americas"
    if v == "guest house":
      v = "guesthouse"
    if v == "margherita":
      v = "la margherita"
    if v == "gonville":
      v = "gonville hotel"
    if s == "parking" and v == "free":
      v = "yes"
    # --- hotel-name aliases ----------------------------------------------
    if d == "hotel" and s == "name":
      if v == "acorn" or v == "acorn house":
        v = "acorn guest house"
      if v == "cambridge belfry":
        v = "the cambridge belfry"
      if v == "huntingdon hotel":
        v = "huntingdon marriott hotel"
      if v == "alexander":
        v = "alexander bed and breakfast"
      if v == "lensfield hotel":
        v = "the lensfield hotel"
      if v == "university arms":
        v = "university arms hotel"
      if v == "city roomz":
        v = "cityroomz"
      if v == "ashley":
        v = "ashley hotel"
    # --- train station aliases -------------------------------------------
    if d == "train":
      if s == "destination" or s == "departure":
        if v == "bishop stortford":
          v = "bishops stortford"
        if v == "bishops storford":
          v = "bishops stortford"
        if v == "birmingham":
          v = "birmingham new street"
        if v == "stansted":
          v = "stansted airport"
        if v == "leicaster":
          v = "leicester"
    # --- attraction-name aliases (rules chain here; see docstring) --------
    if d == "attraction":
      if v == "cambridge temporary art":
        v = "contemporary art museum"
      if v == "cafe jello":
        v = "cafe jello gallery"
      if v == "fitzwilliam" or v == "fitzwilliam museum":
        v = "the fitzwilliam museum"
      if v == "contemporary art museum":
        v = "cambridge contemporary art"
      if v == "man on the moon":
        v = "the man on the moon"
      if v == "christ college":
        v = "christ s college"
      if v == "old school":
        v = "old schools"
      if v == "cambridge punter":
        v= "the cambridge punter"
      if v == "queen s college":
        v = "queens college"
      if v == "all saint s church":
        v = "all saints church"
      if v == "fez club":
        v = "the fez club"
      if v == "parkside":
        v = "parkside pools"
      if v == "saint john s college .":
        v = "saint john s college"
      if v == "the mumford theatre":
        v = "mumford theatre"
      if v == "corn cambridge exchange":
        v = "the cambridge corn exchange"
    # --- taxi pickup/dropoff aliases (contains a few duplicated checks,
    # e.g. "stevenage train station"; harmless) ----------------------------
    if d == "taxi":
      if v == "london kings cross train station":
        v = "london kings cross"
      if v == "stevenage train station":
        v = "stevenage"
      if v == "junction theatre":
        v = "the junction"
      if v == "bishops stortford train station":
        v = "bishops stortford"
      if v == "cambridge train station":
        v = "cambridge"
      if v == "citiroomz":
        v = "cityroomz"
      if v == "london liverpool street train station":
        v = "london liverpool street"
      if v == "norwich train station":
        v = "norwich"
      if v == "kings college":
        v = "king s college"
      if v == "the ghandi" or v == "ghandi":
        v = "the gandhi"
      if v == "ely train station":
        v = "ely"
      if v == "stevenage train station":
        v = "stevenage"
      if v == "peterborough train station":
        v = "peterborough"
      if v == "london kings cross train station":
        v = "london kings cross"
      if v == "kings lynn train station":
        v = "kings lynn"
      if v == "stansted airport train station":
        v = "stansted airport"
      if v == "acorn house":
        v = "acorn guest house"
      if v == "queen s college":
        v = "queens college"
      if v == "leicester train station":
        v = "leicester"
      if v == "the gallery at 12":
        v = "gallery at 12 a high street"
      if v == "caffee uno":
        v = "caffe uno"
      if v == "stevenage train station":
        v = "stevenage"
      if v == "finches":
        v = "finches bed and breakfast"
      if v == "broxbourne train station":
        v = "broxbourne"
      if v == "country folk museum":
        v = "cambridge and county folk museum"
      if v == "ian hong":
        v = "lan hong house"
      if v == "the byard art museum":
        v = "byard art"
      if v == "cambridge belfry":
        v = "the cambridge belfry"
      if v == "birmingham new street train station":
        v = "birmingham new street"
      if v == "man on the moon concert hall":
        v = "the man on the moon"
      if v == "st . john s college":
        v = "saint john s college"
      if v == "st johns chop house":
        v = "saint johns chop house"
      if v == "fitzwilliam museum":
        v = "the fitzwilliam museum"
      if v == "cherry hinton village centre":
        v = "the cherry hinton village centre"
      if v == "maharajah tandoori restaurant4":
        v = "maharajah tandoori restaurant"
      if v == "the soul tree":
        v = "soul tree nightclub"
      if v == "cherry hinton village center":
        v = "the cherry hinton village centre"
      if v == "aylesbray lodge":
        v = "aylesbray lodge guest house"
      if v == "the alexander bed and breakfast":
        v = "alexander bed and breakfast"
      if v == "shiraz .":
        v = "shiraz restaurant"
      if v == "tranh binh":
        v = "thanh binh"
      if v == "riverboat georginawd":
        v = "riverboat georgina"
      if v == "lovell ldoge":
        v = "lovell lodge"
      if v == "alyesbray lodge hotel":
        v = "aylesbray lodge guest house"
      if v == "wandlebury county park":
        v = "wandlebury country park"
      if v == "the galleria":
        v = "galleria"
      if v == "cambridge artw2orks":
        v = "cambridge artworks"
    # drop domains the run was not configured to keep
    if d not in domains_keep:
      continue
    res[d]["semi"][s] = v
  return res
def utt_format(utt):
  """Normalize spelling variants in an utterance to the dataset's canon."""
  for variant, canonical in (("barbeque", "barbecue"), ("center", "centre")):
    utt = utt.replace(variant, canonical)
  return utt
def process(file_path, is_training=False):
  """Load raw dialogs from `file_path`, drop dialogs touching none of the
  kept domains, and normalize utterances and belief states.

  When `is_training` is set and the module-level `data_ratio` is below 100,
  a deterministically-shuffled (seed 10) `data_ratio`% subset is used.
  Returns a list of {"dialogue_idx": ..., "dialogue": [turn, ...]} dicts.
  """
  with open(file_path) as fp:
    data_json = json.load(fp)
  if is_training and data_ratio != 100:
    random.Random(10).shuffle(data_json)
    data_json = data_json[:int(len(data_json) * 0.01 * data_ratio)]
  dialog_json = []
  for dialog in data_json:
    # keep a dialog iff it mentions at least one kept domain
    if not any(domain in domains_keep for domain in dialog["domains"]):
      continue
    turns = []
    for turn_info in dialog["dialogue"]:
      cleaned_bs = fix_general_label_error(turn_info["belief_state"], False)
      turns.append({
        "transcript": utt_format(turn_info["transcript"]),
        "system_transcript": utt_format(turn_info["system_transcript"]),
        "belief_state": bs_format(cleaned_bs),
      })
    dialog_json.append({"dialogue_idx": dialog["dialogue_idx"], "dialogue": turns})
  return dialog_json
# Fix: the output file handles were opened but never closed; use context
# managers so every file is flushed and closed deterministically.
# train
train_dialogs = process(train_file_path, True)
with open(os.path.join(output_file_path, "./train.json"), "w") as ofp:
  ofp.write(json.dumps(train_dialogs, indent=2))
# dev
dev_dialogs = process(dev_file_path)
with open(os.path.join(output_file_path, "./dev.json"), "w") as ofp:
  ofp.write(json.dumps(dev_dialogs, indent=2))
# test
test_dialogs = process(test_file_path)
with open(os.path.join(output_file_path, "./test.json"), "w") as ofp:
  ofp.write(json.dumps(test_dialogs, indent=2))
# prediction. same as test, but one instance per line (JSON-lines)
with open(os.path.join(output_file_path, "./prediction.json"), "w") as ofp:
  for dialog in test_dialogs:
    ofp.write(json.dumps(dialog))
    ofp.write("\n")
| 15,023 | 35.914005 | 171 | py |
dstqa | dstqa-master/multiwoz_2.1_format.py | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
// Licensed under the Amazon Software License http://aws.amazon.com/asl/
import sys
import os
import json
import pdb
import copy
import random
# --- command-line configuration ---------------------------------------------
# argv: <domain|all> <input_dir> <output_dir>
# Fix: this used `assert(len(sys.argv) == 4)`, which is silently stripped
# under `python -O` and gives no usage hint; validate explicitly instead.
if len(sys.argv) != 4:
  sys.exit("usage: python multiwoz_2.1_format.py <domain|all> <input_dir> <output_dir>")
ontology_path = "ontology/domain_slot_list_sp.txt"
data_ratio = 100  # percentage of training dialogs to keep (100 = use all)
if sys.argv[1] == "all":
  domains_keep = {"restaurant", "hotel", "train", "attraction", "taxi"}
else:
  domains_keep = {sys.argv[1]}
input_file_path = sys.argv[2]
output_file_path = sys.argv[3]
train_file_path = input_file_path + "/train_dials.json"
dev_file_path = input_file_path + "/dev_dials.json"
test_file_path = input_file_path + "/test_dials.json"
def read_ds():
  """Read the ontology file and return the list of "domain-slot" names.

  Lines beginning with '#' are treated as comments; data lines are
  tab-separated with the domain in column 0 and the slot in column 1.
  """
  names = []
  with open(ontology_path) as fp:
    for raw_line in fp:
      if raw_line.startswith("#"):
        continue
      cols = raw_line.split("\t")
      names.append(cols[0] + "-" + cols[1])
  return names
ds = read_ds()
# the following function is from https://raw.githubusercontent.com/jasonwu0731/trade-dst/master/utils/fix_label.py
def fix_general_label_error(labels, type):
  """Fix common annotation typos/errors in a raw belief-state label list.

  Args:
    labels: either a list of (slot, value) pairs (when `type` is truthy) or
      a list of {"slots": [[slot, value]]} dicts (when `type` is falsy).
    type: selects which of the two input formats `labels` uses.
      NOTE(review): shadows the builtin `type`; kept for upstream
      compatibility with trade-dst's fix_label.py.

  Returns:
    dict mapping "domain-slot" -> cleaned value (only slots present in the
    module-level ontology list `ds` are considered).
  """
  # ontology slot names, lower-cased; spaces removed except for "book ..." slots
  slots = [k.replace(" ","").lower() if ("book" not in k) else k.lower() for k in ds]
  label_dict = dict([ (l[0], l[1]) for l in labels]) if type else dict([ (l["slots"][0][0], l["slots"][0][1]) for l in labels])
  GENERAL_TYPO = {
    # type
    "guesthouse":"guest house", "guesthouses":"guest house", "guest":"guest house", "mutiple sports":"multiple sports",
    "sports":"multiple sports", "mutliple sports":"multiple sports","swimmingpool":"swimming pool", "concerthall":"concert hall",
    "concert":"concert hall", "pool":"swimming pool", "night club":"nightclub", "mus":"museum", "ol":"architecture",
    "colleges":"college", "coll":"college", "architectural":"architecture", "musuem":"museum", "churches":"church",
    # area
    "center":"centre", "center of town":"centre", "near city center":"centre", "in the north":"north", "cen":"centre", "east side":"east",
    "east area":"east", "west part of town":"west", "ce":"centre", "town center":"centre", "centre of cambridge":"centre",
    "city center":"centre", "the south":"south", "scentre":"centre", "town centre":"centre", "in town":"centre", "north part of town":"north",
    "centre of town":"centre", "cb30aq": "none",
    # price
    "mode":"moderate", "moderate -ly": "moderate", "mo":"moderate",
    # day
    "next friday":"friday", "monda": "monday", "thur": "thursday", "not given": "none",
    # parking
    "free parking":"free",
    # internet
    "free internet":"yes",
    # star
    "4 star":"4", "4 stars":"4", "0 star rarting":"none",
    # others
    "y":"yes", "any":"dontcare", "n":"no", "does not care":"dontcare", "not men":"none", "not":"none", "not mentioned":"none",
    '':"none", "not mendtioned":"none", "3 .":"3", "does not":"no", "fun":"none", "art":"none", "no mentioned": "none",
    }
  for slot in slots:
    if slot in label_dict.keys():
      # general typos
      # (the value equals the dict key exactly, so replace() amounts to a
      # whole-string substitution)
      if label_dict[slot] in GENERAL_TYPO.keys():
        label_dict[slot] = label_dict[slot].replace(label_dict[slot], GENERAL_TYPO[label_dict[slot]])
      # miss match slot and value
      # (the compound conditions rely on `and` binding tighter than `or`)
      if slot == "hotel-type" and label_dict[slot] in ["nigh", "moderate -ly priced", "bed and breakfast", "centre", "venetian", "intern", "a cheap -er hotel"] or \
         slot == "hotel-internet" and label_dict[slot] == "4" or \
         slot == "hotel-pricerange" and label_dict[slot] == "2" or \
         slot == "attraction-type" and label_dict[slot] in ["gastropub", "la raza", "galleria", "gallery", "science", "m"] or \
         "area" in slot and label_dict[slot] in ["moderate"] or \
         "day" in slot and label_dict[slot] == "t":
        label_dict[slot] = "none"
      elif slot == "hotel-type" and label_dict[slot] in ["hotel with free parking and free wifi", "4", "3 star hotel"]:
        label_dict[slot] = "hotel"
      elif slot == "hotel-star" and label_dict[slot] == "3 star hotel":
        label_dict[slot] = "3"
      elif "area" in slot:
        if label_dict[slot] == "no": label_dict[slot] = "north"
        elif label_dict[slot] == "we": label_dict[slot] = "west"
        elif label_dict[slot] == "cent": label_dict[slot] = "centre"
      elif "day" in slot:
        if label_dict[slot] == "we": label_dict[slot] = "wednesday"
        elif label_dict[slot] == "no": label_dict[slot] = "none"
      elif "price" in slot and label_dict[slot] == "ch":
        label_dict[slot] = "cheap"
      elif "internet" in slot and label_dict[slot] == "free":
        label_dict[slot] = "yes"
      # some out-of-define classification slot values
      if slot == "restaurant-area" and label_dict[slot] in ["stansted airport", "cambridge", "silver street"] or \
         slot == "attraction-area" and label_dict[slot] in ["norwich", "ely", "museum", "same area as hotel"]:
        label_dict[slot] = "none"
  return label_dict
def bs_format(bs):
  """Convert a flat {"domain-slot": value} belief state into the nested
  {domain: {"semi": {slot: value}}} structure, canonicalizing slot names
  and values along the way (MultiWOZ 2.1 variant: it prepends extra value
  fixes before the rules shared with the 2.0 script).

  The long chains below are deliberately plain `if` (not `elif`) statements
  and their ORDER MATTERS: a value rewritten by one rule may be rewritten
  again by a later rule (e.g. in the attraction branch,
  "cambridge temporary art" -> "contemporary art museum" ->
  "cambridge contemporary art"). Do not reorder or collapse into a dict.

  Only domains listed in the module-level `domains_keep` are emitted.
  NOTE(review): the loop variable `ds` shadows the module-level ontology
  list `ds`; rename one of them if the ontology is ever needed here.
  """
  res = {"restaurant": {"semi": {}},
         "hotel": {"semi": {}},
         "train": {"semi": {}},
         "attraction": {"semi": {}},
         "taxi": {"semi": {}},
        }
  for ds, v in bs.items():
    d = ds.split("-")[0]
    s = ds.split("-")[1]
    # --- MultiWOZ 2.1-specific value fixes (a few checks are duplicated,
    # e.g. "lensfield hotel" and "rosas bed and breakfast"; harmless) ------
    if v == "cambridge contemporary art museum":
      v = "cambridge contemporary art"
    if v == "cafe jello museum":
      v = "cafe jello gallery"
    if v == "whippple museum":
      v = "whipple museum of the history of science"
    if v == "st christs college":
      v = "christ s college"
    if v == "abc theatre":
      v = "adc theatre"
    if d == "train" and v == "london":
      v = "london kings cross"
    if v == "the castle galleries":
      v = "castle galleries"
    if v == "cafe jello":
      v = "cafe jello gallery"
    if v == "cafe uno":
      v = "caffe uno"
    if v == "el shaddia guesthouse":
      v = "el shaddai"
    if v == "kings college":
      v = "king s college"
    if v == "saint johns college":
      v = "saint john s college"
    if v == "kettles yard":
      v = "kettle s yard"
    if v == "grafton hotel":
      v = "grafton hotel restaurant"
    if v == "churchills college":
      v = "churchill college"
    if v == "the churchill college":
      v = "churchill college"
    if v == "portugese":
      v = "portuguese"
    if v == "lensfield hotel":
      v = "the lensfield hotel"
    if v == "rosas bed and breakfast":
      v = "rosa s bed and breakfast"
    if v == "pizza hut fenditton":
      v = "pizza hut fen ditton"
    if v == "great saint marys church":
      v = "great saint mary s church"
    if v == "alimentum":
      v = "restaurant alimentum"
    if v == "cow pizza kitchen and bar":
      v = "the cow pizza kitchen and bar"
    if v == "shiraz":
      v = "shiraz restaurant"
    if v == "cherry hinton village centre":
      v = "the cherry hinton village centre"
    if v == "christ college":
      v = "christ s college"
    if v == "peoples portraits exhibition at girton college":
      v = "people s portraits exhibition at girton college"
    if v == "saint catharines college":
      v = "saint catharine s college"
    if v == "the maharajah tandoor":
      v = "maharajah tandoori restaurant"
    if v == "efes":
      v = "efes restaurant"
    if v == "the gonvile hotel":
      v = "gonville hotel"
    if v == "abbey pool":
      v = "abbey pool and astroturf pitch"
    if v == "the cambridge arts theatre":
      v = "cambridge arts theatre"
    if v == "sheeps green and lammas land park fen causeway":
      v = "sheep s green and lammas land park fen causeway"
    if v == "lensfield hotel":
      v = "the lensfield hotel"
    if v == "rosas bed and breakfast":
      v = "rosa s bed and breakfast"
    if v == "little saint marys church":
      v = "little saint mary s church"
    if v == "cambridge punter":
      v = "the cambridge punter"
    if v == "pizza hut":
      v = "pizza hut city centre"
    if v == "good luck":
      v = "the good luck chinese food takeaway"
    if v == "lucky star":
      v = "the lucky star"
    if v == "cambridge contemporary art museum":
      v = "cambridge contemporary art"
    if v == "cow pizza kitchen and bar":
      v = "the cow pizza kitchen and bar"
    if v == "river bar steakhouse and grill":
      v = "the river bar steakhouse and grill"
    if v == "chiquito":
      v = "chiquito restaurant bar"
    if v == "king hedges learner pool":
      v = "kings hedges learner pool"
    # --- domain-independent value/slot-name fixes (shared with 2.0) -------
    if v == "dontcare":
      v = "dont care"
    if v == "does not care":
      v = "dont care"
    if v == "corsican":
      v = "corsica"
    if v == "barbeque":
      v = "barbecue"
    if v == "center":
      v = "centre"
    if v == "east side":
      v = "east"
    if s == "pricerange":
      s = "price range"
    if s == "price range" and v == "mode":
      v = "moderate"
    if v == "not mentioned":
      v = ""
    if v == "thai and chinese": # only one such type, throw away
      v = "chinese"
    if s == "area" and v == "n":
      v = "north"
    if s == "price range" and v == "ch":
      v = "cheap"
    if v == "moderate -ly":
      v = "moderate"
    if s == "area" and v == "city center":
      v = "centre"
    if s == "food" and v == "sushi": # sushi only appear once in the training dataset. doesnt matter throw it away or not
      v = "japanese"
    # --- venue-name aliases (mostly restaurants/hotels) -------------------
    if v == "oak bistro":
      v = "the oak bistro"
    if v == "golden curry":
      v = "the golden curry"
    if v == "meze bar restaurant":
      v = "meze bar"
    if v == "golden house golden house":
      v = "golden house"
    if v == "missing sock":
      v = "the missing sock"
    if v == "the yippee noodle bar":
      v = "yippee noodle bar"
    if v == "fitzbillies":
      v = "fitzbillies restaurant"
    if v == "slug and lettuce":
      v = "the slug and lettuce"
    if v == "copper kettle":
      v = "the copper kettle"
    if v == "city stop":
      v = "city stop restaurant"
    if v == "cambridge lodge":
      v = "cambridge lodge restaurant"
    if v == "ian hong house":
      v = "lan hong house"
    if v == "lan hong":
      v = "lan hong house"
    if v == "hotpot":
      v = "the hotpot"
    if v == "the dojo noodle bar":
      v = "dojo noodle bar"
    if v == "cambridge chop house":
      v = "the cambridge chop house"
    if v == "nirala":
      v = "the nirala"
    if v == "gardenia":
      v = "the gardenia"
    if v == "the americas":
      v = "americas"
    if v == "guest house":
      v = "guesthouse"
    if v == "margherita":
      v = "la margherita"
    if v == "gonville":
      v = "gonville hotel"
    if s == "parking" and v == "free":
      v = "yes"
    # --- hotel-name aliases ----------------------------------------------
    if d == "hotel" and s == "name":
      if v == "acorn" or v == "acorn house":
        v = "acorn guest house"
      if v == "cambridge belfry":
        v = "the cambridge belfry"
      if v == "huntingdon hotel":
        v = "huntingdon marriott hotel"
      if v == "alexander":
        v = "alexander bed and breakfast"
      if v == "lensfield hotel":
        v = "the lensfield hotel"
      if v == "university arms":
        v = "university arms hotel"
      if v == "city roomz":
        v = "cityroomz"
      if v == "ashley":
        v = "ashley hotel"
    # --- train station aliases -------------------------------------------
    if d == "train":
      if s == "destination" or s == "departure":
        if v == "bishop stortford":
          v = "bishops stortford"
        if v == "bishops storford":
          v = "bishops stortford"
        if v == "birmingham":
          v = "birmingham new street"
        if v == "stansted":
          v = "stansted airport"
        if v == "leicaster":
          v = "leicester"
    # --- attraction-name aliases (rules chain here; see docstring) --------
    if d == "attraction":
      if v == "cambridge temporary art":
        v = "contemporary art museum"
      if v == "cafe jello":
        v = "cafe jello gallery"
      if v == "fitzwilliam" or v == "fitzwilliam museum":
        v = "the fitzwilliam museum"
      if v == "contemporary art museum":
        v = "cambridge contemporary art"
      if v == "man on the moon":
        v = "the man on the moon"
      if v == "christ college":
        v = "christ s college"
      if v == "old school":
        v = "old schools"
      if v == "cambridge punter":
        v= "the cambridge punter"
      if v == "queen s college":
        v = "queens college"
      if v == "all saint s church":
        v = "all saints church"
      if v == "fez club":
        v = "the fez club"
      if v == "parkside":
        v = "parkside pools"
      if v == "saint john s college .":
        v = "saint john s college"
      if v == "the mumford theatre":
        v = "mumford theatre"
      if v == "corn cambridge exchange":
        v = "the cambridge corn exchange"
    # --- taxi pickup/dropoff aliases (contains a few duplicated checks,
    # e.g. "stevenage train station"; harmless) ----------------------------
    if d == "taxi":
      if v == "london kings cross train station":
        v = "london kings cross"
      if v == "stevenage train station":
        v = "stevenage"
      if v == "junction theatre":
        v = "the junction"
      if v == "bishops stortford train station":
        v = "bishops stortford"
      if v == "cambridge train station":
        v = "cambridge"
      if v == "citiroomz":
        v = "cityroomz"
      if v == "london liverpool street train station":
        v = "london liverpool street"
      if v == "norwich train station":
        v = "norwich"
      if v == "kings college":
        v = "king s college"
      if v == "the ghandi" or v == "ghandi":
        v = "the gandhi"
      if v == "ely train station":
        v = "ely"
      if v == "stevenage train station":
        v = "stevenage"
      if v == "peterborough train station":
        v = "peterborough"
      if v == "london kings cross train station":
        v = "london kings cross"
      if v == "kings lynn train station":
        v = "kings lynn"
      if v == "stansted airport train station":
        v = "stansted airport"
      if v == "acorn house":
        v = "acorn guest house"
      if v == "queen s college":
        v = "queens college"
      if v == "leicester train station":
        v = "leicester"
      if v == "the gallery at 12":
        v = "gallery at 12 a high street"
      if v == "caffee uno":
        v = "caffe uno"
      if v == "stevenage train station":
        v = "stevenage"
      if v == "finches":
        v = "finches bed and breakfast"
      if v == "broxbourne train station":
        v = "broxbourne"
      if v == "country folk museum":
        v = "cambridge and county folk museum"
      if v == "ian hong":
        v = "lan hong house"
      if v == "the byard art museum":
        v = "byard art"
      if v == "cambridge belfry":
        v = "the cambridge belfry"
      if v == "birmingham new street train station":
        v = "birmingham new street"
      if v == "man on the moon concert hall":
        v = "the man on the moon"
      if v == "st . john s college":
        v = "saint john s college"
      if v == "st johns chop house":
        v = "saint johns chop house"
      if v == "fitzwilliam museum":
        v = "the fitzwilliam museum"
      if v == "cherry hinton village centre":
        v = "the cherry hinton village centre"
      if v == "maharajah tandoori restaurant4":
        v = "maharajah tandoori restaurant"
      if v == "the soul tree":
        v = "soul tree nightclub"
      if v == "cherry hinton village center":
        v = "the cherry hinton village centre"
      if v == "aylesbray lodge":
        v = "aylesbray lodge guest house"
      if v == "the alexander bed and breakfast":
        v = "alexander bed and breakfast"
      if v == "shiraz .":
        v = "shiraz restaurant"
      if v == "tranh binh":
        v = "thanh binh"
      if v == "riverboat georginawd":
        v = "riverboat georgina"
      if v == "lovell ldoge":
        v = "lovell lodge"
      if v == "alyesbray lodge hotel":
        v = "aylesbray lodge guest house"
      if v == "wandlebury county park":
        v = "wandlebury country park"
      if v == "the galleria":
        v = "galleria"
      if v == "cambridge artw2orks":
        v = "cambridge artworks"
    # drop domains the run was not configured to keep
    if d not in domains_keep:
      continue
    res[d]["semi"][s] = v
  return res
def utt_format(utt):
  """Normalize spelling variants in an utterance to the dataset's canon."""
  for variant, canonical in (("barbeque", "barbecue"), ("center", "centre")):
    utt = utt.replace(variant, canonical)
  return utt
def process(file_path, is_training=False):
  """Load raw dialogs from `file_path`, drop dialogs touching none of the
  kept domains, and normalize utterances and belief states.

  When `is_training` is set and the module-level `data_ratio` is below 100,
  a deterministically-shuffled (seed 10) `data_ratio`% subset is used.
  Returns a list of {"dialogue_idx": ..., "dialogue": [turn, ...]} dicts.
  """
  with open(file_path) as fp:
    data_json = json.load(fp)
  if is_training and data_ratio != 100:
    random.Random(10).shuffle(data_json)
    data_json = data_json[:int(len(data_json) * 0.01 * data_ratio)]
  dialog_json = []
  for dialog in data_json:
    # keep a dialog iff it mentions at least one kept domain
    if not any(domain in domains_keep for domain in dialog["domains"]):
      continue
    turns = []
    for turn_info in dialog["dialogue"]:
      cleaned_bs = fix_general_label_error(turn_info["belief_state"], False)
      turns.append({
        "transcript": utt_format(turn_info["transcript"]),
        "system_transcript": utt_format(turn_info["system_transcript"]),
        "belief_state": bs_format(cleaned_bs),
      })
    dialog_json.append({"dialogue_idx": dialog["dialogue_idx"], "dialogue": turns})
  return dialog_json
# Fix: the output file handles were opened but never closed; use context
# managers so every file is flushed and closed deterministically.
# train
train_dialogs = process(train_file_path, True)
with open(os.path.join(output_file_path, "./train.json"), "w") as ofp:
  ofp.write(json.dumps(train_dialogs, indent=2))
# dev
dev_dialogs = process(dev_file_path)
with open(os.path.join(output_file_path, "./dev.json"), "w") as ofp:
  ofp.write(json.dumps(dev_dialogs, indent=2))
# test
test_dialogs = process(test_file_path)
with open(os.path.join(output_file_path, "./test.json"), "w") as ofp:
  ofp.write(json.dumps(test_dialogs, indent=2))
# prediction. same as test, but one instance per line (JSON-lines)
with open(os.path.join(output_file_path, "./prediction.json"), "w") as ofp:
  for dialog in test_dialogs:
    ofp.write(json.dumps(dialog))
    ofp.write("\n")
| 18,246 | 35.567134 | 171 | py |
dstqa | dstqa-master/dstqa/dstqa.py | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
// Licensed under the Amazon Software License http://aws.amazon.com/asl/
import pdb
import math
import logging
import os.path
import pickle
import random
from typing import Any, Dict, List
from overrides import overrides
import numpy as np
import torch
import torch.nn.functional as F
from torch.nn.functional import nll_loss
from torch.nn import BCEWithLogitsLoss
from torch.nn import CrossEntropyLoss
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WordTokenizer
from allennlp.data.fields import Field, TextField, ArrayField
from allennlp.common.checks import check_dimensions_match
from allennlp.data import Token, Vocabulary, Instance
from allennlp.data.dataset import Batch
from allennlp.models.model import Model
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TokenEmbedder, TextFieldEmbedder, FeedForward, ScalarMix
from allennlp.modules.input_variational_dropout import InputVariationalDropout
from allennlp.modules.matrix_attention.linear_matrix_attention import LinearMatrixAttention
from allennlp.modules.seq2seq_encoders.pytorch_seq2seq_wrapper import PytorchSeq2SeqWrapper
from allennlp.modules.layer_norm import LayerNorm
from allennlp.nn import Activation
from allennlp.nn import InitializerApplicator, util
from allennlp.nn.util import logsumexp
from allennlp.tools import squad_eval
from allennlp.training.metrics import Average, BooleanAccuracy, CategoricalAccuracy
from allennlp.modules.elmo import batch_to_ids as elmo_batch_to_ids
from allennlp.modules.elmo import Elmo
from .accuracy import Accuracy
from . import dstqa_util
logger = logging.getLogger(__name__)
@Model.register("dstqa")
class DSTQA(Model):
  def __init__(self, vocab: Vocabulary,
               base_dim: int,
               loss_scale_by_num_values,
               use_pre_calc_elmo_embeddings: bool,
               elmo_embedding_path: str,
               domain_slot_list_path: str,
               word_embeddings: str,
               token_indexers: Dict[str, TokenIndexer],
               text_field_embedder: TextFieldEmbedder,
               text_field_char_embedder: TextFieldEmbedder,
               symbol_embedder: TextFieldEmbedder,
               phrase_layer: Seq2SeqEncoder,
               class_prediction_layer: FeedForward,
               span_prediction_layer: FeedForward,
               span_start_encoder: FeedForward,
               span_end_encoder: FeedForward,
               span_label_predictor: FeedForward,
               initializer: InitializerApplicator,
               use_graph: bool,
               bi_dropout: float = 0.2,
               dropout: float = 0.2) -> None:
    """Build the DSTQA dialog-state-tracking model.

    `word_embeddings` selects the word representation ("elmo" enables ELMo);
    when `use_pre_calc_elmo_embeddings` is also set, pre-computed per-dialog
    ELMo activations are loaded from `elmo_embedding_path{train,dev,test}`
    pickles. The ontology (domains, slots, per-slot types/value lists) is
    read from `domain_slot_list_path`.
    """
    super().__init__(vocab)
    self._is_in_training_mode = False  # synced with self.training at the start of forward()
    self._loss_scale_by_num_values = loss_scale_by_num_values
    self._use_pre_calc_elmo_embeddings = use_pre_calc_elmo_embeddings
    self._word_embeddings = word_embeddings
    self._is_use_elmo = True if self._word_embeddings == "elmo" else False
    self._is_use_graph = use_graph
    if self._is_use_elmo and use_pre_calc_elmo_embeddings:
      # cached ELMo activations for every dialog, merged from train/dev/test pickles
      self._dialog_elmo_embeddings = self.load_elmo_embeddings(elmo_embedding_path)
      # trainable mix over the 3 cached ELMo layers (see mix_dialog_embeddings)
      self._dialog_scalar_mix = ScalarMix(mixture_size = 3, trainable=True)
    # ontology: domains, domain-slot id<->text maps and per-slot metadata
    self._domains, self._ds_id2text, self._ds_text2id, self.value_file_path, \
      self._ds_type, self._ds_use_value_list, num_ds_use_value, self._ds_masked \
      = self.read_domain_slot_list(domain_slot_list_path)
    self._value_id2text, self._value_text2id = self.load_value_list(domain_slot_list_path)
    self._span_id2text, self._class_id2text = dstqa_util.gen_id2text(self._ds_id2text, self._ds_type)
    self._token_indexers = token_indexers
    self._text_field_embedder = text_field_embedder
    self._text_field_char_embedder = text_field_char_embedder
    self._symbol_embedder = symbol_embedder
    # linear attentions ('x,y,x*y' features) between domain-slot, value and
    # dialog representations; all operate on `base_dim`-sized vectors
    self._ds_dialog_attention = LinearMatrixAttention(base_dim, base_dim, 'x,y,x*y')
    self._dialog_dsv_attention = LinearMatrixAttention(base_dim, base_dim, 'x,y,x*y')
    self._dsv_dialog_attention = LinearMatrixAttention(base_dim, base_dim, 'x,y,x*y')
    self._ds_attention = LinearMatrixAttention(base_dim, base_dim, 'x,y,x*y')
    self._dsv_attention = LinearMatrixAttention(base_dim, base_dim, 'x,y,x*y')
    # projections presumably used by the graph component when use_graph is on
    # -- confirm against the graph branch of forward()
    self._agg_value = torch.nn.Linear(base_dim, base_dim)
    self._agg_nodes = torch.nn.Linear(base_dim, base_dim)
    self._graph_gamma = torch.nn.Linear(base_dim, 1)
    self._class_prediction_layer = class_prediction_layer
    self._span_prediction_layer = span_prediction_layer
    self._span_label_predictor = span_label_predictor
    self._span_start_encoder = span_start_encoder
    self._span_end_encoder = span_end_encoder
    self._phrase_layer = phrase_layer
    self._cross_entropy = CrossEntropyLoss(ignore_index=-1)  # -1 marks unlabeled positions
    self._accuracy = Accuracy(self._ds_id2text, self._ds_type)
    self._dropout = torch.nn.Dropout(dropout)
    self._bi_dropout = torch.nn.Dropout(bi_dropout)
    self._dropout2 = torch.nn.Dropout(0.1)  # drives random time-step masking in mask_time_step()
    self._sigmoid = torch.nn.Sigmoid()
    initializer(self)
def load_elmo_embeddings(self, elmo_embedding_path):
elmo_embeddings = {}
for suffix in ["train", "dev", "test"]:
with open(elmo_embedding_path + suffix, "rb") as fp:
elmo_embeddings.update(pickle.load(fp))
return elmo_embeddings
  def gen_utt_masks(self, turn_offset, batch_size, max_turn_count, max_dialog_len):
    """Build per-turn boolean masks over the flattened dialog token sequence.

    `turn_offset` is the cumulative token count per turn (torch.cumsum of the
    per-turn lengths in forward()), i.e. the end offset of each turn within
    the concatenated dialog.

    Returns:
      (masks, cmasks) -- both (batch, max_turn_count, max_dialog_len) bool:
      masks[b, t, j] is True for tokens of turn t AND all earlier turns
      ("context up to and including turn t"); cmasks keeps only the tokens
      of turn t itself ("current utterance only").
    """
    # token positions 0..max_dialog_len-1, broadcast to (batch, turn, position)
    masks = torch.arange(0, max_dialog_len).unsqueeze(0).unsqueeze(0).cuda()
    masks = masks.repeat(batch_size, max_turn_count, 1)
    repeated_turn_offset = turn_offset.unsqueeze(2).repeat(1, 1, max_dialog_len)
    # position < cumulative end offset => token belongs to turn <= t
    masks = masks < repeated_turn_offset
    # two types of masks: (1) all previous and current utt are marked as 1, (2) only current utt are marked as 1
    bmasks = masks.clone().detach()
    # complement of the context mask for turns 0..T-2: True for tokens AFTER
    # turn t's end; intersecting it with turn t+1's context mask strips all
    # earlier turns, leaving only turn t+1's own tokens
    bmasks = (~bmasks)[:, :-1, :]
    cmasks = masks.clone().detach()
    cmasks[:, 1:, :] = cmasks[:, 1:, :] & bmasks
    return masks, cmasks
  def mix_dialog_embeddings(self, dialog_indices):
    """Assemble pre-computed ELMo embeddings for a batch of dialogs.

    For each dialog index, moves the cached per-layer ELMo tensors to GPU,
    combines the layers with the trainable scalar mix (3 layers; see
    ScalarMix(mixture_size=3) in __init__), then zero-pads every dialog to
    the batch's maximum length.

    Returns a (batch, max_dialog_len, dim) tensor on GPU.
    """
    dialog_embeddings = []
    max_dialog_len = 0
    for idx in dialog_indices:
      elmo_embeddings_cuda = []
      # each cache entry is an iterable of per-layer activation tensors
      for v in self._dialog_elmo_embeddings[idx]:
        elmo_embeddings_cuda.append(v.cuda())
      dialog_embeddings.append(self._dialog_scalar_mix(elmo_embeddings_cuda))
      if max_dialog_len < dialog_embeddings[-1].size(0):
        max_dialog_len = dialog_embeddings[-1].size(0)
    # right-pad each dialog with zero rows up to the batch max length
    for i, e in enumerate(dialog_embeddings):
      pad = torch.zeros(max_dialog_len - e.size(0), e.size(1)). cuda()
      dialog_embeddings[i] = torch.cat((e, pad), dim=0)
    dialog_embeddings = torch.stack(dialog_embeddings, dim=0)
    return dialog_embeddings
  def mask_time_step(self, dialogs, dialog_masks):
    """Randomly mask whole dialog time steps IN PLACE (word-dropout style),
    then re-apply padding.

    The random mask is sampled via self._dropout2 applied to a tensor of
    ones: entries zeroed by dropout (prob 0.1) come out as 0.0 < 0.5 ->
    True -> that time step gets masked. Since nn.Dropout is a no-op in eval
    mode, masking effectively happens only during training.
    Positions where dialog_masks == 0 (padding) are reset to padding ids.
    """
    batch_size, max_dialog_len, max_char_len = dialogs['token_characters'].size()
    masks = self._dropout2(torch.ones(batch_size, max_dialog_len))
    masks = masks < 0.5
    # 259/260 appear to be special char ids marking a masked token -- TODO
    # confirm against the character vocabulary
    char_masked = torch.tensor([259, 260] + [0] * (max_char_len - 2)).cuda()
    char_padded = torch.tensor([0] * max_char_len).cuda()
    dialogs["token_characters"][masks] = char_masked
    dialogs["token_characters"][dialog_masks == 0] = char_padded
    if "tokens" in dialogs:
      dialogs["tokens"][masks] = 1 # 1 is the index for unknown
      dialogs["tokens"][dialog_masks == 0] = 0
    if "elmo" in dialogs:
      # ELMo character ids are a fixed width of 50 per token
      elmo_masked = torch.tensor([259, 260] + [261] * (50 - 2)).cuda()
      elmo_padded = torch.tensor([0] * 50).cuda()
      dialogs["elmo"][masks] = elmo_masked
      dialogs["elmo"][dialog_masks == 0] = elmo_padded
    def forward(self, dialogs, tags, utt_lens, exact_match, dialog_indices, epoch_num = None,
                labels=None, spans_start=None, spans_end=None, metadata=None, span_labels=None):
        """Run dialog-state tracking over a batch of dialogs.

        Embeds the dialog (word/ELMo + character embeddings, tag and exact-match
        features), then for each turn predicts every domain-slot: a value index
        for "classification" slots, a (label, span) pair for "span" slots.
        During training only one randomly sampled turn contributes to the loss
        (a GPU-memory-saving measure); earlier turns may still be run in eval
        mode to feed the slot graph. Returns a dict with "predictions",
        "metadata" and, in training mode, "loss".
        """
        self._is_in_training_mode = self.training
        # dialog embeddings
        batch_size, max_dialog_len, _ = dialogs['token_characters'].size()
        dialog_masks = util.get_text_field_mask(dialogs, num_wrapping_dims=0)
        # Word dropout / padding masking mutates the input tensors in place.
        self.mask_time_step(dialogs, dialog_masks)
        char_embedder_input = {'token_characters':dialogs['token_characters']}
        dialog_char_embeddings = self._text_field_char_embedder(char_embedder_input, num_wrapping_dims=0)
        if self._is_use_elmo:
            if self._use_pre_calc_elmo_embeddings == False:
                elmo_embedder_input = {'elmo':dialogs['elmo']}
                dialog_elmo_embeddings = self._text_field_embedder(elmo_embedder_input, num_wrapping_dims=0)
                dialog_embeddings = torch.cat((dialog_elmo_embeddings, dialog_char_embeddings), dim = 2)
            else:
                # Cached per-layer ELMo tensors combined via a learned scalar mix.
                dialog_elmo_embeddings = self.mix_dialog_embeddings(dialog_indices)
                dialog_embeddings = torch.cat((dialog_elmo_embeddings, dialog_char_embeddings), dim=2)
        else:
            embedder_input = {'tokens':dialogs['tokens']}
            dialog_elmo_embeddings = self._text_field_embedder(embedder_input, num_wrapping_dims=0)
            dialog_embeddings = torch.cat((dialog_elmo_embeddings, dialog_char_embeddings), dim = 2)
        tag_embeddings = self._symbol_embedder(tags, num_wrapping_dims=0)
        # Cumulative utterance lengths give each turn's end offset in tokens.
        turn_offset = torch.cumsum(utt_lens, dim=1)
        max_turn_count = utt_lens.size(1)
        context_masks, utt_masks = self.gen_utt_masks(turn_offset, batch_size, max_turn_count, max_dialog_len)
        # dsv embeddings
        ds_embeddings, v_embeddings = self.get_dsv_embeddings()
        # phrase layer
        merged_dialog_embeddings = torch.cat((dialog_embeddings, tag_embeddings, exact_match), dim=2)
        total_loss = 0.0
        predictions = []
        if self._is_in_training_mode == True: # # only train one domain per turn for GPU memory limits
            sampled_turn = random.choice(list(range(max_turn_count)))
        for turn_i in range(max_turn_count):
            # predictions[t] = ({classification slot results}, {span slot results}).
            predictions.append(({}, {}))
            if self._is_in_training_mode == True and self._is_use_graph == False:
                if turn_i != sampled_turn:
                    continue
            if self._is_in_training_mode == True:
                if turn_i < sampled_turn:
                    # Turns before the sampled one only feed the graph: no gradients.
                    self.set_module_to_eval()
                if turn_i > sampled_turn: break
            # compute new domain slot embeddings
            attention_ds_embeddings = None
            if turn_i > 0 and self._is_use_graph:
                attention_ds_embeddings = self.ds_graph_embeddings(batch_size, predictions[turn_i - 1], ds_embeddings, v_embeddings)
            repeated_ds_embeddings = ds_embeddings.unsqueeze(0).repeat(batch_size, 1, 1)
            reduced_dialog_masks = self._phrase_layer(self._dropout(merged_dialog_embeddings), context_masks[:, turn_i, :])
            span_ds_i = 0
            for ds_i, ds_name in enumerate(self._ds_id2text):
                cur_repeated_ds_embeddings = repeated_ds_embeddings[:, ds_i, :].unsqueeze(1)
                cur_context_masks = context_masks[:, turn_i, :]
                if self._ds_type[ds_name] == "classification":
                    cur_labels = labels[:, turn_i, ds_i]
                    cur_v_embeddings = v_embeddings[ds_name]
                    loss, prediction = self.forward_classification(ds_name, reduced_dialog_masks, cur_repeated_ds_embeddings, cur_v_embeddings, cur_context_masks, cur_labels, attention_ds_embeddings)
                    predictions[turn_i][0][ds_name] = prediction
                    if self._loss_scale_by_num_values:
                        # Slots with larger value inventories get a larger loss weight.
                        loss = loss * max(1.0, math.log(cur_v_embeddings.size(0)))
                elif self._ds_type[ds_name] == "span":
                    cur_span_labels = span_labels[:, turn_i, span_ds_i]
                    cur_spans_start = spans_start[:, turn_i, span_ds_i]
                    cur_spans_end = spans_end[:, turn_i, span_ds_i]
                    loss, prediction = self.forward_span(ds_name, reduced_dialog_masks, cur_repeated_ds_embeddings, cur_context_masks, cur_span_labels, cur_spans_start, cur_spans_end)
                    predictions[turn_i][1][ds_name] = prediction
                    span_ds_i += 1
                if self._is_in_training_mode == True and turn_i == sampled_turn:
                    if not self._ds_masked[ds_name]:
                        total_loss += loss
            if self._is_in_training_mode == True:
                if turn_i < sampled_turn:
                    self.set_module_to_train()
        output = {}
        if self._is_in_training_mode == True:
            output["loss"] = total_loss
        output["predictions"] = predictions
        output["metadata"] = metadata
        return output
def set_module_to_eval(self):
self.eval()
self._phrase_layer.eval()
self._class_prediction_layer.eval()
self._span_prediction_layer.eval()
self._span_start_encoder.eval()
self._span_end_encoder.eval()
self._span_label_predictor.eval()
torch.set_grad_enabled(False)
def set_module_to_train(self):
self.train()
self._phrase_layer.train()
self._class_prediction_layer.train()
self._span_prediction_layer.train()
self._span_start_encoder.train()
self._span_end_encoder.train()
self._span_label_predictor.train()
torch.set_grad_enabled(True)
    def bi_att(self, dialog_embeddings, dsv_embeddings, context_masks):
        """Bidirectional attention between dialog tokens and domain-slot-value embeddings.

        Each side attends over the other and adds the attended summary back onto
        itself (residual update). Returns the updated (dialog, dsv) pair.
        """
        batch_size, max_dialog_len = context_masks.size()
        num_values = dsv_embeddings.size(1)
        dialog_dsv_similarity = self._dialog_dsv_attention(self._bi_dropout(dialog_embeddings), self._bi_dropout(dsv_embeddings))
        # attention on dsv
        # No mask on this direction: every candidate value is a valid target.
        dialog_dsv_att = util.masked_softmax(dialog_dsv_similarity.view(-1, num_values), None)
        dialog_dsv_att = dialog_dsv_att.view(batch_size, max_dialog_len, num_values)
        dialog_dsv = util.weighted_sum(dsv_embeddings, dialog_dsv_att)
        new_dialog_embeddings = dialog_embeddings + dialog_dsv
        # attention on dialog
        # Here padded dialog positions must be masked out via context_masks.
        dsv_dialog_att = util.masked_softmax(dialog_dsv_similarity.transpose(1, 2).contiguous().view(-1, max_dialog_len), context_masks.unsqueeze(1).repeat(1,num_values,1).view(-1, max_dialog_len))
        dsv_dialog_att = dsv_dialog_att.view(batch_size, num_values, max_dialog_len)
        dsv_dialog = util.weighted_sum(dialog_embeddings, dsv_dialog_att)
        new_dsv_embeddings = dsv_embeddings + dsv_dialog
        return new_dialog_embeddings, new_dsv_embeddings
    def forward_classification(self, ds_name, dialog_repr, ds_embeddings, value_embeddings, context_masks, labels=None, attention_ds_embeddings=None):
        """Score every candidate value of a classification slot against the dialog.

        Builds one embedding per candidate (slot + value), runs bidirectional
        attention against the dialog, pools a slot-conditioned dialog vector,
        optionally gates in graph context, and scores values by dot product.
        Returns (cross-entropy loss, argmax prediction).
        """
        batch_size, max_dialog_len = context_masks.size()
        num_values = value_embeddings.size(0)
        # One vector per candidate value: slot embedding + value embedding.
        repeated_dsv_embeddings = ds_embeddings.repeat(1, num_values, 1)
        repeated_dsv_embeddings += value_embeddings.unsqueeze(0).repeat(batch_size, 1, 1)
        dialog_repr, repeated_dsv_embeddings = self.bi_att(dialog_repr, repeated_dsv_embeddings, context_masks)
        ds_dialog_sim = self._ds_dialog_attention(self._bi_dropout(ds_embeddings), self._bi_dropout(dialog_repr))
        ds_dialog_att = util.masked_softmax(ds_dialog_sim.view(-1, max_dialog_len), context_masks.view(-1, max_dialog_len))
        ds_dialog_att = ds_dialog_att.view(batch_size, max_dialog_len)
        ds_dialog_repr = util.weighted_sum(dialog_repr, ds_dialog_att)
        if attention_ds_embeddings is not None:
            # Gate in the graph-contextualised slot embeddings from the previous turn.
            self_att_matrix = self._ds_attention(self._bi_dropout(ds_dialog_repr.unsqueeze(1)), attention_ds_embeddings)
            self_probs = util.masked_softmax(self_att_matrix, None)
            ret = util.weighted_sum(attention_ds_embeddings, self_probs).squeeze(1)
            gamma = torch.sigmoid(self._graph_gamma(ds_dialog_repr + ret))
            ds_dialog_repr = (1-gamma) * ds_dialog_repr + gamma * ret
        w = self._class_prediction_layer(self._bi_dropout(ds_dialog_repr)).unsqueeze(1)
        logits = torch.bmm(w, repeated_dsv_embeddings.transpose(1,2)).squeeze(1)
        prediction = torch.argmax(logits, dim=1)
        # NOTE(review): ``labels`` is dereferenced here before the None check
        # below, so calling with labels=None would raise -- confirm callers
        # always pass labels.
        loss = self._cross_entropy(logits.view(-1, num_values), labels.view(-1))
        if labels is not None:
            self._accuracy.value_acc(ds_name, logits, labels, labels != -1)
        return loss, prediction
    def forward_span(self, ds_name, dialog_repr, repeated_ds_embeddings, context_masks, span_labels=None, spans_start = None, spans_end = None):
        """Predict a span-type slot: a 3-way label (none / dont-care / span) plus a token span.

        Returns (loss, (span_label_prediction, best_span)); the loss sums the
        label cross-entropy and, when gold spans are given, the start/end
        cross-entropies.
        """
        batch_size, max_dialog_len = context_masks.size()
        ds_dialog_sim = self._ds_dialog_attention(self._dropout(repeated_ds_embeddings), self._dropout(dialog_repr))
        ds_dialog_att = util.masked_softmax(ds_dialog_sim.view(-1, max_dialog_len), context_masks.view(-1, max_dialog_len))
        ds_dialog_att = ds_dialog_att.view(batch_size, max_dialog_len)
        ds_dialog_repr = util.weighted_sum(dialog_repr, ds_dialog_att)
        ds_dialog_repr = ds_dialog_repr + repeated_ds_embeddings.squeeze(1)
        span_label_logits = self._span_label_predictor(F.relu(self._dropout(ds_dialog_repr)))
        span_label_prediction = torch.argmax(span_label_logits, dim=1)
        span_label_loss = 0.0
        if span_labels is not None:
            span_label_loss = self._cross_entropy(span_label_logits, span_labels) # loss averaged by #turn
            self._accuracy.span_label_acc(ds_name, span_label_logits, span_labels, span_labels != -1)
        loss = span_label_loss
        # The same projection ``w`` scores both start and end representations.
        w = self._span_prediction_layer(self._dropout(ds_dialog_repr)).unsqueeze(1)
        span_start_repr = self._span_start_encoder(self._dropout(dialog_repr))
        span_start_logits = torch.bmm(w, span_start_repr.transpose(1,2)).squeeze(1)
        # NOTE(review): span_start_probs / span_end_probs are computed but never used.
        span_start_probs = util.masked_softmax(span_start_logits, context_masks)
        span_start_logits = util.replace_masked_values(span_start_logits, context_masks.to(dtype=torch.int8), -1e7)
        span_end_repr = self._span_end_encoder(self._dropout(span_start_repr))
        span_end_logits = torch.bmm(w, span_end_repr.transpose(1,2)).squeeze(1)
        span_end_probs = util.masked_softmax(span_end_logits, context_masks)
        span_end_logits = util.replace_masked_values(span_end_logits, context_masks.to(dtype=torch.int8), -1e7)
        best_span = self.get_best_span(span_start_logits, span_end_logits)
        best_span = best_span.view(batch_size, -1)
        spans_loss = 0.0
        if spans_start is not None:
            spans_loss = self._cross_entropy(span_start_logits, spans_start)
            self._accuracy.span_start_acc(ds_name, span_start_logits, spans_start, spans_start != -1)
            spans_loss += self._cross_entropy(span_end_logits, spans_end)
            self._accuracy.span_end_acc(ds_name, span_end_logits, spans_end, spans_end != -1)
        loss += spans_loss
        return loss, (span_label_prediction, best_span)
    @overrides
    def decode(self, output_dict):
        """Turn per-turn prediction tensors into human-readable "slot:value" strings.

        Classification slots map the argmax index through the value vocabulary;
        span slots emit "none"/"dont care" or join the predicted token span from
        the metadata tokens. Removes "metadata" and "predictions" from the dict.
        """
        num_turns = len(output_dict["predictions"])
        class_output = []
        for t in range(num_turns):
            class_predictions = output_dict["predictions"][t][0]
            res = []
            for ds_name, pred in class_predictions.items():
                value = self._value_id2text[ds_name][pred.item()]
                res.append(ds_name+":"+value)
            class_output.append(res)
        span_output = []
        for t in range(num_turns):
            span_predictions = output_dict["predictions"][t][1]
            res = []
            for ds_name, pred in span_predictions.items():
                span_label = pred[0]
                # NOTE(review): ``value`` stays unbound (NameError) if span_label
                # is ever outside {0, 1, 2}.
                if span_label == 0: value = "none"
                if span_label == 1: value = "dont care"
                if span_label == 2:
                    start, end = pred[1][0][0], pred[1][0][1]
                    # NOTE(review): only metadata[0] is indexed -- this appears to
                    # assume batch size 1 at decode time; confirm.
                    value = " ".join([output_dict["metadata"][0][i].text for i in range(start, end+1)])
                    value = value.lower()
                res.append(ds_name+":" + value)
            span_output.append(res)
        # merge class output and span output
        output = []
        if len(span_output) != 0 and len(class_output) != 0:
            for x, y in zip(class_output, span_output):
                output.append(x + y)
        elif len(span_output) == 0:
            output = class_output
        elif len(class_output) == 0:
            output = span_output
        else:
            assert(False)
        output_dict["predicted_labels"] = [output]
        del output_dict["metadata"]
        del output_dict["predictions"]
        return output_dict
def get_metrics(self, reset = False):
acc = self._accuracy.get_metrics(reset)
return acc
    def get_dsv_embeddings(self):
        """Embed every domain-slot name and every candidate value string.

        Each name is tokenised on spaces, embedded (word-or-ELMo + character
        embedders), summed over tokens and divided by the token count, i.e.
        mean-pooled. Returns ``(ds_embeddings, {slot_name: value_embeddings})``.
        """
        def batch_to_id(batch: List[List[str]]):
            # Convert a list of space-separated names into indexed tensors on GPU.
            instances = []
            for b in batch:
                tokens = [Token(w) for w in b.split(" ")]
                field = TextField(tokens, self._token_indexers)
                instance = Instance({"b": field})
                instances.append(instance)
            dataset = Batch(instances)
            vocab = self.vocab
            dataset.index_instances(vocab)
            res = {}
            for k, v in dataset.as_tensor_dict()['b'].items():
                res[k] = v.cuda()
            return res
        ds_ids = batch_to_id(self._ds_id2text)
        if 'tokens' in ds_ids:
            elmo_embedder_input = {'tokens':ds_ids['tokens']}
        elif 'elmo' in ds_ids:
            elmo_embedder_input = {'elmo':ds_ids['elmo']}
        ds_elmo_embeddings = self._text_field_embedder(elmo_embedder_input, num_wrapping_dims=0).sum(1)
        char_embedder_input = {'token_characters':ds_ids['token_characters']}
        ds_char_embeddings = self._text_field_char_embedder(char_embedder_input, num_wrapping_dims=0).sum(1)
        ds_embeddings = torch.cat((ds_elmo_embeddings, ds_char_embeddings), dim=1)
        ds_masks = util.get_text_field_mask(ds_ids, num_wrapping_dims=0).sum(1).float()
        # Mean-pool: divide the token-summed embeddings by each name's token count.
        ds_embeddings = ds_embeddings / ds_masks.unsqueeze(1).repeat(1, ds_embeddings.size(1))
        v_embeddings = {}
        for v, v_list in self._value_id2text.items():
            v_ids = batch_to_id(v_list)
            if 'tokens' in v_ids:
                elmo_embedder_input = {'tokens':v_ids['tokens']}
            elif 'elmo' in v_ids:
                elmo_embedder_input = {'elmo':v_ids['elmo']}
            v_elmo_embeddings = self._text_field_embedder(elmo_embedder_input, num_wrapping_dims=0).sum(1)
            char_embedder_input = {'token_characters':v_ids['token_characters']}
            v_char_embeddings = self._text_field_char_embedder(char_embedder_input, num_wrapping_dims=0).sum(1)
            v_embeddings[v] = torch.cat((v_elmo_embeddings, v_char_embeddings), dim=1)
            v_masks = util.get_text_field_mask(v_ids, num_wrapping_dims=0).sum(1).float()
            v_embeddings[v] = v_embeddings[v] / v_masks.unsqueeze(1).repeat(1, v_embeddings[v].size(1))
        return ds_embeddings, v_embeddings
def read_domain_slot_list(self, filename):
with open(filename) as fp:
lines = fp.readlines()
domains = []
domain_slots = []
value_file_path = {}
domain_slots_type = {}
domain_slots_use_value_list = {}
ds_masked = {}
num_ds_use_value = 0
for line in lines:
line = line.strip("\n ")
if line.startswith("#"):
continue
if len(line.strip("\n ")) == 0 :
continue
line_arr = line.split("\t")
ds = line_arr[0] + " " + line_arr[1]
if line_arr[3] == "n":
domains.append(line_arr[0])
domain_slots.append(ds)
value_file_path[ds] = line_arr[4].strip(" \n")
domain_slots_type[ds] = line_arr[2]
domain_slots_use_value_list[ds] = True if line_arr[5] == "y" else False
num_ds_use_value += 1 if line_arr[5] == "y" else 0
ds_masked[ds] = True if line_arr[6] == "y" else False
ds_text2id = {}
for i, s in enumerate(domain_slots):
ds_text2id[s] = i
return domains, domain_slots, ds_text2id, value_file_path, domain_slots_type, domain_slots_use_value_list, num_ds_use_value, ds_masked
def load_value_list(self, ds_path):
def read_value_list(ds_path, ds, value_path_list):
dir_path = os.path.dirname(ds_path)
filename = dir_path + "/" + value_path_list[ds]
with open(filename) as fp:
lines = fp.readlines()
values = []
for line_i, line in enumerate(lines):
if len(line.strip("\n ")) == 0:
continue
values.append(line.strip("\n "))
value2id = {}
for i, v in enumerate(values):
value2id[v] = i
return values, value2id
value_text2id = {}
value_id2text = {}
for ds in self._ds_text2id.keys():
if not self._ds_use_value_list[ds]: continue
id2v, v2id =read_value_list(ds_path, ds, self.value_file_path)
value_text2id[ds] = v2id
value_id2text[ds] = id2v
return value_id2text, value_text2id
# code from https://github.com/allenai/allennlp/blob/master/allennlp/models/reading_comprehension/bidaf.py
def get_best_span(self, span_start_logits, span_end_logits):
# We call the inputs "logits" - they could either be unnormalized logits or normalized log
# probabilities. A log_softmax operation is a constant shifting of the entire logit
# vector, so taking an argmax over either one gives the same result.
if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:
raise ValueError("Input shapes must be (batch_size, passage_length)")
batch_size, passage_length = span_start_logits.size()
device = span_start_logits.device
# (batch_size, passage_length, passage_length)
span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1)
# Only the upper triangle of the span matrix is valid; the lower triangle has entries where
# the span ends before it starts.
span_log_mask = torch.triu(torch.ones((passage_length, passage_length),
device=device)).log().unsqueeze(0)
valid_span_log_probs = span_log_probs + span_log_mask
# Here we take the span matrix and flatten it, then find the best span using argmax. We
# can recover the start and end indices from this flattened list using simple modular
# arithmetic.
# (batch_size, passage_length * passage_length)
best_spans = valid_span_log_probs.view(batch_size, -1).argmax(-1)
span_start_indices = best_spans // passage_length
span_end_indices = best_spans % passage_length
return torch.stack([span_start_indices, span_end_indices], dim=-1)
    def ds_graph_embeddings(self, batch_size, predictions, ds_embeddings, v_embeddings):
        """Inject the previous turn's predicted values back into the slot embeddings.

        For every slot that uses a value list, the embedding of the value
        predicted at the previous turn (``predictions[0][node]`` holds per-batch
        value indices) is added to that slot's embedding, providing the graph
        context used by the next turn.
        """
        repeated_ds_embeddings = ds_embeddings.unsqueeze(0).repeat(batch_size, 1, 1)
        for node_i, node in enumerate(self._ds_id2text):
            if not self._ds_use_value_list[node]: continue
            val_node = v_embeddings[node][predictions[0][node]]
            ds_node = repeated_ds_embeddings[:, self._ds_text2id[node], :]
            ds_node = ds_node + val_node
            # clone() avoids an in-place write on a tensor autograd may still need.
            repeated_ds_embeddings = repeated_ds_embeddings.clone()
            repeated_ds_embeddings[:, self._ds_text2id[node], :] = ds_node
        return repeated_ds_embeddings
| 26,073 | 48.103578 | 193 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/Lorenz/generate.py | from turtle import color
import numpy as np
import math
import torch
import timeit
import numpy as np
import matplotlib.pyplot as plt
# import matplotlib
# matplotlib.rcParams['font.sans-serif'] = 'NSimSun,Times New Roman'
# matplotlib.rcParams['text.usetex'] = True
# RGB palette (channel values in [0, 1]) used to distinguish trajectories/markers.
colors = [
    [233/256, 110/256, 236/256], # #e96eec
    # [0.6, 0.6, 0.2], # olive
    # [0.5333333333333333, 0.13333333333333333, 0.3333333333333333], # wine
    [255/255, 165/255, 0],
    # [0.8666666666666667, 0.8, 0.4666666666666667], # sand
    # [223/256, 73/256, 54/256], # #df4936
    [107/256, 161/256,255/256], # #6ba1ff
    [0.6, 0.4, 0.8], # amethyst
    [0.0, 0.0, 1.0], # ao
    [0.55, 0.71, 0.0], # applegreen
    # [0.4, 1.0, 0.0], # brightgreen
    [0.99, 0.76, 0.8], # bubblegum
    [0.93, 0.53, 0.18], # cadmiumorange
    [11/255, 132/255, 147/255], # deblue
    [204/255, 119/255, 34/255], # {ocra}
]
colors = np.array(colors)
# Global NumPy seed for reproducibility of this script.
np.random.seed(10)
class Net(torch.nn.Module):
    """Three-layer ReLU MLP producing the state-dependent control gains u(x)."""
    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        # Fixed seed so the layer initialisation is reproducible across runs.
        torch.manual_seed(2)
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)
    def forward(self, x):
        activate = torch.nn.ReLU()
        hidden = activate(self.layer2(activate(self.layer1(x))))
        return self.layer3(hidden)
# Controller network dimensions: 3-D Lorenz state in, one gain per coordinate out.
D_in = 3
H1 = 10
D_out = 3
# Module-level controller instance reused by the generate helpers below.
model = Net(D_in,H1,D_out)
# set_state0 = torch.tensor([[3.0,5.0,6.0]])
def control_data(model, random_seed, set_state0, N=20000, dt=0.00001):
    """Euler-Maruyama simulation of the controlled Lorenz SDE from ``set_state0``.

    ``model`` maps the state to per-coordinate control gains u(x); every
    coordinate is driven by the same scalar Gaussian increment, with the
    multiplicative diffusion term x_k * u_k * dW. Prints the wall-clock time
    and returns a (3, N) trajectory tensor.
    """
    start = timeit.default_timer()
    torch.manual_seed(random_seed)
    X = torch.zeros([3, N])
    z = torch.randn(N)  # one shared noise increment per step
    X[0, 0] = set_state0[0, 0]
    X[1, 0] = set_state0[0, 1]
    X[2, 0] = set_state0[0, 2]
    for step in range(N - 1):
        x1, x2, x3 = X[0, step], X[1, step], X[2, step]
        with torch.no_grad():
            u = model(torch.tensor([x1, x2, x3]))
        # Lorenz drift (sigma=10, rho=28, beta=8/3) plus multiplicative noise.
        noise_scale = z[step] * math.sqrt(dt)
        X[0, step + 1] = x1 + 10 * (x2 - x1) * dt + x1 * u[0] * noise_scale
        X[1, step + 1] = x2 + (x1 * (28 - x3) - x2) * dt + x2 * u[1] * noise_scale
        X[2, step + 1] = x3 + (x1 * x2 - 8 / 3 * x3) * dt + x3 * u[2] * noise_scale
    stop = timeit.default_timer()
    print(stop - start)
    return X
def modify_control_data(model, random_seed, set_state0, N=20000, dt=0.00001):
    """Euler-Maruyama simulation of the Lorenz SDE controlled around equilibrium P2.

    P2 = (6*sqrt(2), 6*sqrt(2), 27). The control net is evaluated on the shifted
    state (x - e) and the diffusion term is (x_k - e_k) * u_k * dW, so the noise
    vanishes exactly at the equilibrium. Prints the wall-clock time and returns
    a (3, N) trajectory tensor.
    """
    start = timeit.default_timer()
    torch.manual_seed(random_seed)
    X = torch.zeros([3, N])
    z = torch.randn(N)
    e = torch.tensor([6.0 * math.sqrt(2), 6.0 * math.sqrt(2), 27.0])
    e1, e2, e3 = e
    X[0, 0] = set_state0[0, 0]
    X[1, 0] = set_state0[0, 1]
    # BUG FIX: the z-coordinate was initialised from set_state0[0, 1] (the y
    # component) instead of set_state0[0, 2] (cf. control_data).
    X[2, 0] = set_state0[0, 2]
    for i in range(N - 1):
        x1 = X[0, i]
        x2 = X[1, i]
        x3 = X[2, i]
        with torch.no_grad():
            u = model(torch.tensor([x1 - e1, x2 - e2, x3 - e3]))
        new_x1 = x1 + 10 * (x2 - x1) * dt + (x1 - e1) * u[0] * z[i] * math.sqrt(dt)
        new_x2 = x2 + (x1 * (28 - x3) - x2) * dt + (x2 - e2) * u[1] * z[i] * math.sqrt(dt)
        new_x3 = x3 + (x1 * x2 - 8 / 3 * x3) * dt + (x3 - e3) * u[2] * z[i] * math.sqrt(dt)
        X[0, i + 1] = new_x1
        X[1, i + 1] = new_x2
        X[2, i + 1] = new_x3
    stop = timeit.default_timer()
    print(stop - start)
    return X
def original_data(set_state0, N=50000, dt=0.001):
    """Simulate the uncontrolled Lorenz system with forward Euler and save it.

    The (3, N) trajectory is written to ./data/Lorenz/original_data.pt (the
    directory is created if missing) and also returned.
    """
    import os
    start = timeit.default_timer()
    X = torch.zeros([3, N])
    X[0, 0] = set_state0[0, 0]
    X[1, 0] = set_state0[0, 1]
    # BUG FIX: the z-coordinate was initialised from set_state0[0, 1] (the y
    # component) instead of set_state0[0, 2].
    X[2, 0] = set_state0[0, 2]
    for i in range(N - 1):
        x1 = X[0, i]
        x2 = X[1, i]
        x3 = X[2, i]
        # Classic Lorenz drift: sigma=10, rho=28, beta=8/3.
        new_x1 = x1 + 10 * (x2 - x1) * dt
        new_x2 = x2 + (x1 * (28 - x3) - x2) * dt
        new_x3 = x3 + (x1 * x2 - 8 / 3 * x3) * dt
        X[0, i + 1] = new_x1
        X[1, i + 1] = new_x2
        X[2, i + 1] = new_x3
    stop = timeit.default_timer()
    print(stop - start)
    # Robustness: create the output directory so torch.save cannot fail on a
    # fresh checkout.
    os.makedirs('./data/Lorenz', exist_ok=True)
    torch.save(X, './data/Lorenz/original_data.pt')
    return X
def plot_original_orbit():
    """Plot the uncontrolled Lorenz attractor in 3-D with its three equilibria."""
    fig = plt.figure()
    # First 50k saved steps, subsampled every 10th point.
    X = torch.load('./data/Lorenz/original_data.pt')[:,0:50000:10]
    x1,x2,x3=X[0,:],X[1,:],X[2,:]
    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)
    ax = fig.add_subplot(111,projection = '3d')
    ax.plot3D(x1,x2,x3,color=[1.0, 0.8, 0.6])
    # Equilibria: the origin and (+/-6*sqrt(2), +/-6*sqrt(2), 27).
    ax.plot3D(0,0,0,marker='*',label=r'$P_1$',color=colors[0])
    ax.plot3D(6*math.sqrt(2),6*math.sqrt(2),27,marker='*',label=r'$P_2$',color=colors[3])
    ax.plot3D(-6*math.sqrt(2),-6*math.sqrt(2),27,marker='*',label=r'$P_3$',color=colors[2])
    plt.legend()
def orbit1(ax,path1,P1):
    """Draw controlled 3-D trajectories from starts Q1-Q3 toward target P1 on ``ax``.

    ``path1`` selects the controller ('icnn' or 'quad'); data is loaded from
    the .npy files produced by the *_generate helpers (first run of the batch).
    """
    # fig = plt.figure()
    Q1 =np.load('./data/Lorenz/{}_data_{}_Q1.npy'.format(path1,P1))[0,:,0:100000:10]
    Q2 =np.load('./data/Lorenz/{}_data_{}_Q2.npy'.format(path1,P1))[0,:,0:100000:10]
    Q3 =np.load('./data/Lorenz/{}_data_{}_Q3.npy'.format(path1,P1))[0,:,0:100000:10]
    # plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)
    # ax = fig.add_subplot(111,projection = '3d')
    ax.plot3D(Q1[0,:],Q1[1,:],Q1[2,:],color=colors[4],alpha=0.5)
    ax.plot3D(Q2[0,:],Q2[1,:],Q2[2,:],color=colors[5],alpha=0.5)
    ax.plot3D(Q3[0,:],Q3[1,:],Q3[2,:],color=colors[7],alpha=0.5)
    ax.plot3D(0,0,0,marker='*',label=r'$P_1$',markersize=10,color=colors[0])
    # ax.plot3D(6*math.sqrt(2),6*math.sqrt(2),27,marker='*',label=r'$P_2$')
    # ax.plot3D(-6*math.sqrt(2),-6*math.sqrt(2),27,marker='*',label=r'$P_3$')
    ax.plot3D(9,6,8,marker='*',label=r'$Q_1$',markersize=10,color=colors[4])
    ax.plot3D(3,5,6,marker='*',label=r'$Q_2$',markersize=10,color=colors[5])
    ax.plot3D(1,9,2,marker='*',label=r'$Q_3$',markersize=10,color=colors[7])
    # ax.plot3D(8,2,1,marker='^',label=r'$Q_4$')
    ax.set_xlabel(r'$X$')
    # ax.set_xlim(0, 10)
    ax.set_ylabel(r'$Y$')
    # ax.set_ylim(0, 10)
    ax.set_zlabel(r'$Z$')
    # ax.set_zlim(0, 10)
    plt.legend(fontsize=8,markerscale=0.5,labelspacing=0.05,borderpad=0.1,handlelength=1.0)
def orbit2(ax,path1,P1):
    """Draw controlled 3-D trajectories from starts Q1-Q3 toward target P2 on ``ax``.

    Like orbit1 but with a longer slice (200k steps) and the P2 equilibrium
    marker at (6*sqrt(2), 6*sqrt(2), 27).
    """
    # fig = plt.figure()
    Q1 =np.load('./data/Lorenz/{}_data_{}_Q1.npy'.format(path1,P1))[0,:,0:200000:10]
    Q2 =np.load('./data/Lorenz/{}_data_{}_Q2.npy'.format(path1,P1))[0,:,0:200000:10]
    Q3 =np.load('./data/Lorenz/{}_data_{}_Q3.npy'.format(path1,P1))[0,:,0:200000:10]
    # plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)
    # ax = fig.add_subplot(111,projection = '3d')
    ax.plot3D(Q1[0,:],Q1[1,:],Q1[2,:],color=colors[4],alpha=0.5)
    ax.plot3D(Q2[0,:],Q2[1,:],Q2[2,:],color=colors[5],alpha=0.5)
    ax.plot3D(Q3[0,:],Q3[1,:],Q3[2,:],color=colors[7],alpha=0.5)
    # ax.plot3D(0,0,0,marker='*',label=r'$P_1$',markersize=10)
    ax.plot3D(6*math.sqrt(2),6*math.sqrt(2),27,marker='*',label=r'$P_2$',markersize=10,color=colors[3])
    # ax.plot3D(-6*math.sqrt(2),-6*math.sqrt(2),27,marker='*',label=r'$P_3$')
    ax.plot3D(9,6,8,marker='*',label=r'$Q_1$',markersize=10,color=colors[4])
    ax.plot3D(3,5,6,marker='*',label=r'$Q_2$',markersize=10,color=colors[5])
    ax.plot3D(1,9,2,marker='*',label=r'$Q_3$',markersize=10,color=colors[7])
    ax.set_xlabel(r'$X$')
    # ax.set_xlim(0, 10)
    ax.set_ylabel(r'$Y$')
    # ax.set_ylim(0, 10)
    ax.set_zlabel(r'$Z$')
    # ax.set_zlim(0, 10)
    plt.legend(fontsize=8,markerscale=0.5,labelspacing=0.05,borderpad=0.1,handlelength=1.0)
    # plt.legend(loc='upper right',labelspacing=0.1,borderpad=0.2,handlelength=1.2)
def plot_original_tra():
    """Plot x/y/z of the uncontrolled Lorenz trajectory as three time-series panels."""
    X = torch.load('./data/Lorenz/original_data.pt')[:,0:40000:10]
    x1,x2,x3=X[0,:],X[1,:],X[2,:]
    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)
    plt.subplot(131)
    plt.xticks([])
    plt.plot(np.arange(len(x1)),x1,label='x',color='r')
    plt.ylabel(r'$x$')
    plt.subplot(132)
    plt.xticks([])
    plt.plot(np.arange(len(x1)),x2,label='y',color='g')
    plt.ylabel(r'$y$')
    plt.subplot(133)
    # Tick positions are sample indices; labels relabel them as time units.
    plt.xticks([0,1000,2000,3000,4000],[0,10,20,30,40])
    plt.plot(np.arange(len(x1)),x3,label='z',color='b')
    plt.ylabel(r'$z$')
    plt.xlabel('Time')
def plot_grid():
    """Enable a styled major + minor grid on the current axes."""
    # NOTE(review): the ``b=`` keyword was renamed ``visible=`` in Matplotlib 3.5,
    # so this requires an older Matplotlib -- confirm the pinned version.
    plt.grid(b=True, which='major', color='gray', alpha=0.6, linestyle='dashdot', lw=1.5)
    # minor grid lines
    plt.minorticks_on()
    plt.grid(b=True, which='minor', color='beige', alpha=0.8, ls='-', lw=1)
def plot_tra(path1,P1,Q1,length=200000):
    """Plot x/y/z time series of one controlled trajectory (first run of the saved batch).

    ``path1`` is the controller name, ``P1`` the target label and ``Q1`` the
    start label used to locate the saved .npy file.
    """
    X = np.load('./data/Lorenz/{}_data_{}_{}.npy'.format(path1,P1,Q1))[0,:,0:length:10]
    x1,x2,x3=X[0,:],X[1,:],X[2,:]
    plt.plot(np.arange(len(x1)),x1,label='x',color='r')
    plt.plot(np.arange(len(x1)),x2,label='y',color='g')
    plt.plot(np.arange(len(x1)),x3,label='z',color='b')
    plot_grid()
    plt.legend(loc='upper right',labelspacing=0.1,borderpad=0.2,handlelength=1.2)
def quad_generate(set_state0,m,N,dt,case):
    """Simulate ``m`` trajectories under the quadratic-Lyapunov controller and save them.

    case == 0 stabilises the origin P1 (control_data); any other case stabilises
    the equilibrium P2 (modify_control_data). Run i uses seed i*6+2. Results go
    to ./data/Lorenz/quad_data_P{1,2}_Q2_20.npy as an (m, 3, N) array.
    """
    X = torch.zeros(m,3,N)
    # model.load_state_dict(torch.load('./neural_sde/Lorenz/ES_quad_net_modify_0.pkl'))
    # model.load_state_dict(torch.load('./neural_sde/Lorenz/ES_quad_net_modify_1.pkl'))
    if case == 0:
        model.load_state_dict(torch.load('./data/Lorenz/ES_quad_net_modify_0.pkl'))
        for i in range(m):
            X[i,:] = control_data(model,i*6+2,set_state0,N,dt)
            print(case,i)
        X = X.detach().numpy()
        np.save('./data/Lorenz/quad_data_P1_Q2_20',X)
    else:
        model.load_state_dict(torch.load('./data/Lorenz/ES_quad_net_modify_1.pkl'))
        for i in range(m):
            X[i,:] = modify_control_data(model,i*6+2,set_state0,N,dt)
            print(case,i)
        X = X.detach().numpy()
        np.save('./data/Lorenz/quad_data_P2_Q2_20',X)
    # return X
def icnn_generate(set_state0,m,N,dt,case):
    """Simulate ``m`` trajectories under the ICNN-Lyapunov controller and save them.

    Mirrors quad_generate but loads the ICNN controller weights and uses seeds
    i*6+6. Results go to ./data/Lorenz/icnn_data_P{1,2}_Q2_20.npy.
    """
    X = torch.zeros(m,3,N)
    # model.load_state_dict(torch.load('./neural_sde/Lorenz/ES_icnn_net_100.pkl'))
    # model.load_state_dict(torch.load('./neural_sde/Lorenz/ES_icnn_net_modify_1.pkl'))
    if case == 0:
        model.load_state_dict(torch.load('./data/Lorenz/ES_icnn_net_100.pkl'))
        for i in range(m):
            X[i,:] = control_data(model,i*6+6,set_state0,N,dt)
            print(case,i)
        X = X.detach().numpy()
        np.save('./data/Lorenz/icnn_data_P1_Q2_20',X)
    else:
        model.load_state_dict(torch.load('./data/Lorenz/ES_icnn_net_modify_1.pkl'))
        for i in range(m):
            X[i,:] = modify_control_data(model,i*6+6,set_state0,N,dt)
            print(case,i)
        X = X.detach().numpy()
        np.save('./data/Lorenz/icnn_data_P2_Q2_20',X)
    # return X
# Shared font size for the figure labels below.
font_size = 15
def plot1():
    """Figure with four 3-D orbit panels: ICNN/quad controllers for targets P1 and P2."""
    fig = plt.figure()
    ax1 = fig.add_subplot(4,4,4,projection = '3d')
    orbit1(ax1,'icnn','P1')
    plt.title('Orbit')
    ax2 = fig.add_subplot(4,4,8,projection = '3d')
    orbit1(ax2,'quad','P1')
    ax3 = fig.add_subplot(4,4,12,projection = '3d')
    orbit2(ax3,'icnn','P2')
    ax4 = fig.add_subplot(4,4,16,projection = '3d')
    orbit2(ax4,'quad','P2')
def plot2():
    """4x3 grid of time-series panels: rows = (ICNN, quad) x (P1, P2), columns = starts Q1-Q3."""
    # Row 1: ICNN controller, target P1 (short horizon).
    for i in range(3):
        plt.subplot(4,3,i+1)
        plot_tra('icnn','P1','Q{}'.format(i+1),5000)
        plt.xticks([0,200,400],['0','0.02','0.04'])
        plt.title(r'$Q_{}$'.format(i+1),fontsize=font_size)
        if i ==0:
            plt.ylabel(r'$Value$',fontsize=font_size)
            plt.text(0.1,4,r'$ICNN : P_1$',rotation=90,fontsize=font_size)
        if i==1:
            plt.xlabel('Time',fontsize=font_size)
    # Row 2: quadratic controller, target P1.
    for i in range(3):
        plt.subplot(4,3,3+i+1)
        plot_tra('quad','P1','Q{}'.format(i+1),5000)
        plt.xticks([0,200,400],['0','0.02','0.04'])
        if i==1:
            plt.xlabel('Time',fontsize=font_size)
        if i ==0:
            plt.ylabel(r'$Value$',fontsize=font_size)
            plt.text(0.1,3,r'$Quad : P_1$',rotation=90,fontsize=font_size)
    # Row 3: ICNN controller, target P2 (long horizon).
    for i in range(3):
        plt.subplot(4,3,6+i+1)
        plot_tra('icnn','P2','Q{}'.format(i+1),200000)
        plt.xticks([0,10000,20000],['0','1.0','2.0'])
        plt.ylim(-10,35)
        if i==1:
            plt.xlabel('Time',fontsize=font_size)
        if i ==0:
            plt.ylabel(r'$Value$',fontsize=font_size)
            plt.text(-0.5,2,r'$ICNN : P_2$',rotation=90,fontsize=font_size)
    # Row 4: quadratic controller, target P2.
    for i in range(3):
        plt.subplot(4,3,9+i+1)
        plot_tra('quad','P2','Q{}'.format(i+1),200000)
        plt.xticks([0,10000,20000],['0','1.0','2.0'])
        plt.ylim(-10,35)
        if i==1:
            plt.xlabel('Time',fontsize=font_size)
        if i ==0:
            plt.ylabel(r'$Value$',fontsize=font_size)
            plt.text(-0.5,1,r'$Quad : P_2$',rotation=90,fontsize=font_size)
if __name__ == '__main__':
    # Initial states for the three test trajectories used throughout.
    Q1 = torch.tensor([[9.0,6.0,8.0]])
    Q2 = torch.tensor([[3.0,5.0,6.0]])
    Q3 = torch.tensor([[1.0,9.0,2.0]])
    '''
    generate control data
    '''
    # NOTE(review): the final quad_generate call uses dt=0.0001 while the other
    # three use dt=0.00001 -- confirm the asymmetry is intentional.
    icnn_generate(Q2,20,200000,0.00001,0)
    quad_generate(Q2,20,200000,0.00001,0)
    icnn_generate(Q2,20,200000,0.00001,1)
    quad_generate(Q2,20,200000,0.0001,1)
    '''
    Plot figure in Lorenz Experiment
    '''
    # plot1()
    # plot2()
    # original_data(set_state0)
    # plot_original_orbit()
    # plot_original_tra()
    # plt.show()
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/Lorenz/ES_ICNN.py | import torch.nn.functional as F
import timeit
from hessian import hessian
from hessian import jacobian
# from gradient import hessian
# from gradient import jacobian
import torch
import random
import numpy as np
def setup_seed(seed):
    """Make runs reproducible by seeding every RNG the experiment relies on."""
    for seeder in (torch.manual_seed, np.random.seed, random.seed):
        seeder(seed)
setup_seed(10)
from Control_Nonlinear_Icnn import *
import math
import argparse
# Command-line configuration. NOTE(review): parse_args() runs at import time,
# which breaks importing this module from other code -- confirm script-only use.
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--N', type=int, default=10000)  # size of the sample pool
parser.add_argument('--D_in', type=int, default=3)  # state dimension
parser.add_argument('--D_h', type=int, default=10)  # hidden width of the control net
parser.add_argument('--lr', type=float, default=0.03)  # Adam learning rate
parser.add_argument('--b', type=float, default=2.1)  # coefficient b in the ES Lyapunov risk
parser.add_argument('--niters', type=int, default=200)  # outer sampling iterations
parser.add_argument('--batch_size', type=int, default=100)
args = parser.parse_args()
def Lorenz_value(x):
    """Evaluate the uncontrolled Lorenz drift f(x) row-wise for a batch of states.

    Classic parameters sigma=10, rho=28, beta=8/3. ``x`` is (batch, 3);
    returns a tensor of the same shape.
    """
    rows = []
    for state in x:
        x1, x2, x3 = state[0], state[1], state[2]
        rows.append([10 * (x2 - x1), x1 * (28 - x3) - x2, x1 * x2 - x3 * 8 / 3])
    return torch.tensor(rows)
def modify_Lorenz_value(x):
    """Lorenz drift evaluated at the shifted state x + P2.

    P2 = (6*sqrt(2), 6*sqrt(2), 27) is an equilibrium, so training around the
    origin of the shifted coordinates targets P2. Returns a (batch, 3) tensor.
    """
    e = torch.tensor([6.0 * math.sqrt(2), 6.0 * math.sqrt(2), 27.0])
    rows = []
    for state in x:
        x1, x2, x3 = state + e
        rows.append([10 * (x2 - x1), x1 * (28 - x3) - x2, x1 * x2 - x3 * 8 / 3])
    return torch.tensor(rows)
def get_batch(data):
    """Sample ``args.batch_size`` distinct rows uniformly at random from ``data``."""
    chosen = np.random.choice(np.arange(args.N, dtype=np.int64), args.batch_size, replace=False)
    return data[torch.from_numpy(chosen), :]  # (batch_size, D)
'''
For learning
'''
N = args.N # sample size
D_in = args.D_in # input dimension
H1 = args.D_h # hidden dimension
D_out = D_in # output dimension
# Training states sampled uniformly from [0, 10]^D_in.
data_x = torch.Tensor(N, D_in).uniform_(0, 10)
eps = 0.001
start = timeit.default_timer()
model = LyapunovFunction(D_in,H1,D_out,(D_in,),0.1,[12,12,12,1],eps)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
max_iters = 2000
for r in range(1, args.niters + 1):
    # break
    x = get_batch(data_x)
    i = 0
    L = []
    while i < max_iters:
        # V candidate and control output; g = u(x) * x is the diffusion term.
        output, u = model(x)
        g = u*x
        f = Lorenz_value(x)
        # f = modify_Lorenz_value(x)
        x = x.clone().detach().requires_grad_(True)
        # ICNN weights used to evaluate the Lyapunov candidate V directly.
        ws = model._icnn._ws
        bs = model._icnn._bs
        us = model._icnn._us
        smooth = model.smooth_relu
        input_shape = (D_in,)
        V1 = lya(ws,bs,us,smooth,x,input_shape)
        V0 = lya(ws,bs,us,smooth,torch.zeros_like(x),input_shape)
        # V(x) = smooth(V_icnn(x) - V_icnn(0)) + eps*||x||^2 ensures V(0)=0, V>0.
        num_V = smooth(V1-V0)+eps*x.pow(2).sum(dim=1)
        V = torch.sum(smooth(V1-V0)+eps*x.pow(2).sum(dim=1))
        Vx = jacobian(V,x)
        Vxx = hessian(V,x)
        # NOTE(review): loss is sized N but only the first args.batch_size
        # entries are filled, so the .mean() below is diluted by zeros.
        loss = torch.zeros(N)
        # NOTE(review): this inner ``r`` shadows the outer iteration variable.
        for r in range(args.batch_size):
            # L_V = Vx.f + 0.5 * g^T Vxx g  (the SDE generator applied to V).
            L_V = torch.sum(Vx[0,D_in*r:D_in*r+D_in]*f[r,:]) + 0.5*torch.mm(g[r,:].unsqueeze(0),torch.mm(Vxx[D_in*r:D_in*r+D_in,D_in*r:D_in*r+D_in],g[r,:].unsqueeze(1)))
            Vxg = torch.sum(Vx[0,D_in*r:D_in*r+D_in]*g[r,:])
            v = num_V[0,r]
            # Exponential-stability condition: (Vx.g)^2 / V^2 - b * L_V / V > 0.
            loss[r] = Vxg**2/(v**2) - args.b*L_V/v
        # Penalise violations of the stability condition.
        Lyapunov_risk = (F.relu(-loss)).mean()
        L.append(Lyapunov_risk.item())
        print(i, "Lyapunov Risk=",Lyapunov_risk.item())
        optimizer.zero_grad()
        Lyapunov_risk.backward()
        optimizer.step()
        # Crude learning-rate schedule keyed to the current risk value.
        if Lyapunov_risk < 1.0:
            optimizer = torch.optim.Adam(model.parameters(), lr=0.005)
        elif Lyapunov_risk > 1.0:
            optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        if Lyapunov_risk == 0.0:
            print(Lyapunov_risk)
            break
        i += 1
    # torch.save(model._control.state_dict(),'ES_icnn_net.pkl')
    # torch.save(model._icnn.state_dict(),'ES_icnn_V_net.pkl')
stop = timeit.default_timer()
print('\n')
print("Total time: ", stop - start)
# torch.save(model._icnn.state_dict(),'ES_icnn_V_net.pkl')
# torch.save(model._control.state_dict(),'./neural_sde/Lorenz/ES_icnn_net_modify_1.pkl')
# torch.save(model._icnn.state_dict(),'./neural_sde/Lorenz/ES_icnn_V_net_modify_1.pkl') | 4,181 | 31.169231 | 169 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/Lorenz/ES_Quadratic.py | import torch.nn.functional as F
import timeit
from hessian import hessian
from hessian import jacobian
# from gradient import hessian
# from gradient import jacobian
import torch
import random
import math
import numpy as np
def setup_seed(seed):
    """Seed torch, NumPy and the stdlib random module so experiments repeat exactly."""
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
setup_seed(10)
import argparse
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--N', type=int, default=10000)
parser.add_argument('--D_in', type=int, default=3)
parser.add_argument('--D_h', type=int, default=10)
parser.add_argument('--lr', type=float, default=0.03)
parser.add_argument('--b', type=float, default=2.1)
parser.add_argument('--niters', type=int, default=200)
parser.add_argument('--batch_size', type=int, default=100)
args = parser.parse_args()
class ControlNet(torch.nn.Module):
    """Two-hidden-layer ReLU network producing the raw control signal."""

    def __init__(self, n_input, n_hidden, n_output):
        super(ControlNet, self).__init__()
        torch.manual_seed(2)  # fixed seed: weight initialisation is reproducible
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        act = torch.nn.ReLU()
        hidden = act(self.layer2(act(self.layer1(x))))
        return self.layer3(hidden)
class VNet(torch.nn.Module):
    """Two-hidden-layer tanh network used as the Lyapunov candidate."""

    def __init__(self, n_input, n_hidden, n_output):
        super(VNet, self).__init__()
        torch.manual_seed(2)  # reproducible initialisation
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        act = torch.nn.Tanh()
        return self.layer3(act(self.layer2(act(self.layer1(x)))))
class Net(torch.nn.Module):
    """Joint model: Lyapunov candidate `_v` plus state-scaled control `_control`."""

    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        torch.manual_seed(2)
        self._v = VNet(n_input, 12, n_output)
        self._control = ControlNet(n_input, n_hidden, n_output)

    def forward(self, x):
        # Control output is multiplied by the state so u(0) = 0 at the origin.
        return self._v(x), self._control(x) * x
def Lorenz_value(x):
    """Evaluate the classic Lorenz vector field row-wise.

    x: tensor of shape (batch, 3). Returns a fresh (batch, 3) tensor
    (built via ``torch.tensor``, so no autograd history is kept).
    """
    rows = []
    for x1, x2, x3 in x:
        rows.append([10 * (x2 - x1),
                     x1 * (28 - x3) - x2,
                     x1 * x2 - x3 * 8 / 3])
    return torch.tensor(rows)
def modify_Lorenz_value(x):
    """Lorenz field evaluated in coordinates shifted by e = (6*sqrt(2), 6*sqrt(2), 27).

    Every row of x is translated by e before evaluation; e is a fixed point
    of the flow, so the origin of the shifted system maps to zero drift.
    """
    shift = torch.tensor([6.0 * math.sqrt(2), 6.0 * math.sqrt(2), 27.0])
    rows = []
    for row in x:
        x1, x2, x3 = row + shift
        rows.append([10 * (x2 - x1),
                     x1 * (28 - x3) - x2,
                     x1 * x2 - x3 * 8 / 3])
    return torch.tensor(rows)
def get_batch(data):
    """Draw a random mini-batch of ``args.batch_size`` rows without replacement."""
    idx = np.random.choice(np.arange(args.N, dtype=np.int64),
                           args.batch_size, replace=False)
    return data[torch.from_numpy(idx), :]
'''
For learning
'''
N = args.N # sample size
D_in = args.D_in # input dimension
H1 = args.D_h # hidden dimension
D_out = D_in # output dimension
# torch.manual_seed(10)
data_x = torch.Tensor(N, D_in).uniform_(0, 10)
# x = torch.Tensor(N, D_in).uniform_(-10, 10)
l = 0.001
start = timeit.default_timer()
model = Net(D_in,H1, D_out)
max_iters = 2000
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
# Outer loop: each epoch draws a fresh mini-batch and runs an inner
# optimisation until the Lyapunov risk vanishes or max_iters is hit.
for r in range(1, args.niters + 1):
    i = 0
    L = []  # per-step risk history for this batch
    x = get_batch(data_x)
    while i < max_iters:
        V_net, u = model(x)
        # Pull the V-network's weights so V can be recomputed symbolically
        # below with autograd enabled on a fresh leaf tensor x.
        W1 = model._v.layer1.weight
        W2 = model._v.layer2.weight
        W3 = model._v.layer3.weight
        # W4 = model._v.layer4.weight
        B1 = model._v.layer1.bias
        B2 = model._v.layer2.bias
        B3 = model._v.layer3.bias
        # B4 = model._v.layer4.bias
        f = Lorenz_value(x)  # drift of the uncontrolled Lorenz system
        # f = modify_Lorenz_value(x)
        g = u  # learned state-scaled diffusion/control term
        x = x.clone().detach().requires_grad_(True)
        # Explicit forward pass (tanh MLP) so jacobian/hessian differentiate V w.r.t. x.
        output = torch.mm(F.tanh(torch.mm(F.tanh(torch.mm(x,W1.T)+B1),W2.T)+B2),W3.T)+B3
        # output = torch.mm(torch.tanh(torch.mm(x,W1.T)+B1),W2.T)+B2
        # V = torch.sum(output)
        num_v = torch.sum(l*x*x + ( x*output)**2,1)  # per-sample V(x) >= 0
        # num_v = torch.sum(output,1)
        V = torch.sum(l*x*x + (x*output)**2)
        Vx = jacobian(V,x)   # flattened gradient, shape (1, 3*batch)
        Vxx = hessian(V,x)   # block-diagonal Hessian across samples
        # NOTE(review): loss has length N but only the first batch_size
        # entries are written, so the mean below is scaled down by
        # batch_size/N — presumably intentional damping; confirm.
        loss = torch.zeros(N)
        # NOTE(review): this inner `r` shadows the outer epoch counter.
        for r in range(args.batch_size):
            # L_V = Vx·f + 0.5 * g^T Vxx g : generator of V along the SDE.
            L_V = torch.sum(Vx[0,3*r:3*r+3]*f[r,:]) + 0.5*torch.mm(g[r,:].unsqueeze(0),torch.mm(Vxx[3*r:3*r+3,3*r:3*r+3],g[r,:].unsqueeze(1)))
            Vxg = torch.sum(Vx[0,3*r:3*r+3]*g[r,:])
            v = num_v[r]
            # Stability margin; negative entries are violations penalised below.
            loss[r] = Vxg**2/(v**2) - args.b*L_V/v
        Lyapunov_risk = (F.relu(-loss)).mean()
        L.append(Lyapunov_risk.item())
        print(i, "Lyapunov Risk=",Lyapunov_risk.item())
        optimizer.zero_grad()
        Lyapunov_risk.backward()
        optimizer.step()
        # Crude LR schedule: shrink the step once the risk drops below 1.
        if Lyapunov_risk < 1.0:
            optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
        elif Lyapunov_risk > 1.0:
            optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        if Lyapunov_risk == 0.0:
            break
        i += 1
stop = timeit.default_timer()
print('\n')
print("Total time: ", stop - start)
# torch.save(model._control.state_dict(),'ES_net.pkl')
# torch.save(model._v.state_dict(),'ES_V_net.pkl')
# torch.save(model._control.state_dict(),'./data/Lorenz/ES_quad_net_modify_1.pkl')
# torch.save(model._v.state_dict(),'./data/Lorenz/ES_quad_V_net_modify_1.pkl') | 5,582 | 30.016667 | 142 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/Lorenz/Control_Nonlinear_Icnn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class ICNN(nn.Module):
    """Input-convex neural network.

    Each layer combines an unconstrained map of the raw input with a
    non-negative map of the previous activations (non-negativity enforced
    by softplus in ``forward``); with a convex non-decreasing activation
    this makes the scalar output convex in the input.
    """
    def __init__(self, input_shape, layer_sizes, activation_fn):
        super(ICNN, self).__init__()
        self._input_shape = input_shape
        self._layer_sizes = layer_sizes
        self._activation_fn = activation_fn
        ws = []  # input weights W_i, shape (layer_sizes[i], *input_shape)
        bs = []  # biases, shape (layer_sizes[i], 1)
        us = []  # raw skip weights U_i (softplus-constrained at use time)
        prev_layer = input_shape  # NOTE(review): unused leftover
        w = torch.empty(layer_sizes[0], *input_shape)
        nn.init.xavier_normal_(w)
        ws.append(nn.Parameter(w))
        b = torch.empty([layer_sizes[0], 1])
        nn.init.xavier_normal_(b)
        bs.append(nn.Parameter(b))
        for i in range(len(layer_sizes))[1:]:
            w = torch.empty(layer_sizes[i], *input_shape)
            nn.init.xavier_normal_(w)
            ws.append(nn.Parameter(w))
            b = torch.empty([layer_sizes[i], 1])
            nn.init.xavier_normal_(b)
            bs.append(nn.Parameter(b))
            u = torch.empty([layer_sizes[i], layer_sizes[i-1]])
            nn.init.xavier_normal_(u)
            us.append(nn.Parameter(u))
        self._ws = nn.ParameterList(ws)
        self._bs = nn.ParameterList(bs)
        self._us = nn.ParameterList(us)

    def forward(self, x):
        # x: [batch, data]
        if len(x.shape) < 2:
            x = x.unsqueeze(0)
        else:
            # Move the batch axis last so samples become columns for addmm.
            data_dims = list(range(1, len(self._input_shape) + 1))
            x = x.permute(*data_dims, 0)
        z = self._activation_fn(torch.addmm(self._bs[0], self._ws[0], x))
        for i in range(len(self._us)):
            u = F.softplus(self._us[i])  # enforce non-negative skip weights
            w = self._ws[i + 1]
            b = self._bs[i + 1]
            z = self._activation_fn(torch.addmm(b, w, x) + torch.mm(u, z))
        return z
class ControlNet(torch.nn.Module):
    """Two-hidden-layer ReLU network producing the raw control signal."""

    def __init__(self, n_input, n_hidden, n_output):
        super(ControlNet, self).__init__()
        torch.manual_seed(2)  # fixed seed: weight initialisation is reproducible
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        act = torch.nn.ReLU()
        hidden = act(self.layer2(act(self.layer1(x))))
        return self.layer3(hidden)
class LyapunovFunction(nn.Module):
    """Lyapunov candidate built from an ICNN, paired with a control network.

    V(x) = smooth_relu(icnn(x) - icnn(0)) + eps * |x|^2, which gives
    V(0) = 0 and V(x) > 0 elsewhere; the control output is scaled by x so
    it vanishes at the equilibrium.
    """
    # NOTE(review): `layer_sizes=[64, 64]` is a mutable default argument and
    # `lr` is never used; both kept as-is to avoid any interface change.
    def __init__(self,n_input,n_hidden,n_output,input_shape,smooth_relu_thresh=0.1,layer_sizes=[64, 64],lr=3e-4,eps=1e-3):
        super(LyapunovFunction, self).__init__()
        torch.manual_seed(2)  # reproducible initialisation
        self._d = smooth_relu_thresh
        self._icnn = ICNN(input_shape, layer_sizes, self.smooth_relu)
        self._eps = eps
        self._control = ControlNet(n_input,n_hidden,n_output)
    def forward(self, x):
        g = self._icnn(x)
        g0 = self._icnn(torch.zeros_like(x))  # subtract so V(0) == 0
        u = self._control(x)
        u0 = self._control(torch.zeros_like(x))  # NOTE(review): unused
        return self.smooth_relu(g - g0) + self._eps * x.pow(2).sum(dim=1), u*x
        # return self.smooth_relu(g - g0) + self._eps * x.pow(2).sum(dim=1), u-u0
    def smooth_relu(self, x):
        """C^1 surrogate for ReLU: quartic spline on [0, d), linear beyond."""
        relu = x.relu()
        # TODO: Is there a clean way to avoid computing both of these on all elements?
        sq = (2*self._d*relu.pow(3) -relu.pow(4)) / (2 * self._d**3)
        lin = x - self._d/2
        return torch.where(relu < self._d, sq, lin)
def lya(ws, bs, us, smooth, x, input_shape):
    """Re-evaluate an ICNN forward pass from raw parameter lists.

    Mirrors ``ICNN.forward`` but takes the input weights (``ws``), biases
    (``bs``) and raw skip weights (``us``, passed through softplus) as
    explicit lists, so the value can be recomputed outside the module.
    """
    if len(x.shape) < 2:
        x = x.unsqueeze(0)
    else:
        # Batch axis moves last: samples become columns for addmm.
        x = x.permute(*range(1, len(input_shape) + 1), 0)
    z = smooth(torch.addmm(bs[0], ws[0], x))
    for u_raw, w, b in zip(us, ws[1:], bs[1:]):
        z = smooth(torch.addmm(b, w, x) + torch.mm(F.softplus(u_raw), z))
    return z
| 3,750 | 34.386792 | 122 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/Energy/AS.py | import torch
import torch.nn.functional as F
import timeit
import math
class Net(torch.nn.Module):
    """Single-hidden-layer ReLU network for the scalar control gain."""

    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        torch.manual_seed(2)  # deterministic weight initialisation
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        act = torch.nn.ReLU()
        return self.layer2(act(self.layer1(x)))
def f_value(x):
    """Drift term x * log(1 + |x|), evaluated row-wise over an (N, 1) tensor."""
    rows = [[xi * math.log(1 + abs(xi))] for xi in x]
    return torch.tensor(rows)
'''
For learning
'''
N = 4000 # sample size
D_in = 1 # input dimension
H1 = 6 # hidden dimension
D_out = 1 # output dimension
torch.manual_seed(10)
x = torch.Tensor(N, D_in).uniform_(0,50)
theta = 0.9
out_iters = 0
# Single training pass: fit the gain network so the stochastic-stability
# margin `loss` is non-negative on the sampled states.
while out_iters < 1:
    start = timeit.default_timer()
    model = Net(D_in,H1, D_out)
    i = 0
    t = 0
    max_iters = 100
    learning_rate = 0.1
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    while i < max_iters:
        out = model(x)
        g = out*x  # state-scaled control so g(0) = 0
        f = f_value(x)
        # Margin of the (2 - theta)-type stabilisation inequality for
        # V(x) = x^2; negative entries mark states where it fails.
        loss = (2-theta)*((x*g)**2)-x**2*(2*x*f+g**2)
        Lyapunov_risk = (F.relu(-loss)).mean()  # penalise only violations
        print(i, "Lyapunov Risk=",Lyapunov_risk.item())
        optimizer.zero_grad()
        Lyapunov_risk.backward()
        optimizer.step()
        i += 1
    stop = timeit.default_timer()
    print('\n')
    print("Total time: ", stop - start)
    print("Verified time: ", t)
    out_iters+=1
torch.save(model.state_dict(), './theta0.9_1d_log_net.pkl') | 1,720 | 21.064103 | 70 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/Energy/functions.py | import numpy as np
import math
import torch
import timeit
from scipy import integrate
start = timeit.default_timer()
np.random.seed(1)
class Net(torch.nn.Module):
    """Same single-hidden-layer ReLU architecture as the gain network trained
    in AS.py; re-declared here so the saved state dict can be loaded."""
    def __init__(self,n_input,n_hidden,n_output):
        super(Net, self).__init__()
        torch.manual_seed(2)  # match the training-time initialisation seed
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden,n_output)
    def forward(self,x):
        sigmoid = torch.nn.ReLU()
        # sigmoid2 = torch.nn.ReLU()
        h_1 = sigmoid(self.layer1(x))
        out = self.layer2(h_1)
        return out
log_model = Net(1,6,1)
log_model.load_state_dict(torch.load('./data/Energy/theta0.9_1d_log_net.pkl'))
N = 100000
dt = 0.00001
m = 20
T = 50
x0 = [0.5] #initial
def k_list(N, dt, k, m):
    """Simulate m Euler-Maruyama paths of dx = x*log(1+|x|) dt + k*x dW.

    Every path starts at x0 = 20.0 and takes N steps of size dt.
    Returns an (N+1, m) tensor with one trajectory per column.
    """
    data = torch.zeros([N + 1, m])
    for col in range(m):
        noise = np.random.normal(0, 1, N)  # one Wiener increment per step
        path = [20.0]
        for step in range(N):
            x = path[step]
            path.append(x + x * math.log(1 + abs(x)) * dt
                        + k * x * math.sqrt(dt) * noise[step])
        data[:, col] = torch.tensor(path)
    return data
def learning_control(N,dt,m):
    """Simulate m path pairs from x0 = 20: X under the learned gain
    (`log_model`), Y under the fixed linear gain k = 6, both driven by the
    same per-path Wiener increments (seed 4*r+1) so they are comparable.
    Returns a (2, N+1, m) tensor: [0] learned control, [1] linear control.
    """
    x0 = [20.0]
    data = torch.zeros([2,N+1,m])
    for r in range(m):
        X,Y = [],[]
        X.append(x0),Y.append(x0)
        np.random.seed(r*4+1)  # reproducible, distinct noise per path
        z = np.random.normal(0,1,N)
        for i in range(N):
            x = X[i][0]
            y = Y[i][0]
            k = log_model(torch.tensor([X[i]]))  # state-dependent learned gain
            new_x = x + x*math.log(1+abs(x))*dt + k[0]*x*math.sqrt(dt)*z[i]
            new_y = y + y*math.log(1+abs(y))*dt + 6*y*math.sqrt(dt)*z[i]
            X.append([new_x]),Y.append([new_y])
        X = torch.tensor(X)
        Y = torch.tensor(Y)
        data[0,:,r] = X[:,0]
        data[1,:,r] = Y[:,0]
        print(r)
    return data
def k_data():
endpoint = torch.zeros(T)
Data = torch.zeros(T,N+1,m)
for i in range(T):
k = i*0.2+0.2
data = k_list(N,dt,k,m)
endpoint[i] = data[-1].mean()
Data[i,:] = data
print(i)
torch.save({'data':Data,'end':endpoint},'./data/Energy/k_table_x0_20.pt')
def learning_data():
# data = learning_control(200000,dt,10)
data = learning_control(100000,dt,20)
# torch.save({'data':data},'./neural_sde/Energy/20_learning_control.pt')
torch.save({'data':data},'./data/Energy/20seed_learning_control.pt')
def k_energy_cost():
Data = torch.load('./data/Energy/k_table.pt')
data = Data['data']
X = data[29,:75001,:]
N = 75000
dt = 0.00001
gx = 6*X**2
a = np.linspace(0, dt*N, N+1)
print(a.shape)
v_x = 0
for i in range(20):
g_x = gx[:,i]
v_x += integrate.trapz(np.array(g_x), a)
print(i)
print(v_x/20)
def energy_cost():
Data = torch.load('./data/Energy/20seed_learning_control.pt')
data = Data['data'].detach().numpy()
X = data[1,:]
Y = data[0,:][:,np.delete(np.arange(20),15)]# Delete the diverge trajectory due to the dt is not small enough in Euler method
N = 100000
dt = 0.00001
v_x = 0
v_y = 0
# a = np.linspace(0, dt*N, N+1)
for i in range(Y.shape[1]):
g_x = 36*X[:,i]**2
g_y = (log_model(torch.tensor(Y[:,i]).unsqueeze(1))[:,0].detach().numpy()*Y[:,i])**2
norm_x = np.abs(X[:,i])
norm_y = np.abs(Y[:,i])
ind1 = np.where(norm_x<0.1)[0][0]
ind2 = np.where(norm_y<0.1)[0][0]
a1 = np.linspace(0, dt*ind1, ind1+1)
a2 = np.linspace(0, dt*ind2, ind2+1)
v_x += integrate.trapz(g_x[0:ind1+1], a1)
v_y += integrate.trapz(g_y[0:ind2+1], a2)
print(i)
print(v_x/20,v_y/19)
# energy_cost()
# learning_data()
# k_data()
stop= timeit.default_timer()
print('time:',stop-start)
| 3,792 | 26.092857 | 129 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/Energy/plot.py | import numpy as np
import matplotlib.pyplot as plt
import torch
import matplotlib
matplotlib.rcParams['font.sans-serif'] = 'NSimSun,Times New Roman'
matplotlib.rcParams['text.usetex'] = True
def plot_grid():
    """Draw major (gray dash-dot) and minor (beige) grid lines on the current axes."""
    # Pass the on/off flag positionally: matplotlib renamed grid()'s first
    # keyword from `b` to `visible` and removed the `b` alias, so
    # `plt.grid(b=True, ...)` raises TypeError on current releases while
    # the positional form works on every version.
    plt.grid(True, which='major', color='gray', alpha=0.6, linestyle='dashdot', lw=1.5)
    # minor grid lines
    plt.minorticks_on()
    plt.grid(True, which='minor', color='beige', alpha=0.8, ls='-', lw=1)
'''
Data corresponding to (a) in Figure 4, strength k from 0.2:10:0.2, 20 sample trajectories for each k,
we choose dt=1e-5 and N=1e5 in Euler method. Data form is dictionary with key 'data' and 'end', the size
for 'data' is [50,10001,20], 'end' corresponds to the average position over 20 trajectories for each k, the size is [50]
'''
Data = torch.load('./k_table_x0_20.pt')
data = Data['data']
endpoint = Data['end']
endpoint = torch.log(1+endpoint)
T = len(data)
dt = 0.00001
fontsize = 30
fig = plt.figure()
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)
fig1 = plt.subplot(141)
plt.scatter(np.arange(T) / 5,endpoint, s=45, c=endpoint, marker='.',alpha=0.85,cmap='rainbow')
plt.axvline(28/5,ls="--",linewidth=2.5,color="#dc8ff6",alpha=0.5)
plt.ylabel(r'$\log(1+x)$', fontsize=fontsize)
plt.xlabel(r'$k$', fontsize=fontsize)
# cb = plt.colorbar()
# cb.set_ticks([0, 5, 10, 15])
# cb.ax.tick_params(labelsize=fontsize)
plt.xticks([0, 2, 4, 6, 8, 10],
# ["0", "", "0.5", "","1.0", "", "1.5", "", "2.0"]
)
plt.yticks([0, 5, 10, 15],
# ["0", "", "0.5", "","1.0", "", "1.5", "", "2.0"]
)
plot_grid()
plt.tick_params(labelsize=fontsize)
'''
Fix k=6,20 trajectories for linear control and neural stochastic control from initial 20.0,we set dt = 1e-5, N = 1e5
in Euler method, the random seeds are set as 4*r+1 for r in range(20), the data form is dictionary with key 'data', the
data size is [2,10001,20], data[0,:] corresponds to trajectories for learning control, data[1,:] corresponds to linear control.
'''
# Data = torch.load('./neural_sde/Energy/20seed_learning_control.pt')
Data = torch.load('./data/Energy/20seed_learning_control.pt')
data = Data['data']
fig2 = plt.subplot(154)
X = data[1,:]
X = X[:50000,:]
mean_data = torch.mean(X,1)
std_data = torch.std(X,1)
plt.fill_between(np.arange(len(X)) * dt,mean_data-std_data,mean_data+std_data,color='r',alpha=0.2)
plt.plot(np.arange(len(X)) * dt,mean_data,color='r',alpha=0.9,label='Linear control')
# plt.title('ME:{}'.format(38418))
plt.ylim([-100, 200])
plt.xlabel(r'Time', fontsize=fontsize)
plt.ylabel(r'$x$', fontsize=fontsize)
plt.xticks([0, 0.125, 0.25, 0.375, 0.5],
["$0$", "$~$","$0.25$","$~$", "$0.5$"]
)
plt.yticks([-100, 0, 100, 200])
plt.legend(fontsize=fontsize * 0.5)
plot_grid()
plt.tick_params(labelsize=fontsize)
fig3 = plt.subplot(155)
Y = data[0,:]
Y = Y[:14000,:]
mean_data = torch.mean(Y,1)
std_data = torch.std(Y,1)
plt.fill_between(np.arange(len(Y))*dt,mean_data-std_data,mean_data+std_data,color='g',alpha=0.2)
plt.plot(np.arange(len(Y))*dt,mean_data,color='g',alpha=0.9,label='Learned control')
# plt.ylim([-100, 200])
plt.xlabel(r'Time', fontsize=fontsize)
plt.xticks([0, 0.075/2, 0.075, (0.075 + 0.15)/2, 0.15],
["$0$", "$~$","$0.075$", "$~$", "$0.15$"]
)
plt.ylabel(r'$x$', fontsize=fontsize)
plt.yticks([-20, 0, 20, 40],
# ["0", "0.05","0.1", "0.15"]
)
plt.legend(fontsize=fontsize * 0.5)
plot_grid()
plt.tick_params(labelsize=fontsize)
# plt.title('ME:{}'.format(1375))
plt.show() | 3,641 | 34.359223 | 127 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/stuart/AS.py | import torch
import torch.nn.functional as F
import numpy as np
import timeit
class Net(torch.nn.Module):
    """Single-hidden-layer ReLU network for the per-coordinate control gains."""

    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        torch.manual_seed(2)  # deterministic weight initialisation
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        act = torch.nn.ReLU()
        return self.layer2(act(self.layer1(x)))
'''
For learning
'''
n = 20
D_in = 2*n-1 # input dimension
H1 = 4*n # hidden dimension
D_out = 2*n-1 # output dimension
Data = torch.load('./data/stuart/20_train_data_small.pt')
# Data = torch.load('./data/stuart/20_train_data.pt')
x = Data['X']
f = Data['Y']
print(x[:,20:])
theta = 0.75
out_iters = 0
valid=True
while out_iters < 1 and valid == True:
# break
start = timeit.default_timer()
model = Net(D_in,H1, D_out)
i = 0
t = 0
max_iters = 1000
learning_rate = 0.01
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
L = torch.zeros(1000)
while i < max_iters:
out = model(x)
g = out*x
loss = (2-theta)*((x*g)**2)-x**2*(2*x*f+g**2)
Lyapunov_risk = (F.relu(-loss)).mean()
# Lyapunov_risk.requires_grad_(True)
print(i, "Lyapunov Risk=",Lyapunov_risk.item())
optimizer.zero_grad()
Lyapunov_risk.backward()
optimizer.step()
L[i] = Lyapunov_risk
i += 1
stop = timeit.default_timer()
print('\n')
print("Total time: ", stop - start)
print("Verified time: ", t)
out_iters+=1
torch.save({'loss':L},'./data/stuart/loss.pt')
# torch.save(model.state_dict(), './neural_sde/stuart/n_20/20_net_small.pkl')
| 1,843 | 22.341772 | 82 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/stuart/generate.py | import numpy as np
from scipy import integrate
import torch
import matplotlib.pyplot as plt
import math
import timeit
from scipy.integrate import odeint
import sys
sys.path.append('./neural_sde/stuart')
from AS import *
from functions import *
start = timeit.default_timer()
stuart_model = Net(D_in,H1,D_out)
# stuart_model.load_state_dict(torch.load('./neural_sde/stuart/n_20/20_net.pkl'))
stuart_model.load_state_dict(torch.load('./data/stuart/20_net_small.pkl'))
torch.manual_seed(6)
n = 20
L = torch.eye(n)-torch.ones([n,n])/n
N = 60000
dt = 0.0001
x0 = torch.cat([torch.Tensor(n).uniform_(0, 5),torch.Tensor(n-1).uniform_(-1.0,1.0)],0)
R = x0[:20]
dW = x0[20:]
def original_20():
# W = theta(dW)
# x0 = torch.cat([R-1,W],0)
X = torch.load('./data/stuart/20_original_data.pt')
X = X['X']
x0 = X[-1]
X = torch.zeros(N+1,2*n)
X[0,:] = x0
for i in range(N):
x = X[i,:]
dx = original_f_value(x,L)
new_x = x + dx*dt
X[i+1,:]=new_x
if i%100 == 0:
print(i)
torch.save({'X':X},'./data/stuart/20_original_data_add.pt')
def test():
torch.manual_seed(7)
X = torch.load('./data/stuart/20_test_data_try.pt')
X = X['X']
x0 = X[-1]
length = len(X)-1
# length = 0
# x0 = torch.cat([torch.Tensor(n).uniform_(0, 5),torch.Tensor(n-1).uniform_(-1.0,1.0)],0)
X = torch.zeros(N+1,2*n-1)
X[0,:] = x0
z = torch.randn(length+N,2*n-1)[length:,:]
for i in range(N):
x = X[i,:]
with torch.no_grad():
u = stuart_model(x)
dx = f_value(x,L)
new_x = x + dx*dt + x*u*z[i,:]*math.sqrt(dt)
X[i+1,:]=new_x
if i%100 == 0:
print(i)
torch.save({'X':X},'./data/stuart/20_test_data_try_add.pt')
if __name__ == '__main__':
original_20()
# test()
stop = timeit.default_timer()
print(stop-start)
| 1,915 | 24.210526 | 96 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/stuart/functions.py | import torch
import numpy as np
import timeit
import matplotlib.pyplot as plt
'''
x = rho_1,rho_2,rho_n, w1,w2,wn-1
'''
#Transform \Tilde{\theta} to \theta
def theta(W):
    """Recover absolute phases from reduced phase differences.

    Appends a reference value 1.0 to W and returns the suffix sums
    ang[i] = sum_{k >= i} W_ext[k] as a 1-D tensor of length len(W)+1.
    """
    W_ext = torch.cat([W, torch.tensor([1.0])], 0)
    size = len(W_ext)
    # Upper-triangular matrix of ones (diagonal included) is exactly the
    # suffix-sum operator the original built element by element.
    suffix = torch.triu(torch.ones(size, size))
    return torch.mm(suffix, W_ext.unsqueeze(1))[:, 0]
#Transform \theta to \Tilde{\theta}
def diff_theta(W):
    """Antisymmetric matrix of pairwise differences: T[i, j] = W[j] - W[i]."""
    return W.unsqueeze(0) - W.unsqueeze(1)
#Equation for \Tilde{\rho},\Tilde{\theta}
def f_value(x,L):
    """Drift of the coupled oscillator network in shifted polar coordinates.

    x packs k amplitude deviations (rho_i - 1) followed by k-1 reduced
    phase differences; L is the coupling matrix. Returns the concatenated
    derivatives [f_R, f_W] as one 1-D tensor of length 2k-1.
    """
    c1 = -1.8     # model constants (match original_f_value below)
    c2 = 4
    sigma = 0.01  # coupling strength
    k = int((len(x)+1)/2)
    R = x[:k]+1.0                    # back to absolute amplitudes
    W = x[k:]
    diff_ang = diff_theta(theta(W))  # pairwise phase differences
    f_R = torch.zeros_like(R)
    f_W = torch.zeros_like(W)
    for j in range(len(R)):
        f_R[j] = R[j]-R[j]**3-sigma*torch.sum(L[j,:]*R*(torch.cos(diff_ang[j,:])-c1*torch.sin(diff_ang[j,:])))
    for j in range(len(W)):
        # Derivative of the j-th phase *difference*: coupling terms of
        # oscillator j minus those of oscillator j+1.
        f_W[j] = -c2*(R[j]**2-R[j+1]**2)-sigma*(torch.sum(L[j,:]*R*(c1*torch.cos(diff_ang[j,:])+torch.sin(diff_ang[j,:])))/R[j]\
            -torch.sum(L[j+1,:]*R*(c1*torch.cos(diff_ang[j+1,:])+torch.sin(diff_ang[j+1,:])))/R[j+1])
    return torch.cat([f_R,f_W],0)
#Equation for \rho, \theta
def original_f_value(x,L):
    """Drift of the coupled oscillator network in original polar coordinates.

    x packs k amplitudes rho_i followed by k phases theta_i; L is the
    coupling matrix. Returns the concatenated derivatives [f_R, f_W].
    """
    c1 = -1.8     # model constants (match f_value above)
    c2 = 4
    sigma = 0.01  # coupling strength
    k = int(len(x)/2)
    R = x[:k]
    W = x[k:]
    diff_ang = diff_theta(W)  # pairwise phase differences theta_j - theta_i
    f_R = torch.zeros_like(R)
    f_W = torch.zeros_like(W)
    for j in range(len(R)):
        f_R[j] = R[j]-R[j]**3-sigma*torch.sum(L[j,:]*R*(torch.cos(diff_ang[j,:])-c1*torch.sin(diff_ang[j,:])))
        f_W[j] = -c2*(R[j]**2)-sigma*(torch.sum(L[j,:]*R*(c1*torch.cos(diff_ang[j,:])+torch.sin(diff_ang[j,:])))/R[j])
    return torch.cat([f_R,f_W],0)
# Transform polar coordinate to euclidean coordinate
def transform(n, X):
    """Convert polar columns (rho_i, theta_i) to Cartesian (x_i, y_i).

    X has rows of samples; columns 0..n-1 are radii and columns n..2n-1
    are the matching angles. Any further columns are left as zeros, as in
    the original element-wise loop.
    """
    Y = torch.zeros_like(X)
    radii = X[:, :n]
    angles = X[:, n:2 * n]
    Y[:, :n] = radii * torch.cos(angles)
    Y[:, n:2 * n] = radii * torch.sin(angles)
    return Y
#Generate control data
def generate():
N = 5000
n = 20
torch.manual_seed(10)
# R = torch.Tensor(N, n).uniform_(0, 10)
# W = torch.Tensor(N, n-1).uniform_(-15, 15)
R = torch.Tensor(N, n).uniform_(0, 5)
W = torch.Tensor(N, n-1).uniform_(-10, 10)
X = torch.cat([R,W],1)
Y = torch.zeros_like(X)
L = torch.eye(n)-torch.ones([n,n])/n
for i in range(N):
x = X[i,:]
Y[i,:] = f_value(x,L)
if i%100:
print(i)
torch.save({'X':X,'Y':Y},'./neural_sde/stuart/n_20/20_train_data_small.pt')
# Joint trajcetories on two adjacent time intervals
def cat_data(path0='./neural_sde/stuart/n_20/20_original_data_cat.pt',path1='./neural_sde/stuart/n_20/20_original_data.pt',path2='./neural_sde/stuart/n_20/20_original_data_add.pt'):
X = torch.load(path1)
Y = torch.load(path2)
X = X['X'][0:80001:10]
Y = Y['X']
torch.save({'X':torch.cat([X,Y[1:,:]],0)},path0)
# Get the controlled trajectory for \rho,\theta
def diff_to_orig(n,path1='./neural_sde/stuart/n_20/20_original_data.pt',path2='./neural_sde/stuart/n_20/20_test_data.pt'):
X = torch.load(path1)
Y = torch.load(path2)
orig_data = X['X']
trans_data = Y['X']
Wn = orig_data[:,-1:]
R = trans_data[:,:n]
dW = trans_data[:,n:]
R = R+1
W = torch.cat([dW,Wn],1).T
T = torch.eye(len(W))
for i in range(len(T)):
for k in range(len(T)):
if k>i:
T[i,k]=1.0
orig_W = torch.mm(T,W)
return torch.cat([R,orig_W.T],1)
if __name__ == '__main__':
cat_data('./data/stuart/20_original_data_cat.pt','./data/stuart/20_original_data.pt','./data/stuart/20_original_data_add.pt')
cat_data('./data/stuart/20_test_data_cat.pt','./data/stuart/20_test_data_try.pt','./data/stuart/20_test_data_try_add.pt')
generate() | 3,921 | 30.376 | 181 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/stuart/plot.py | from functions import *
import numpy as np
import torch
import matplotlib.pyplot as plt
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# import matplotlib
# matplotlib.rcParams['font.sans-serif'] = 'NSimSun,Times New Roman'
# matplotlib.rcParams['text.usetex'] = True
font_size = 35
def plot_grid():
    """Draw major (gray dash-dot) and minor (beige) grid lines on the current axes."""
    # Positional flag instead of the removed `b=` keyword: matplotlib
    # renamed grid()'s first parameter to `visible`, so `b=True` raises
    # TypeError on current releases; the positional form is portable.
    plt.grid(True, which='major', color='gray', alpha=0.6, linestyle='dashdot', lw=1.5)
    # minor grid lines
    plt.minorticks_on()
    plt.grid(True, which='minor', color='beige', alpha=0.8, ls='-', lw=1)
'''
Plot trajectories and orbits
'''
L = 20000
E = 50000
plt1 = plt.subplot(231)
X = torch.load('./data/stuart/20_original_data_cat.pt')
X = X['X'][L:E:10,:]
X = transform(20,X)
for i in range(20):
plt.plot(np.arange(len(X[:,0])),X[:,i],color = plt.cm.Accent(i/45))
plt.xticks([0,1000,2000,3000],[0,1.0,2.0,3.0],fontsize=font_size)
plt.yticks([-1,0,1],fontsize=font_size)
plot_grid()
plt.title(r'$x$',fontsize=font_size)
plt.ylabel('Without Control',fontsize=font_size)
plt2 = plt.subplot(232)
for i in range(20):
plt.plot(np.arange(len(X[:,0])),X[:,i+20],color = plt.cm.Accent(i/45))
plt.xticks([0,1000,2000,3000],[0,1.0,2.0,3.0],fontsize=font_size)
plt.title(r'$y$',fontsize=font_size)
plt.yticks([-1,0,1],fontsize=font_size)
plot_grid()
plt3 = plt.subplot(233)
for i in range(20):
plt.plot(X[:,i+0],X[:,i+20],color = plt.cm.Accent(i/45),label='{}'.format(i))
plt.xticks([-1,0,1],fontsize=font_size)
plt.yticks([-1,0,1],fontsize=font_size)
plt.xlabel(r"$x$",fontsize=font_size)
plt.ylabel(r'$y$',fontsize=font_size)
plot_grid()
plt.title('Orbit',fontsize=font_size)
plt4 = plt.subplot(234)
X = diff_to_orig(20,'./data/stuart/20_original_data_cat.pt','./neural_sde/stuart/n_20/20_test_data_cat.pt')[L:E:10,:]
X = transform(20,X)
for i in range(20):
plt.plot(np.arange(len(X[:,0])),X[:,i],color = plt.cm.Accent(i/45))
plot_grid()
plt.ylabel('With Control',fontsize=font_size)
plt.xticks([0,1000,2000,3000],[0,1.0,2.0,3.0],fontsize=font_size)
plt.yticks([-1,0,1],fontsize=font_size)
plt.xlabel('Time',fontsize=font_size)
plt5 = plt.subplot(235)
for i in range(20):
plt.plot(np.arange(len(X[:,0])),X[:,i+20],color = plt.cm.Accent(i/45))
plot_grid()
plt.xticks([0,1000,2000,3000],[0,1.0,2.0,3.0],fontsize=font_size)
plt.yticks([-1,0,1],fontsize=font_size)
plt.xlabel('Time',fontsize=font_size)
plt6 = plt.subplot(236)
for i in range(20):
plt.plot(X[:,i+0],X[:,i+20],color = plt.cm.Accent(i/45),label='{}'.format(i))
plt.xticks([-1,0,1],fontsize=font_size)
plt.yticks([-1,0,1],fontsize=font_size)
plt.xlabel(r"$x$",fontsize=font_size)
plt.ylabel(r'$y$',fontsize=font_size)
plot_grid()
plt.show()
'''
Plot loss function
'''
# loss = torch.load('./data/stuart/loss.pt')
# loss = loss['loss'].detach()
# loss = loss[:30]
# fig = plt.figure(figsize=(6,8))
# plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)
# plt1 = plt.subplot(121)
# # loss = loss.detach().numpy()
# plt.plot(np.arange(len(loss)),loss)
# plt2=plt.subplot(122)
# loss = loss[10:30]
# # loss = loss.detach().numpy()
# plt.plot(np.arange(len(loss)),loss)
# plt.plot()
# #% start: automatic generated code from pylustrator
# plt.figure(1).ax_dict = {ax.get_label(): ax for ax in plt.figure(1).axes}
# import matplotlib as mpl
# plt.figure(1).set_size_inches(14.120000/2.54, 9.110000/2.54, forward=True)
# plt.figure(1).axes[0].set_position([0.109847, 0.124637, 0.880047, 0.838141])
# plt.figure(1).axes[0].get_xaxis().get_label().set_text("iterations")
# plt.figure(1).axes[0].get_yaxis().get_label().set_text("loss")
# plt.figure(1).axes[1].set_xlim(-0.9500000000000001, 20.0)
# plt.figure(1).axes[1].set_ylim(-0.09267258382915317, 1.9471967105529984)
# plt.figure(1).axes[1].set_xticks([0.0, 10.0, 20.0])
# plt.figure(1).axes[1].set_yticks([0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75])
# plt.figure(1).axes[1].set_xticklabels(["10", "20", "30"], fontsize=10.0, fontweight="normal", color="black", fontstyle="normal", fontname="DejaVu Sans", horizontalalignment="center")
# plt.figure(1).axes[1].set_yticklabels(["0.00", "0.25", "0.50", "0.75", "1.00", "1.25", "1.50", "1.75"], fontsize=10)
# plt.figure(1).axes[1].set_position([0.610715, 0.504267, 0.336851, 0.396884])
# #% end: automatic generated code from pylustrator
# plt.show() | 4,294 | 34.204918 | 184 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/inverted_pendulum/invert_pendulum_control_1227.py | import numpy as np
import math
import torch
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib.gridspec as gridspec
from functions import *
from base_function import colors
alpha = 1.0
fontsize=35
fontsize_legend = 20
MarkerSize = 60
linewidth = 5
color_w = 0.15 #0.5
framealpha = 0.7
N_seg = 100
def plt_tick_1():
    """Axis ticks for the phase-portrait panel: symmetric [-10, 10] on both axes."""
    # plt.ylim([-2.5, 2.5])
    # plt.xlim([-2.5, 2.5])
    # plt.xticks([-5, -2.5, 0, 2.5, 5], ['$-5$', '', '$0$', '', '$5$'])
    # plt.yticks([-5, -2.5, 0, 2.5, 5], ['$-5$', '', '$0$', '', '$5$'])
    plt.xticks([-10, -5, 0, 5, 10], ['$-10$', '', '$0$', '', '$10$'])
    plt.yticks([-10, -5, 0, 5, 10], ['$-10$', '', '$0$', '', '$10$'])
def plt_tick_2():
    """Axis ticks for the time-series panel: time on x, angle on y."""
    # plt.ylim([-2.5, 2.5])
    plt.xticks([0, 0.075, 0.15, 0.225, 0.3], ['$0$', '', '$0.15$', '', '$0.3$'])
    plt.yticks([-10, -5, 0, 5, 10], ['$-10$', '', '$0$', '', '$10$'])
def plot_jianbian_line(
    X, Y, start_color=np.array([1.0, 0.0, 0.0]),
    end_color=np.array([0.0, 1.0, 0.0]),
    scale = 1/3,
    width_rate = 9/10,
):
    """Plot (X, Y) as N_seg consecutive segments whose line width tapers
    along the curve (uses module-level alpha/linewidth/N_seg settings).

    NOTE(review): `start_color` is immediately overwritten with `end_color`
    below, so the colour gradient is effectively disabled and only the
    width fades. The mutable np.array defaults are only reassigned, never
    mutated, but remain a hazard.
    """
    # start_color = 1 - start_color
    start_color= end_color
    data_len = len(X)
    # plt.plot(data[0,:1000], data[1, :1000], '-', alpha=alpha)
    n = N_seg
    seg_len = data_len // n
    print('data_len:{}, n:{}, seg_len:{}'.format(data_len, n, seg_len))
    for i in range(n - 1):
        w = ((i) / n) ** (scale)  # taper profile in [0, 1)
        now_color = start_color + w * (end_color - start_color)
        # print('i:{}, now_color:{}'.format(i, now_color))
        # plt.plot(data[0,i:i+3], data[1,i:i+3], '-', color=now_color, alpha=alpha)
        plt.plot(X[max(seg_len * i - 1, 0):seg_len * (i+1)], Y[max(seg_len * i - 1, 0):seg_len * (i+1)],
            '-', color=now_color, alpha=alpha, linewidth= linewidth - w * linewidth * width_rate )
#五次倒立摆实验,angle和velocity分别保存为X1,X2
data = torch.load('./control_data.pt')
X1 = data['X1'].clone().detach() #data size=[5,10000]
X2 = data['X2'].clone().detach() #data size=[5,10000]
# fig = plt.figure()
# plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)
# ax1 = plt.subplot(121)
show_indx = [0, 2, 4]
def plot_fig1(ax1):
xd = np.linspace(-10, 10, 20)
yd = np.linspace(-10, 10, 20)
Xd, Yd = np.meshgrid(xd,yd)
Plotflow(Xd, Yd) #绘制向量场
# #添加水平直线
# C1 = plt.scatter(0,0,marker='o',color='g')
# C2 = plt.scatter(math.pi,0,marker='o',color='r')
# C3 = plt.scatter(-math.pi,0,marker='o',color='b')
# ax1.add_artist(C1)
# ax1.add_artist(C2)
# ax1.add_artist(C3)
color_id = 0
# for i in range(2):
for i in show_indx:
# plt.plot(X1[i,0],X2[i,0],marker='*',color=cm.Accent(i*2))
# plt.plot(X1[i,:2000],X2[i,:2000],color=cm.Accent(i*2),alpha=0.95) #选择合适的长度
plot_jianbian_line(X=X1[i,:2000], Y=X2[i,:2000], start_color=colors[color_id] * color_w, end_color=colors[color_id],
scale=1/3, width_rate=0.5)
# plt.plot(state[0,0],state[1,0],marker='*', color=cm.Accent(i*2))
color_id += 1
color_id = 0
for i in show_indx:
# plt.scatter(X1[i,0],X2[i,0], marker='*', s=MarkerSize * 5, color='k', zorder=10)
# plt.scatter(X1[i,0],X2[i,0], marker='*', s=MarkerSize * 5, color=colors[color_id] * color_w, zorder=10)
plt.scatter(X1[i,0],X2[i,0], marker='*', s=MarkerSize * 5, color=colors[color_id]/max(colors[color_id]) * 0.7, zorder=10)
color_id += 1
#添加水平轴
C1 = plt.scatter(0, 0,marker='o',color='g', s=MarkerSize, zorder=10)
C2 = plt.scatter(math.pi,0,marker='o',color='r', s=MarkerSize, zorder=10)
C3 = plt.scatter(-math.pi,0,marker='o',color='b', s=MarkerSize, zorder=10)
ax1.add_artist(C1)
ax1.add_artist(C2)
ax1.add_artist(C3)
plt.xlim(-6,6)
plt.ylim(-6,6)
# plt.title('Orbits under Stochastic Control')
plt.legend([C1,C2,C3],[r'$(0,~0)$',r'$(\pi,~0)$',r'$(-\pi,~0)$'],loc='upper right',
borderpad=0.05, labelspacing=0.05,fontsize=fontsize_legend, framealpha=framealpha)
plt.xlabel(r'$\theta$',fontsize=fontsize)
plt.ylabel(r'$\dot{\theta}$',fontsize=fontsize)
plt_tick_1()
plt.tick_params(labelsize=fontsize)
N_data = 3000
def control_trajectory_(ax,title,path='./control_data.pt'):
data = torch.load(path)
# X = data['X'].clone().detach()
X1 = data['X1'].clone().detach()
print('X1 shape:{}'.format(X1.shape))
# X2 = data['X2']
L1 = plt.axhline(y=0.0,ls="--",linewidth=1.5,color="green")#添加水平直线
L2 = plt.axhline(y=math.pi,ls="--",linewidth=1.5,color="r")
L3 = plt.axhline(y=-math.pi,ls="--",linewidth=1.5,color="b")
ax.add_artist(L1)
ax.add_artist(L2)
ax.add_artist(L3)
color_id = 0
# for i in range(len(X1)):
for i in show_indx:
# x = X[i,:].numpy()
# m = np.max(x)
# index = np.argwhere(x == m )
# sample_length = int(index[0])
L = np.arange(len(X1[0,:N_data])) * 0.0001
# plt.plot(L[0],X1[i,0],marker='*',markersize=8,color=cm.Accent(i*2))
plot_jianbian_line(X=L, Y=X1[i, :N_data],
start_color=colors[color_id] * color_w, end_color=colors[color_id],
scale = 1/2,
width_rate = 5/10,
)
# plt.plot(L,X1[i,:3000],linestyle='--',color=cm.Accent(i*2),alpha=0.45)
color_id += 1
color_id = 0
for i in show_indx:
# plt.scatter(L[0],X1[i,0],marker='*', s=MarkerSize * 5, color=colors[color_id] * color_w, zorder=10)
plt.scatter(L[0],X1[i,0],marker='*', s=MarkerSize * 5, color=colors[color_id]/max(colors[color_id]) * 0.7, zorder=10)
color_id += 1
plt.legend([L1,L2,L3],[r'$\theta=0$',r'$\theta=\pi$',r'$\theta=-\pi$'],loc='upper right',
borderpad=0.05, labelspacing=0.05, fontsize=fontsize_legend, framealpha=framealpha)
# plt.title(title)
plt.xlabel('Time',fontsize=fontsize)
plt.ylabel(r'$\theta$',fontsize=fontsize)
# ax2 = plt.subplot(122)
def plot_fig2(ax2):
    """Right panel: controlled-trajectory time series, drawn via control_trajectory_."""
    # control_trajectory(ax2,'Phase Trajectories along Time','./control_data.pt')
    control_trajectory_(ax2, 'Phase Trajectories along Time', './control_data.pt')
    plt_tick_2()
    plt.tick_params(labelsize=fontsize)
if __name__ == '__main__':
    # Assemble the two-panel figure: phase portrait (left), time series (right).
    ax1 = plt.subplot(121)
    plot_fig1(ax1=ax1)
    ax2 = plt.subplot(122)
    plot_fig2(ax2=ax2)
    # Stray dataset metadata (" | 6,416 | ...") removed from this line.
    plt.show()
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/inverted_pendulum/algo2.py | import torch
import torch.nn.functional as F
import numpy as np
import timeit
import math
class Net(torch.nn.Module):
    """Two-layer ReLU controller network with fixed-seed initialisation."""

    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        # Seed before creating the layers so the initial weights are reproducible.
        torch.manual_seed(2)
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        """Map a batch of states to controller outputs."""
        hidden = torch.relu(self.layer1(x))
        return self.layer2(hidden)
def inverted_pendulum(x):
    """Drift f(x) of the damped inverted pendulum, evaluated for a batch.

    Args:
        x: (N, 2) tensor of states [theta, theta_dot].

    Returns:
        (N, 2) tensor [theta_dot, G*sin(theta)/L - b*theta_dot/(m*L**2)].

    Vectorized over the batch: the original built the result with a
    Python loop and per-row ``torch.tensor`` conversions, which is O(N)
    interpreter work and triggers tensor-to-scalar conversion warnings.
    """
    G = 9.81   # gravity
    L = 0.5    # length of the pole
    m = 0.15   # ball mass
    b = 0.1    # friction
    theta, omega = x[:, 0], x[:, 1]
    return torch.stack((omega, G * torch.sin(theta) / L - b * omega / (m * L ** 2)), dim=1)
'''
For learning
'''
# Train the stochastic controller g(x) = net(x) * x so that the
# almost-sure-stability surrogate loss (Lyapunov risk) is driven to zero.
N = 1000 # sample size
D_in = 2 # input dimension
H1 = 6 # hidden dimension
D_out = 2 # output dimension
torch.manual_seed(10)
# Training states sampled uniformly from [-10, 10]^2.
x = torch.Tensor(N, D_in).uniform_(-10, 10)
theta = 0.5  # stability margin parameter alpha in the loss
out_iters = 0
valid = False
while out_iters < 1 and not valid:
    start = timeit.default_timer()
    model = Net(D_in, H1, D_out)
    i = 0
    t = 0
    max_iters = 2000
    learning_rate = 0.05
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    L = []  # per-iteration Lyapunov risk (training curve)
    while i < max_iters and not valid:
        out = model(x)
        g = out * x  # controller output gated by the state
        f = inverted_pendulum(x)
        # Row-wise stability condition; negative entries violate it.
        loss = (2 - theta) * torch.diagonal(torch.mm(x, g.T))**2 - torch.diagonal(torch.mm(x, x.T)) * torch.diagonal(2 * torch.mm(x, f.T) + torch.mm(g, g.T))
        # loss = (2-theta)*((x*g)**2)-x**2*(2*x*f+g**2)
        Lyapunov_risk = (F.relu(-loss)).mean()  # penalise only violations
        L.append(Lyapunov_risk)
        print(i, "Lyapunov Risk=", Lyapunov_risk.item())
        optimizer.zero_grad()
        Lyapunov_risk.backward()
        optimizer.step()
        # if Lyapunov_risk == 0.0:
        #     break
        i += 1
    stop = timeit.default_timer()
    print('\n')
    print("Total time: ", stop - start)
    print("Verified time: ", t)
    out_iters += 1
# Persist the training curve and the learned controller weights.
# (Stray dataset metadata " | 2,276 | ..." removed from the second line.)
torch.save(torch.tensor(L), './data/inverted_pendulum/loss_AS.pt')
torch.save(model.state_dict(), './data/inverted_pendulum/algo2_invert_net.pkl')
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/inverted_pendulum/functions.py | import numpy as np
import math
import torch
import timeit
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib.gridspec as gridspec
from scipy.integrate import odeint
import numpy as np
np.random.seed(10)
class Net(torch.nn.Module):
    """Two-layer ReLU network matching the architecture trained by algo2.py."""

    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        torch.manual_seed(2)  # reproducible initial weights
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        """Forward pass: Linear -> ReLU -> Linear."""
        activated = torch.relu(self.layer1(x))
        return self.layer2(activated)
D_in = 2 # input dimension
H1 = 6 # hidden dimension
D_out = 2
inverted_model = Net(D_in, H1, D_out)
# Load the controller weights trained by algo2.py.
inverted_model.load_state_dict(torch.load('./data/inverted_pendulum/algo2_invert_net.pkl'))
# ang = torch.zeros([5,1]) #initial angle
# vel = torch.zeros([5,1]) #initial velocity
# for i in range(5):
#     x0 = np.random.uniform(-6,6,2)
#     ang[i,0] = x0[0]
#     vel[i,0] = x0[1]
def invert_pendulum(state0, t):
    """Integrate the uncontrolled pendulum ODE from *state0* over times *t*.

    Returns the odeint solution transposed to shape (2, len(t)):
    row 0 is theta, row 1 is theta_dot.
    """
    y0 = state0.flatten()
    # Physical constants of the pendulum.
    G, L, m, b = 9.81, 0.5, 0.15, 0.1

    def rhs(state, _t):
        theta, omega = state
        return omega, G * np.sin(theta) / L - b * omega / (m * L ** 2)

    return odeint(rhs, y0, t).transpose()
# Generate controlled-trajectory data
set_state0 = torch.tensor([[-5.0, 5.0], [-3.0, 4.0], [-1.0, 3.0], [1.0, -3.0], [3.0, -4.0], [5.0, -5.0]])

def control_data(set_state0, M=6, N=20000, dt=0.00001):
    """Euler-Maruyama simulation of the controlled stochastic pendulum.

    Simulates M trajectories of N steps starting from the rows of
    *set_state0*, applying the learned controller ``inverted_model``
    through multiplicative noise, and saves the paths to
    ./data/inverted_pendulum/control_data.pt.
    """
    start = timeit.default_timer()
    torch.manual_seed(6)
    X1, X2 = torch.zeros([M, N]), torch.zeros([M, N])
    for r in range(M):
        G = 9.81 # gravity
        L = 0.5 # length of the pole
        m = 0.15 # ball mass
        b = 0.1
        # Independent Brownian increment streams for each state component.
        z1 = torch.randn(N)
        z2 = torch.randn(N)
        # X1[r,0] = ang[r,0]
        # X2[r,0] = vel[r,0]
        X1[r, 0] = set_state0[r, 0]
        X2[r, 0] = set_state0[r, 1]
        for i in range(N - 1):
            x1 = X1[r, i]
            x2 = X2[r, i]
            u = inverted_model(torch.tensor([x1, x2]))
            # dX = f(X) dt + X * u(X) dW  (Euler-Maruyama step)
            new_x1 = x1 + x2 * dt + x1 * u[0] * z1[i] * math.sqrt(dt)
            new_x2 = x2 + (G * math.sin(x1) / L - b * x2 / (m * L ** 2)) * dt + x2 * u[1] * z2[i] * math.sqrt(dt)
            X1[r, i + 1] = new_x1
            X2[r, i + 1] = new_x2
        print('{} done'.format(r))
    orig_data = {'X1': X1, 'X2': X2}
    torch.save(orig_data, './data/inverted_pendulum/control_data.pt')
    stop = timeit.default_timer()
    print(stop - start)

def control_trajectory(ax, title, path='./data/inverted_pendulum/control_data.pt'):
    """Plot theta-vs-time for the saved controlled trajectories on *ax*."""
    data = torch.load(path)
    # X = data['X'].clone().detach()
    X1 = data['X1'].clone().detach()
    # X2 = data['X2']
    for i in range(len(X1)):
        # x = X[i,:].numpy()
        # m = np.max(x)
        # index = np.argwhere(x == m )
        # sample_length = int(index[0])
        L = np.arange(len(X1[0, :3000]))
        plt.plot(L[0], X1[i, 0], marker='*', markersize=8, color=cm.Accent(i * 2))
        plt.plot(L, X1[i, :3000], linestyle='--', color=cm.Accent(i * 2), alpha=0.45)
    L1 = plt.axhline(y=0.0, ls="--", linewidth=1.5, color="green")  # horizontal reference line
    L2 = plt.axhline(y=math.pi, ls="--", linewidth=1.5, color="r")
    L3 = plt.axhline(y=-math.pi, ls="--", linewidth=1.5, color="b")
    ax.add_artist(L1)
    ax.add_artist(L2)
    ax.add_artist(L3)
    plt.legend([L1, L2, L3], [r'$\theta=0$', r'$\theta=\pi$', r'$\theta=-\pi$'], loc='upper right', borderpad=0.05, labelspacing=0.05)
    plt.title(title)
    plt.xlabel('t')
    plt.ylabel(r'$\theta$')
def f(y):
    """Pendulum vector field [dtheta, domega] used for the phase-plane stream plot."""
    # Physical constants of the pendulum.
    G, L, m, b = 9.81, 0.5, 0.15, 0.1
    x1, x2 = y
    return [x2, (m * G * L * np.sin(x1) - b * x2) / (m * L ** 2)]
# Plot the vector field
def Plotflow(Xd, Yd):
    """Overlay a normalized stream plot of the pendulum field f on the current axes."""
    # Plot phase plane
    DX, DY = f([Xd, Yd])
    # Normalize so arrows show direction only, not magnitude.
    DX = DX / np.linalg.norm(DX, ord=2, axis=1, keepdims=True)
    DY = DY / np.linalg.norm(DY, ord=2, axis=1, keepdims=True)
    plt.streamplot(Xd, Yd, DX, DY, color=('gray'), linewidth=0.5,
                   density=0.6, arrowstyle='-|>', arrowsize=1.5)
'''
generate control data
'''
if __name__ == '__main__':
    # Entry point: simulate 6 controlled trajectories (20k steps, dt = 1e-4).
    # Stray dataset metadata (" | 4,192 | ...") removed from this line.
    control_data(set_state0, 6, 20000, 0.0001)
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hyper_a/plot_trajectory.py | from statistics import mean
import sys
sys.path.append('./neural_sde')
import numpy as np
import math
import matplotlib.pyplot as plt
import torch
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
import timeit
# import pylustrator
# pylustrator.start()
start = timeit.default_timer()
# Trajectory tensor saved by generate.py: [state-dim, hyper-index, run, step].
A = torch.load('./neural_sde/hyper_a/data.pt')
A = A[:, -8:-1, :, :]  # keep 7 of the hyper-parameter settings
print(A.shape)

def plot_trajec(L, a):
    """Plot mean +/- std over runs of the trajectories in L (runs x steps), labelled by *a*."""
    mean_data = torch.mean(L, 0).detach().numpy()
    std_data = torch.std(L, 0).detach().numpy()
    # Shaded band = one standard deviation around the mean path.
    plt.fill_between(np.arange(len(mean_data)), mean_data - std_data, mean_data + std_data, color='r', alpha=0.2)
    plt.plot(np.arange(len(mean_data)), mean_data, color='r', alpha=0.9, label=r'$b={}$'.format(a))
    plt.ylim(-1, 6)
    # plt.xlabel('Time')
    plt.yticks([])
    plt.xticks([0.0, 6000], ["$0$", "$0.6$"])
| 816 | 26.233333 | 105 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hyper_a/plot_loss.py | import numpy as np
import matplotlib.pyplot as plt
import torch
import pylustrator
pylustrator.start()
import seaborn as sns
sns.set_theme(style="white")
def plot_a(a):
    """Plot the saved training-loss curve for hyper-parameter alpha = a."""
    L = np.load('./neural_sde/hyper_a/a_{}.npy'.format(a))
    # Pad with zeros so every curve spans 1000 iterations.
    r_L = np.zeros(1000 - len(L))
    L = np.concatenate((L, r_L), axis=0)
    # np.concatenate((a,b),axis=0)
    plt.plot(np.arange(len(L)), L, 'b')
    # plt.xlabel('Iterations')
    plt.ylim(-0.01, 1)
    plt.yticks([])
    plt.title(r'$\alpha={}$'.format(a))
# One subplot per alpha value, 0.65 .. 0.95 in steps of 0.05.
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)
plt.subplot(171)
plot_a(0.65)
plt.ylabel('Loss')
plt.yticks([0, 0.25, 0.5, 0.75, 1.0])
plt.subplot(172)
plot_a(0.7)
plt.subplot(173)
plot_a(0.75)
plt.subplot(174)
plot_a(0.8)
plt.subplot(175)
plot_a(0.85)
plt.subplot(176)
plot_a(0.9)
plt.subplot(177)
plot_a(0.95)
# Auto-generated layout tweaks from pylustrator; regenerate via the GUI rather than editing by hand.
#% start: automatic generated code from pylustrator
plt.figure(1).ax_dict = {ax.get_label(): ax for ax in plt.figure(1).axes}
import matplotlib as mpl
plt.figure(1).set_size_inches(14.460000/2.54, 4.880000/2.54, forward=True)
plt.figure(1).axes[0].set_position([0.118581, 0.256900, 0.084156, 0.543710])
plt.figure(1).axes[1].set_position([0.244815, 0.256900, 0.084156, 0.543710])
plt.figure(1).axes[1].title.set_position([0.500000, 1.000000])
plt.figure(1).axes[2].set_position([0.371050, 0.256900, 0.084156, 0.543710])
plt.figure(1).axes[3].set_position([0.497285, 0.256900, 0.084156, 0.543710])
plt.figure(1).axes[4].set_position([0.623519, 0.256900, 0.084156, 0.543710])
plt.figure(1).axes[5].set_position([0.749754, 0.256900, 0.084156, 0.543710])
plt.figure(1).axes[6].set_position([0.875988, 0.256900, 0.084156, 0.543710])
plt.figure(1).text(0.5, 0.5, 'New Text', transform=plt.figure(1).transFigure)  # id=plt.figure(1).texts[0].new
plt.figure(1).texts[0].set_position([0.474888, 0.048140])
plt.figure(1).texts[0].set_text("Iterations")
#% end: automatic generated code from pylustrator
# Stray dataset metadata (" | 1,949 | ...") removed from this line.
plt.show()
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hyper_a/AS.py | import torch
import torch.nn.functional as F
import numpy as np
import timeit
class Net(torch.nn.Module):
    """Three-layer ReLU controller network with fixed-seed initialisation."""

    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        torch.manual_seed(2)  # reproducible initial weights
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        """Forward pass: Linear -> ReLU -> Linear -> ReLU -> Linear."""
        h1 = torch.relu(self.layer1(x))
        h2 = torch.relu(self.layer2(h1))
        return self.layer3(h2)
def inverted_pendulum(x):
    """Drift f(x) of the damped inverted pendulum, evaluated for a batch.

    Args:
        x: (N, 2) tensor of states [theta, theta_dot].

    Returns:
        (N, 2) tensor [theta_dot, G*sin(theta)/L - b*theta_dot/(m*L**2)].

    Vectorized over the batch: the original per-row Python loop with
    ``torch.tensor`` conversions is O(N) interpreter work.
    """
    G = 9.81   # gravity
    L = 0.5    # length of the pole
    m = 0.15   # ball mass
    b = 0.1    # friction
    theta, omega = x[:, 0], x[:, 1]
    return torch.stack((omega, G * torch.sin(theta) / L - b * omega / (m * L ** 2)), dim=1)
'''
For learning
'''
# Sweep the stability parameter theta (alpha) over 0.05 .. 0.95 and train
# one controller per value, saving the loss curve and weights for each.
N = 500 # sample size
D_in = 2 # input dimension
H1 = 6 # hidden dimension
D_out = 2 # output dimension
torch.manual_seed(2)
x = torch.Tensor(N, D_in).uniform_(-10, 10)
for r in range(19):
    theta = float(format(r * 0.05 + 0.05, '.2f'))  # 0.05, 0.10, ..., 0.95
    start = timeit.default_timer()
    model = Net(D_in, H1, D_out)
    i = 0
    max_iters = 1000
    learning_rate = 0.01
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    L = []  # per-iteration Lyapunov risk for this theta
    while i < max_iters:
        out = model(x)
        g = out * x
        f = inverted_pendulum(x)
        # Row-wise stability condition; negative entries violate it.
        loss = (2 - theta) * torch.diagonal(torch.mm(x, g.T))**2 - torch.diagonal(torch.mm(x, x.T)) * torch.diagonal(2 * torch.mm(x, f.T) + torch.mm(g, g.T))
        # loss = (2-theta)*((x*g)**2)-x**2*(2*x*f+g**2)
        Lyapunov_risk = (F.relu(-loss)).mean()
        L.append(Lyapunov_risk.item())
        print(i, "Lyapunov Risk=", Lyapunov_risk.item())
        optimizer.zero_grad()
        Lyapunov_risk.backward()
        optimizer.step()
        # Early exit once the condition holds on every sample.
        if Lyapunov_risk == 0.0:
            break
        i += 1
    stop = timeit.default_timer()
    print('\n')
    print("Total time: ", stop - start)
    np.save('./hyper_a/a_{}.npy'.format(theta), L)
    torch.save(model.state_dict(), './hyper_a/a_{}.pkl'.format(theta))
| 2,236 | 25.011628 | 141 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hyper_a/test.py | import sys
sys.path.append('./neural_sde')
import numpy as np
import math
import matplotlib.pyplot as plt
import torch
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
import timeit
# Quick shape check: taking every 10th column of a (2, 100) tensor yields (2, 10).
# (Stray dataset metadata " | 273 | ..." removed from the print line.)
A = torch.ones(2, 100)
# B = torch.diagonal(A)
print(A[:, 0:100:10].shape)
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hyper_a/generate.py | import numpy as np
import math
import torch
import timeit
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(10)
class Net(torch.nn.Module):
    """Three-layer ReLU controller network with fixed-seed initialisation."""

    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        torch.manual_seed(2)  # reproducible initial weights
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        """Forward pass: two hidden ReLU layers followed by a linear readout."""
        h1 = torch.relu(self.layer1(x))
        h2 = torch.relu(self.layer2(h1))
        return self.layer3(h2)
D_in = 2   # state dimension
H1 = 6     # hidden width
D_out = 2  # control dimension
model = Net(D_in, H1, D_out)
set_state0 = torch.tensor([[3.0, 5.0]]) # initial
def control_data(model, random_seed, set_state0, M=6, N=20000, dt=0.00001):
    """Euler-Maruyama simulation of the controlled stochastic pendulum.

    Runs M trajectories of N steps from the rows of *set_state0*,
    driving the multiplicative noise through the controller *model*,
    and returns the (theta, theta_dot) paths subsampled by a factor
    of 10 along the time axis.
    """
    t0 = timeit.default_timer()
    torch.manual_seed(random_seed)
    # Physical constants of the pendulum.
    G, L, m, b = 9.81, 0.5, 0.15, 0.1
    sq_dt = math.sqrt(dt)
    X1 = torch.zeros([M, N])
    X2 = torch.zeros([M, N])
    for r in range(M):
        z = torch.randn(N)  # one Brownian increment stream per trajectory
        X1[r, 0], X2[r, 0] = set_state0[r, 0], set_state0[r, 1]
        for i in range(N - 1):
            x1, x2 = X1[r, i], X2[r, i]
            with torch.no_grad():
                u = model(torch.tensor([x1, x2]))
            # dX = f(X) dt + X * u(X) dW  (Euler-Maruyama step)
            X1[r, i + 1] = x1 + x2 * dt + x1 * u[0] * z[i] * sq_dt
            X2[r, i + 1] = x2 + (G * math.sin(x1) / L - b * x2 / (m * L ** 2)) * dt + x2 * u[1] * z[i] * sq_dt
        print('{} done'.format(r))
    # data = {'X1':X1,'X2':X2}
    # torch.save(data,'./neural_sde/hyper_b/b_{}.pt'.format(b))
    stop = timeit.default_timer()
    print(stop - t0)
    # Keep every 10th sample to shrink the returned trajectories.
    return X1[:, 0:N:10], X2[:, 0:N:10]
'''
Generate trajectories under control
'''
if __name__ == '__main__':
    M = 5       # runs per hyper-parameter setting
    N = 60000   # steps per run
    data = torch.zeros([2, 10, M, N])
    for r in range(10):
        b = 2.0 + r * 0.1  # hyper-parameter b in 2.0 .. 2.9
        model.load_state_dict(torch.load('./neural_sde/hyper_b/b_{}.pkl'.format(b)))
        # X1,X2=torch.zeros([M,N]),torch.zeros([M,N])
        for i in range(M):
            # NOTE(review): control_data subsamples by 10, so x1/x2 have N/10
            # columns while data[...] expects N — this assignment looks like it
            # would fail at runtime; confirm against the control_data version used.
            x1, x2 = control_data(model, i * 6, set_state0, 1, N, 0.0001)
            # X1[i,:] = x1[0,:]
            # X2[i,:] = x2[0,:]
            data[0, r, i, :] = x1[0, :]
            data[1, r, i, :] = x2[0, :]
            print('({},{})'.format(r, i))
    torch.save(data, 'data.pt')
'''
Do some test
'''
# model.load_state_dict(torch.load('./neural_sde/hyper_a/a_{}.pkl'.format(0.45)))
# X1,X2 = control_data(model,6*9+1,set_state0,1,60000,0.00001)
# X1 = X1.detach().numpy()[0,:]
# print(X1.shape)
# plt.plot(np.arange(len(X1)),X1)
# plt.show()
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hyper_a/u_plot.py | import matplotlib.pyplot as plt
import torch
import numpy as np
from matplotlib import cm
import matplotlib as mpl
class ControlNet(torch.nn.Module):
    """Three-layer ReLU network whose output gates the state: u(x) = net(x) * x."""

    def __init__(self, n_input, n_hidden, n_output):
        super(ControlNet, self).__init__()
        torch.manual_seed(2)  # reproducible initial weights
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        """Forward pass; the raw network output is multiplied by the input state."""
        h1 = torch.relu(self.layer1(x))
        h2 = torch.relu(self.layer2(h1))
        return self.layer3(h2) * x
D_in = 2
H1 = 6
D_out = 2
model = ControlNet(D_in, H1, D_out)
# Shared colour scale so all heat maps are comparable.
vnorm = mpl.colors.Normalize(vmin=-80, vmax=80)

def draw_image2(f):
    """Render the first output component of *f* as a heat map over [-6, 6]^2."""
    with torch.no_grad():
        x = torch.linspace(-6, 6, 200)
        y = torch.linspace(-6, 6, 200)
        X, Y = torch.meshgrid(x, y)
        inp = torch.stack([X, Y], dim=2)  # (200, 200, 2) grid of states
        image = f(inp)
        image = image[..., 0].detach().cpu()
        plt.imshow(image, extent=[-6, 6, -6, 6], cmap='rainbow', norm=vnorm)
        # plt.xlabel(r'$\theta$')
        plt.xticks([-6, 0, 6])
        plt.yticks([])
    return image
def draw(a):
    """Load the controller trained with alpha = a and plot its first output component."""
    model.load_state_dict(torch.load('./neural_sde/hyper_a/a_{}.pkl'.format(a)))
    draw_image2(model)
| 1,330 | 26.729167 | 80 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hyper_a/functions.py | from os import stat
import numpy as np
import math
import torch
import timeit
import random
import matplotlib.pyplot as plt
from matplotlib import cm
from scipy.integrate import odeint
import numpy as np
np.random.seed(10)
class ControlNet(torch.nn.Module):
    """Three-layer ReLU network producing the raw control output."""

    def __init__(self, n_input, n_hidden, n_output):
        super(ControlNet, self).__init__()
        torch.manual_seed(2)  # reproducible initial weights
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        """Forward pass: two hidden ReLU layers followed by a linear readout."""
        h1 = torch.relu(self.layer1(x))
        h2 = torch.relu(self.layer2(h1))
        return self.layer3(h2)
D_in = 2 # input dimension
H1 = 6 # hidden dimension
D_out = 2
inverted_model = ControlNet(D_in, H1, D_out)
# Load the controller weights trained for hyper-parameter b = 2.2.
inverted_model.load_state_dict(torch.load('./neural_sde/hyper_b/b_2.2.pkl'))
# ang = torch.zeros([5,1]) #initial angle
# vel = torch.zeros([5,1]) #initial velocity
# for i in range(5):
#     x0 = np.random.uniform(-6,6,2)
#     ang[i,0] = x0[0]
#     vel[i,0] = x0[1]
def invert_pendulum(state0, t):
    """Integrate the uncontrolled pendulum ODE from *state0* over times *t*.

    Returns the odeint solution transposed to shape (2, len(t)):
    row 0 is theta, row 1 is theta_dot.
    """
    y0 = state0.flatten()
    # Physical constants of the pendulum.
    G, L, m, b = 9.81, 0.5, 0.15, 0.1

    def rhs(state, _t):
        theta, omega = state
        return omega, G * np.sin(theta) / L - b * omega / (m * L ** 2)

    return odeint(rhs, y0, t).transpose()
# Generate controlled-trajectory data
set_state0 = torch.tensor([[-5.0, 5.0], [-3.0, 4.0], [-1.0, 3.0], [1.0, -3.0], [3.0, -4.0], [5.0, -5.0]])

def control_data(set_state0, M=6, N=20000, dt=0.00001):
    """Euler-Maruyama simulation of the controlled stochastic pendulum.

    Simulates M trajectories of N steps starting from the rows of
    *set_state0*, applying the learned controller ``inverted_model``
    through multiplicative noise, and saves the paths to
    ./neural_sde/inverted_ROA/control_data.pt.
    """
    start = timeit.default_timer()
    torch.manual_seed(6)
    X1, X2 = torch.zeros([M, N]), torch.zeros([M, N])
    for r in range(M):
        G = 9.81 # gravity
        L = 0.5 # length of the pole
        m = 0.15 # ball mass
        b = 0.1
        # Independent Brownian increment streams for each state component.
        z1 = torch.randn(N)
        z2 = torch.randn(N)
        # X1[r,0] = ang[r,0]
        # X2[r,0] = vel[r,0]
        X1[r, 0] = set_state0[r, 0]
        X2[r, 0] = set_state0[r, 1]
        for i in range(N - 1):
            x1 = X1[r, i]
            x2 = X2[r, i]
            u = inverted_model(torch.tensor([x1, x2]))
            # dX = f(X) dt + X * u(X) dW  (Euler-Maruyama step)
            new_x1 = x1 + x2 * dt + x1 * u[0] * z1[i] * math.sqrt(dt)
            new_x2 = x2 + (G * math.sin(x1) / L - b * x2 / (m * L ** 2)) * dt + x2 * u[1] * z2[i] * math.sqrt(dt)
            X1[r, i + 1] = new_x1
            X2[r, i + 1] = new_x2
        print('{} done'.format(r))
    orig_data = {'X1': X1, 'X2': X2}
    torch.save(orig_data, './neural_sde/inverted_ROA/control_data.pt')
    stop = timeit.default_timer()
    print(stop - start)

def control_trajectory(ax, title, path='./neural_sde/inverted_ROA/control_data.pt'):
    """Plot theta-vs-time for the saved controlled trajectories on *ax*."""
    data = torch.load(path)
    # X = data['X'].clone().detach()
    X1 = data['X1'].clone().detach()
    # X2 = data['X2']
    for i in range(len(X1)):
        # x = X[i,:].numpy()
        # m = np.max(x)
        # index = np.argwhere(x == m )
        # sample_length = int(index[0])
        L = np.arange(len(X1[0, :3000]))
        plt.plot(L[0], X1[i, 0], marker='*', markersize=8, color=cm.Accent(i * 2))
        plt.plot(L, X1[i, :3000], linestyle='--', color=cm.Accent(i * 2), alpha=0.45)
    L1 = plt.axhline(y=0.0, ls="--", linewidth=1.5, color="green")  # horizontal reference line
    L2 = plt.axhline(y=math.pi, ls="--", linewidth=1.5, color="r")
    L3 = plt.axhline(y=-math.pi, ls="--", linewidth=1.5, color="b")
    ax.add_artist(L1)
    ax.add_artist(L2)
    ax.add_artist(L3)
    plt.legend([L1, L2, L3], [r'$\theta=0$', r'$\theta=\pi$', r'$\theta=-\pi$'], loc='upper right', borderpad=0.05, labelspacing=0.05)
    plt.title(title)
    plt.xlabel('t')
    plt.ylabel(r'$\theta$')
def f(y):
    """Pendulum vector field [dtheta, domega] used for the phase-plane stream plot."""
    # Physical constants of the pendulum.
    G, L, m, b = 9.81, 0.5, 0.15, 0.1
    x1, x2 = y
    return [x2, (m * G * L * np.sin(x1) - b * x2) / (m * L ** 2)]
# Plot the vector field
def Plotflow(Xd, Yd):
    """Overlay a normalized stream plot of the pendulum field f on the current axes."""
    # Plot phase plane
    DX, DY = f([Xd, Yd])
    # Normalize so arrows show direction only, not magnitude.
    DX = DX / np.linalg.norm(DX, ord=2, axis=1, keepdims=True)
    DY = DY / np.linalg.norm(DY, ord=2, axis=1, keepdims=True)
    plt.streamplot(Xd, Yd, DX, DY, color=('gray'), linewidth=0.5,
                   density=0.6, arrowstyle='-|>', arrowsize=1.5)
if __name__ == '__main__':
    # Stray dataset metadata (" | 4,265 | ...") removed from this line.
    control_data(set_state0, 6, 20000, 0.0001)
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hyper_a/calculate.py | import matplotlib.pyplot as plt
import torch
import numpy as np
def plot_grid():
    """Apply major + minor grid styling to the current axes.

    NOTE(review): the ``b=`` keyword of ``plt.grid`` was renamed
    ``visible`` and removed in Matplotlib >= 3.6, so this code requires
    an older Matplotlib — confirm the pinned version.
    """
    plt.grid(b=True, which='major', color='gray', alpha=0.5, linestyle='dashdot', lw=1.5)
    # minor grid lines
    plt.minorticks_on()
    plt.grid(b=True, which='minor', color='beige', alpha=0.5, ls='-', lw=1)

'''
Calculate and plot the mean end position of trajectories under learning control with each $\alpha$
'''
A = torch.load('./data/hyper_a/data.pt')
A = A[:, :-1, :, :]
print(A.shape)
end = torch.zeros([19])
for r in range(19):
    # Mean final angle over the runs for the r-th alpha setting.
    end[r] = torch.mean(A[0, r, :, -1])
print(end.shape)
end = end.detach().numpy()
plt.scatter(np.arange(len(end)), end, s=45, c=end, marker='.', alpha=0.99, cmap='rainbow')
plot_grid()
# plt.axvline(7.5,ls="--",linewidth=2.5,color="#dc8ff6",alpha=0.3)
plt.axvline(11.5, ls="--", linewidth=2.5, color="#dc8ff6", alpha=0.3)
plt.axhline(0.0, ls="--", linewidth=2.5, color="#dc8ff6", alpha=0.3)
plt.yticks([0, 0.03, 0.06])
plt.ylabel(r'$\theta$')
plt.xlabel(r'$\alpha$')
plt.colorbar()
# Auto-generated layout tweaks from pylustrator; regenerate via the GUI.
#% start: automatic generated code from pylustrator
plt.figure(1).ax_dict = {ax.get_label(): ax for ax in plt.figure(1).axes}
import matplotlib as mpl
plt.figure(1).set_size_inches(12.040000/2.54, 5.670000/2.54, forward=True)
plt.figure(1).ax_dict["<colorbar>"].set_position([0.895507, 0.226426, 0.016383, 0.696457])
plt.figure(1).axes[0].set_xlim(-1.0, 18.9)
plt.figure(1).axes[0].set_xticks([-1.0, 3.0, 7.0, 11.0, 15.0, 19.0])
plt.figure(1).axes[0].set_xticklabels(["0", "0.2", "0.4", "0.6", "0.8", "1.0"], fontsize=10.0, fontweight="normal", color="black", fontstyle="normal", fontname="DejaVu Sans", horizontalalignment="center")
plt.figure(1).axes[0].set_position([0.139423, 0.226426, 0.739233, 0.696457])
plt.figure(1).axes[0].get_xaxis().get_label().set_fontsize(12)
plt.figure(1).axes[0].get_yaxis().get_label().set_fontsize(12)
#% end: automatic generated code from pylustrator
plt.show()
| 1,901 | 40.347826 | 207 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hyper_a/plot.py | import numpy as np
import matplotlib.pyplot as plt
from u_plot import *
from plot_trajectory import *
# import matplotlib
# matplotlib.rcParams['font.sans-serif'] = 'NSimSun,Times New Roman'
# matplotlib.rcParams['text.usetex'] = True
font_size = 15
'''
Pick trajectories data for corresponding $\alpha$
'''
# Trajectory tensor saved by generate.py: [state-dim, alpha-index, run, step].
A = torch.load('./data/hyper_a/data.pt')
A = A[:, -8:-1, :, :]  # keep 7 of the alpha settings
print(A.shape)

def plot_grid():
    """Apply major + minor grid styling to the current axes.

    NOTE(review): ``plt.grid(b=...)`` was removed in Matplotlib >= 3.6
    (renamed ``visible``); this code requires an older Matplotlib.
    """
    plt.grid(b=True, which='major', color='gray', alpha=0.6, linestyle='dashdot', lw=1.5)
    # minor grid lines
    plt.minorticks_on()
    plt.grid(b=True, which='minor', color='beige', alpha=0.8, ls='-', lw=1)

def plot_a(a):
    """Plot the saved training-loss curve for alpha = a, zero-padded to 1000 iterations."""
    L = np.load('./data/hyper_a/a_{}.npy'.format(a))
    r_L = np.zeros(1000 - len(L))
    L = np.concatenate((L, r_L), axis=0)
    # np.concatenate((a,b),axis=0)
    plt.plot(np.arange(len(L)), L, 'b')
    # plt.xlabel('Iterations')
    plt.ylim(-0.01, 1)
    plt.yticks([])
    plt.title(r'$\alpha={}$'.format(a))
# Row 1 of the 4x7 figure: training-loss curves, one column per alpha (0.65 .. 0.95).
for i in range(7):
    # plt.axes([0.1+0.17*i, 0.7, 0.1, 0.1])
    plt.subplot(4, 7, i + 1)
    plot_a(float(format(0.65 + i * 0.05, '.2f')))
    plot_grid()
    if i == 0:
        plt.yticks([0, 10, 20])
        plt.ylabel('Loss', fontsize=font_size)
        plt.text(-5, 5, 'Training', rotation=90, fontsize=font_size)
    else:
        plt.yticks([0, 10, 20], ['', '', ''])
    if i == 3:
        plt.xlabel('Iterations', fontsize=font_size)
# Row 2: theta trajectories (mean +/- std over runs), subsampled by 10.
for i in range(7):
    plt.subplot(4, 7, 7 + i + 1)
    plot_trajec(A[0, i, :, 0:60000:10], float(format(0.65 + i * 0.05, '.2f')))
    plot_grid()
    if i == 0:
        plt.yticks([-10, -5, 0, 5, 10])
        plt.ylabel(r'$\theta$', fontsize=font_size)
        plt.text(-1, -5, 'Trajectory', rotation=90, fontsize=font_size)
    else:
        plt.yticks([-10, -5, 0, 5, 10], ['', '', '', '', ''])
    if i == 3:
        plt.xlabel('Time', fontsize=font_size)
# Row 3: theta_dot trajectories.
for i in range(7):
    plt.subplot(4, 7, 14 + i + 1)
    plot_trajec(A[1, i, :, 0:60000:10], float(format(0.65 + i * 0.05, '.2f')))
    plot_grid()
    if i == 0:
        plt.yticks([-10, -5, 0, 5, 10])
        plt.ylabel(r'$\dot{\theta}$', fontsize=font_size)
        plt.text(-1, -5, 'Trajectory', rotation=90, fontsize=font_size)
    else:
        plt.yticks([-10, -5, 0, 5, 10], ['', '', '', '', ''])
    if i == 3:
        plt.xlabel('Time', fontsize=font_size)
# Row 4: heat maps of the learned control for each alpha.
for i in range(7):
    # plt.axes([0.1+0.17*i, 0.1, 0.1, 0.1])
    plt.subplot(4, 7, 21 + i + 1)
    draw(float(format(0.65 + i * 0.05, '.2f')))
    if i == 0:
        plt.yticks([-5, 0, 5])
        plt.ylabel(r'$\dot{\theta}$', fontsize=font_size)
        plt.text(-15, -3, r'Control $u$', rotation=90, fontsize=font_size)
    if i == 3:
        plt.xlabel(r'$\theta$', fontsize=font_size)
plt.colorbar()
# Stray dataset metadata (" | 2,666 | ...") removed from this line.
plt.show()
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hopf/AS.py | import torch
import torch.nn.functional as F
import numpy as np
import timeit
class Net(torch.nn.Module):
    """Three-layer tanh network with fixed-seed initialisation."""

    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        torch.manual_seed(2)  # reproducible initial weights
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        """Forward pass: two hidden tanh layers followed by a linear readout."""
        h1 = torch.tanh(self.layer1(x))
        h2 = torch.tanh(self.layer2(h1))
        return self.layer3(h2)
def f_value(x):
    """Drift of the 1-D system, f(x) = x (x + 5)(x + 10), element-wise.

    Args:
        x: (N, 1) tensor of samples.

    Returns:
        Tensor with the same shape as ``x``.

    Fix: the original per-row loop wrapped each row in an extra list and
    produced an (N, 1, 1) tensor; in the training loss that shape
    broadcasts ``x * f`` to an unintended (N, N, 1) cross product.
    The vectorized form returns the intended (N, 1) result and avoids
    the O(N) Python loop.
    """
    return x * (x + 5.0) * (x + 10.0)
'''
For learning
'''
# Train a controller for the 1-D system dx = x(x+5)(x+10) dt using the
# almost-sure-stability surrogate loss (element-wise variant).
N = 3000 # sample size
D_in = 1 # input dimension
H1 = 10 # hidden dimension
D_out = 1 # output dimension
torch.manual_seed(10)
x = torch.Tensor(N, D_in).uniform_(-30, 30)
theta = 0.5  # stability margin parameter
out_iters = 0
while out_iters < 1:
    start = timeit.default_timer()
    model = Net(D_in, H1, D_out)
    i = 0
    t = 0
    max_iters = 700
    learning_rate = 0.05
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    L = []  # per-iteration Lyapunov risk
    while i < max_iters:
        out = model(x)
        g = out * x  # controller output gated by the state
        f = f_value(x)
        # loss = (2-theta)*torch.diagonal(torch.mm(x,g.T))**2-torch.diagonal(torch.mm(x,x.T))*torch.diagonal(2*torch.mm(x,f.T)+torch.mm(g,g.T))
        # NOTE(review): check the shape f_value returns — if it is (N,1,1),
        # the product below broadcasts to (N,N,1) instead of element-wise.
        loss = (2 - theta) * ((x * g)**2) - x**2 * (2 * x * f + g**2)
        Lyapunov_risk = (F.relu(-loss)).mean()  # penalise only violations
        L.append(Lyapunov_risk)
        print(i, "Lyapunov Risk=", Lyapunov_risk.item())
        optimizer.zero_grad()
        Lyapunov_risk.backward()
        optimizer.step()
        i += 1
    stop = timeit.default_timer()
    print('\n')
    print("Total time: ", stop - start)
    print("Verified time: ", t)
    out_iters += 1
# torch.save(torch.tensor(L), './data/hopf/loss_AS.pt')
# torch.save(model.state_dict(), './data/hopf/1d_hopf_net.pkl')
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hopf/generate.py | import numpy as np
import math
import matplotlib.pyplot as plt
import torch
import timeit
class Net(torch.nn.Module):
    """Three-layer tanh network matching the architecture trained by hopf/AS.py."""

    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        torch.manual_seed(2)  # reproducible initial weights
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        """Forward pass: two hidden tanh layers followed by a linear readout."""
        h1 = torch.tanh(self.layer1(x))
        h2 = torch.tanh(self.layer2(h1))
        return self.layer3(h2)
hopf_model = Net(1, 10, 1)
# Load the 1-D radial controller trained by hopf/AS.py.
hopf_model.load_state_dict(torch.load('./data/hopf/1d_hopf_net.pkl'))
m = 30
torch.manual_seed(10)
# Random initial conditions in polar coordinates: radius in [3, 10], angle in [0, 2*pi).
rad = torch.Tensor(m, 1).uniform_(3, 10)
ang = torch.Tensor(m, 1).uniform_(0, 6.28)

def original_data(rad, ang, m, N=400, dt=0.001):
    """Simulate m uncontrolled radial trajectories and save them (with x/y coords).

    Radial dynamics: dr = r (r - 5)(r + 5) dt; a step that would push the
    radius past 10 is rejected (state held) to keep trajectories bounded.
    Saves {'X','W','X1','X2'} to ./data/hopf/data.pt.
    """
    X, W = torch.zeros([m, N]), torch.zeros([m, N])
    X1, X2 = torch.zeros([m, N]), torch.zeros([m, N])
    for r in range(m):
        X[r, 0] = rad[r, 0]
        W[r, 0] = ang[r, 0]
        for i in range(N - 1):
            x = X[r, i]
            w = W[r, i]
            # u = hopf_model(torch.tensor([x-5.0]))
            new_x = x + x * (x - 5.0) * (x + 5.0) * dt
            new_w = w + dt
            # Reject steps that blow past radius 10.
            if new_x > 10.0:
                new_x = x
                new_w = w
            X[r, i + 1] = new_x
            W[r, i + 1] = new_w
        # Convert polar (radius, angle) to Cartesian coordinates.
        X1[r, :] = X[r, :] * torch.cos(W[r, :])
        X2[r, :] = X[r, :] * torch.sin(W[r, :])
    orig_data = {'X': X, 'W': W, 'X1': X1, 'X2': X2}
    torch.save(orig_data, './data/hopf/data.pt')

def control_data(rad, ang, m=30, N=30000, dt=0.0001):
    """Simulate m controlled radial trajectories (Euler-Maruyama) and save them.

    The controller acts on the radial deviation (x - 5) through
    multiplicative noise; saves {'X','W','X1','X2'} to
    ./data/hopf/control_data.pt.
    """
    start = timeit.default_timer()
    torch.manual_seed(9)
    X, W = torch.zeros([m, N]), torch.zeros([m, N])
    X1, X2 = torch.zeros([m, N]), torch.zeros([m, N])
    # z = np.random.normal(0,1,N)
    for r in range(m):
        z = torch.randn(N)  # one Brownian increment stream per trajectory
        X[r, 0] = rad[r, 0]
        W[r, 0] = ang[r, 0]
        for i in range(N - 1):
            x = X[r, i]
            w = W[r, i]
            u = hopf_model(torch.tensor([x - 5.0]))
            new_x = x + x * (x - 5.0) * (x + 5.0) * dt + (x - 5.0) * (u[0]) * z[i] * math.sqrt(dt)
            new_w = w + dt
            X[r, i + 1] = new_x
            W[r, i + 1] = new_w
        # Convert polar (radius, angle) to Cartesian coordinates.
        X1[r, :] = X[r, :] * torch.cos(W[r, :])
        X2[r, :] = X[r, :] * torch.sin(W[r, :])
        print('{} done'.format(r))
    orig_data = {'X': X, 'W': W, 'X1': X1, 'X2': X2}
    torch.save(orig_data, './data/hopf/control_data.pt')
    stop = timeit.default_timer()
    print(stop - start)

def test():
    """Quick visual sanity check: one controlled radial trajectory from r = 8."""
    N = 100
    dt = 0.0001
    X = torch.zeros([1, N])
    W = torch.zeros([1, N])
    X[0, 0] = 8.0
    W[0, 0] = 3.8
    z = torch.randn(N)
    for i in range(N - 1):
        x = X[0, i]
        w = W[0, i]
        u = hopf_model(torch.tensor([x - 5.0]))
        new_x = x + x * (x - 5.0) * (x + 5.0) * dt + (x - 5.0) * (u[0]) * z[i] * math.sqrt(dt)
        new_w = w + dt
        X[0, i + 1] = new_x
        W[0, i + 1] = new_w
    X = X.clone().detach()
    plt.plot(np.arange(N), X[0, :], 'r')
    plt.show()

if __name__ == '__main__':
    control_data(rad, ang, m, 600, 0.0001)
    original_data(rad, ang, m, 400, 0.001)
    test()
| 3,077 | 27.766355 | 80 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hopf/functions.py | import numpy as np
import torch
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib.gridspec as gridspec
# Hopf vector field
def f(y, t):
    """Planar Hopf-type vector field; *t* is unused (odeint-style signature)."""
    x1, x2 = y
    r2 = x1 ** 2 + x2 ** 2  # squared radius
    return [-25.0 * x1 - x2 + x1 * r2, x1 - 25 * x2 + x2 * r2]
# Plot the vector field
def Plotflow(Xd, Yd, t):
    """Overlay a normalized stream plot of the Hopf field f on the current axes."""
    # Plot phase plane
    DX, DY = f([Xd, Yd], t)
    # Normalize so arrows show direction only, not magnitude.
    DX = DX / np.linalg.norm(DX, ord=2, axis=1, keepdims=True)
    DY = DY / np.linalg.norm(DY, ord=2, axis=1, keepdims=True)
    plt.streamplot(Xd, Yd, DX, DY, color=('gray'), linewidth=0.5,
                   density=0.6, arrowstyle='-|>', arrowsize=1.5)

def plot_orbit(ax, title, path='./hopf/control_data.pt'):
    """Phase-plane plot of controlled orbits around the rho = 5 limit cycle."""
    data = torch.load(path)
    X = data['X'].clone().detach()
    X1 = data['X1'].clone().detach()
    X2 = data['X2'].clone().detach()
    # Add the limit cycle (circle of radius 5).
    C = plt.Circle((0, 0), 5, color='g', linewidth=2.5, fill=False)
    ax.add_artist(C)
    # Draw the vector field.
    xd = np.linspace(-10, 10, 10)
    yd = np.linspace(-10, 10, 10)
    Xd, Yd = np.meshgrid(xd, yd)
    t = np.linspace(0, 2, 2000)
    Plotflow(Xd, Yd, t)
    m = len(X1)
    for i in range(m):
        if 9.6 > X[i, 0] > 5.5 and torch.max(X[i, :]) < 10 and torch.min(X[i, :]) > 0:  # skip orbits blown up by the noise
            plt.plot(X1[i, 0], X2[i, 0], marker='*', markersize=8, color='r')
            plt.plot(X1[i, :], X2[i, :], linestyle='--', color='r')
        elif X[i, 0] < 4.5 and torch.max(X[i, :]) < 10 and torch.min(X[i, :]) > 0:  # skip orbits blown up by the noise
            plt.plot(X1[i, 0], X2[i, 0], marker='*', markersize=8, color='b')
            plt.plot(X1[i, :], X2[i, :], linestyle='--', color='b')
    plt.legend([C], ['limit cycle'], loc='upper right')
    plt.title(title)
    plt.xlabel('x')
    plt.ylabel('y')

# Uncontrolled trajectories starting outside the limit cycle.
def uncontrol_trajectory1(ax, title, path='./hopf/data.pt'):
    """Radius-vs-time plot for uncontrolled orbits started outside the cycle."""
    data = torch.load(path)
    X = data['X']
    C = plt.axhline(y=5.0, ls="--", linewidth=2.5, color="green")  # horizontal line at the limit cycle
    U = plt.axhline(y=9.5, ls="--", linewidth=2.5, color="black")
    ax.add_artist(C)
    ax.add_artist(U)
    for i in range(len(X)):
        if 9.5 > X[i, 0] > 5.5:
            x = X[i, :].numpy()
            m = np.max(x)
            index = np.argwhere(x == m)
            # Plot only up to the orbit's maximum radius.
            sample_length = int(index[0])
            L = np.arange(len(X[0, :sample_length]))
            plt.plot(L[0], X[i, 0], marker='*', markersize=8, color='r')
            plt.plot(L, X[i, :sample_length], linestyle='--', color='r')
    plt.legend([U, C], [r'$\rho$=9.5', r'$\rho$=5.0'], borderpad=0.01, labelspacing=0.01)
    plt.title(title)
    plt.xlabel('t')
    plt.ylabel(r'$\rho$')

# Uncontrolled trajectories starting inside the limit cycle; sample_length
# selects how many steps from the stored data to draw.
def uncontrol_trajectory2(ax, title, sample_length=40, path='./hopf/control_data.pt'):
    """Radius-vs-time plot for orbits started inside the cycle."""
    data = torch.load(path)
    X = data['X'].clone().detach()
    C = plt.axhline(y=5.0, ls="--", linewidth=2.5, color="green")  # horizontal line at the limit cycle
    U = plt.axhline(y=0.0, ls="--", linewidth=2.5, color="deeppink")  # horizontal line at the origin
    ax.add_artist(C)
    ax.add_artist(U)
    for i in range(len(X)):
        if X[i, 0] < 4.5:
            L = np.arange(len(X[0, :sample_length]))
            plt.plot(L[0], X[i, 0], marker='*', markersize=8, color='b')
            plt.plot(L, X[i, :sample_length], linestyle='--', color='b')
    plt.legend([C, U], [r'$\rho$=5.0', r'$\rho$=0.0'], borderpad=0.01, labelspacing=0.01)
    plt.title(title)
    plt.xlabel('t')
    plt.ylabel(r'$\rho$')

# Controlled trajectories starting outside the limit cycle.
def control_trajectory1(ax, title, sample_length, path='./hopf/data.pt'):
    """Radius-vs-time plot for controlled orbits started outside the cycle."""
    data = torch.load(path)
    X = data['X'].clone().detach()
    C = plt.axhline(y=5.0, ls="--", linewidth=2.5, color="green")  # horizontal line at the limit cycle
    ax.add_artist(C)
    for i in range(len(X)):
        if 9.6 > X[i, 0] > 5.5:
            L = np.arange(len(X[0, :sample_length]))
            plt.plot(L[0], X[i, 0], marker='*', markersize=8, color='r')
            plt.plot(L, X[i, :sample_length], linestyle='--', color='r')
    plt.legend([C], [r'$\rho$=5.0'], borderpad=0.01, labelspacing=0.01)
    plt.title(title)
    plt.xlabel('t')
    plt.ylabel(r'$\rho$')

# Controlled trajectories starting inside the limit cycle.
def control_trajectory2(ax, title, sample_length=40, path='./hopf/control_data.pt'):
    """Radius-vs-time plot for controlled orbits started inside the cycle."""
    data = torch.load(path)
    X = data['X'].clone().detach()
    C = plt.axhline(y=5.0, ls="--", linewidth=2.5, color="green")  # horizontal line at the limit cycle
    ax.add_artist(C)
    for i in range(len(X)):
        if X[i, 0] < 4.5:
            L = np.arange(len(X[0, :sample_length]))
            plt.plot(L[0], X[i, 0], marker='*', markersize=8, color='b')
            plt.plot(L, X[i, :sample_length], linestyle='--', color='b')
    plt.legend([C], [r'$\rho$=5.0'], borderpad=0.01, labelspacing=0.01)
    plt.title(title)
    plt.xlabel('t')
    plt.ylabel(r'$\rho$')
| 4,576 | 33.674242 | 92 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/Echo/AS.py | import torch
import torch.nn.functional as F
import numpy as np
import timeit
import argparse
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--N', type=float, default=5000)
parser.add_argument('--lr', type=float, default=0.03)
args = parser.parse_args()
class Net(torch.nn.Module):
    """Two-hidden-layer ReLU MLP used as the stochastic controller.

    The RNG is re-seeded in __init__, so every instance with the same layer
    sizes starts from identical weights (reproducible experiments).
    """

    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        torch.manual_seed(2)  # fixed seed -> deterministic initialization
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        # Two ReLU hidden layers followed by a linear output layer.
        activation = torch.nn.ReLU()
        hidden = activation(self.layer2(activation(self.layer1(x))))
        return self.layer3(hidden)
'''
For learning
'''
N = args.N # sample size
D_in = 50 # input dimension
H1 = 4*D_in # hidden dimension
D_out = D_in # output dimension
torch.manual_seed(10)
x = torch.Tensor(N, D_in).uniform_(-10, 10)
A = np.load('neural_sde/Echo/50/A_{}.npy'.format(D_in))
A = torch.tensor(A).to(torch.float32)
theta = 0.8
out_iters = 0
valid = False
while out_iters < 1 and not valid:
# break
start = timeit.default_timer()
model = Net(D_in,H1, D_out)
i = 0
t = 0
max_iters = 10000
learning_rate = args.lr
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
while i < max_iters and not valid:
out = model(x)
g = out*x
f = torch.relu(torch.mm(A,x.T)).T
loss = (2-theta)*torch.diagonal(torch.mm(x,g.T))**2-torch.diagonal(torch.mm(x,x.T))*torch.diagonal(2*torch.mm(x,f.T)+torch.mm(g,g.T))
# loss = (2-theta)*((x*g)**2)-x**2*(2*x*f+g**2)
Lyapunov_risk = (F.relu(-loss)).mean()
print(i, "Lyapunov Risk=",Lyapunov_risk.item())
optimizer.zero_grad()
Lyapunov_risk.backward()
optimizer.step()
if Lyapunov_risk == 0:
break
i += 1
stop = timeit.default_timer()
print('\n')
print("Total time: ", stop - start)
print("Verified time: ", t)
out_iters+=1
torch.save(model.state_dict(), './data/Echo/AS_{}_relu_net.pkl'.format(D_in)) | 2,257 | 24.954023 | 141 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/Echo/generate.py | import numpy as np
import torch
import math
class Net(torch.nn.Module):
    """Controller network: two hidden ReLU layers; same architecture as in AS.py.

    The fixed seed in __init__ makes equal-size instances identical, so a
    checkpoint saved by the training script loads into this class directly.
    """
    def __init__(self,n_input,n_hidden,n_output):
        super(Net, self).__init__()
        torch.manual_seed(2)
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden,n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden,n_output)
    def forward(self,x):
        # NOTE(review): the local is named `sigmoid` but the activation is ReLU.
        sigmoid = torch.nn.ReLU()
        h_1 = sigmoid(self.layer1(x))
        h_2 = sigmoid(self.layer2(h_1))
        out = self.layer3(h_2)
        return out
D_in = 50 # input dimension
H1 = 4*D_in # hidden dimension
D_out = D_in
A = np.load('./data/Echo/A_{}.npy'.format(D_in))
A = torch.tensor(A).to(torch.float32)
m = 10
N = 200000
dt = 0.000001
model = Net(D_in,H1,D_out)
x0 = torch.linspace(-2,2,50)
def tanh_generate(m,N,dt):
    """Simulate m Euler-Maruyama trajectories of dx = tanh(Ax)dt + u(x)*x dW
    using the tanh-system controller checkpoint, and save them as a numpy array.

    m: number of trajectories; N: number of steps; dt: step size.
    Array saved to ./data/Echo/tanh_data.npy has shape (m, N+1, D_in).
    """
    model.load_state_dict(torch.load('./data/Echo/AS_50_net.pkl'))
    X = torch.zeros(m,N+1,D_in)
    for r in range(m):
        torch.manual_seed(6*r+6)  # per-trajectory seed for reproducible noise
        z = torch.randn(N)  # one scalar Brownian increment per step, shared by all coordinates
        X[r,0,:] = x0
        for i in range(N):
            x = X[r,i,:].unsqueeze(1)
            with torch.no_grad():
                u = model(X[r,i,:]).unsqueeze(1)
            # Euler-Maruyama step: drift tanh(Ax), multiplicative diffusion u*x
            new_x = x + torch.tanh(torch.mm(A,x))*dt + math.sqrt(dt)*z[i]*u*x
            X[r,i+1,:]=new_x[:,0]
        print(r)
    X = X.detach().numpy()
    np.save('./data/Echo/tanh_data.npy',X)
def relu_generate(m,N,dt):
    """Simulate m Euler-Maruyama trajectories of dx = relu(Ax)dt + u(x)*x dW
    using the relu-system controller checkpoint, and save them as a numpy array.

    m: number of trajectories; N: number of steps; dt: step size.
    Array saved to ./data/Echo/relu_data.npy has shape (m, N+1, D_in).
    """
    # NOTE(review): hidden width is 100 here, while AS.py trains with
    # H1 = 4*D_in = 200 — confirm the checkpoint below was saved with width
    # 100, otherwise load_state_dict will raise a size-mismatch error.
    model = Net(D_in,100,D_out)
    model.load_state_dict(torch.load('./data/Echo/AS_50_relu_net.pkl'))
    X = torch.zeros(m,N+1,D_in)
    for r in range(m):
        torch.manual_seed(6*r+6)  # per-trajectory seed for reproducible noise
        z = torch.randn(N)
        X[r,0,:] = x0
        for i in range(N):
            x = X[r,i,:].unsqueeze(1)
            with torch.no_grad():
                u = model(X[r,i,:]).unsqueeze(1)
            # Euler-Maruyama step: drift relu(Ax), multiplicative diffusion u*x
            new_x = x + torch.relu(torch.mm(A,x))*dt + math.sqrt(dt)*z[i]*u*x
            X[r,i+1,:]=new_x[:,0]
        print(r)
    X = X.detach().numpy()
    np.save('./data/Echo/relu_data.npy',X)
tanh_generate(m,N,dt)
relu_generate(m,N,dt)
| 2,073 | 26.653333 | 77 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hyper_b/plot_trajectory.py | import numpy as np
import math
import matplotlib.pyplot as plt
import torch
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
import timeit
start = timeit.default_timer()
def plot_trajec(L,b):
    """Plot the mean trajectory with a +/- one-std shaded band for parameter value b.

    L: tensor of shape (n_trajectories, T); b: value shown in the legend label.
    """
    mean_data = torch.mean(L,0).detach().numpy()
    std_data =torch.std(L,0).detach().numpy()
    plt.fill_between(np.arange(len(mean_data)),mean_data-std_data,mean_data+std_data,color='r',alpha=0.2)
    plt.plot(np.arange(len(mean_data)),mean_data,color='r',alpha=0.9,label=r'$b={}$'.format(b))
    plt.ylim(-10,10)
    # plt.xlabel('Time')
    plt.xticks([0.0, 500, 1000], ["$0$", "$0.5$", "$1.0$"])  # relabel step index as time units
    plt.yticks([])
| 639 | 28.090909 | 105 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hyper_b/V_plot.py | import matplotlib.pyplot as plt
import torch
import numpy as np
from matplotlib import cm
import matplotlib as mpl
# import matplotlib
# matplotlib.rcParams['font.sans-serif'] = 'NSimSun,Times New Roman'
# matplotlib.rcParams['text.usetex'] = True
colors = [
[233/256, 110/256, 236/256], # #e96eec
# [0.6, 0.6, 0.2], # olive
# [0.5333333333333333, 0.13333333333333333, 0.3333333333333333], # wine
[255/255, 165/255, 0],
# [0.8666666666666667, 0.8, 0.4666666666666667], # sand
# [223/256, 73/256, 54/256], # #df4936
[107/256, 161/256,255/256], # #6ba1ff
[0.6, 0.4, 0.8], # amethyst
[0.0, 0.0, 1.0], # ao
[0.55, 0.71, 0.0], # applegreen
# [0.4, 1.0, 0.0], # brightgreen
[0.99, 0.76, 0.8], # bubblegum
[0.93, 0.53, 0.18], # cadmiumorange
[11/255, 132/255, 147/255], # deblue
[204/255, 119/255, 34/255], # {ocra}
]
colors = np.array(colors)
# Weight of the quadratic term that keeps the Lyapunov candidate positive away from 0.
l = 0.01

class VNet(torch.nn.Module):
    """Lyapunov-candidate network: V(x) = l*x^2 + (x * phi(x))^2 elementwise,
    where phi is a one-hidden-layer tanh MLP. Seeded init makes equal-size
    instances identical.
    """

    def __init__(self, n_input, n_hidden, n_output):
        super(VNet, self).__init__()
        torch.manual_seed(2)  # deterministic weights
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        act = torch.nn.Tanh()
        phi = self.layer2(act(self.layer1(x)))
        # l*x*x forces positivity off the origin; (x*phi)^2 is the learned part.
        return l * x * x + (x * phi) ** 2
D_in = 2
H1 = 6
D_out = 2
vmodel = VNet(D_in,H1,D_out)
V_vnorm = mpl.colors.Normalize(vmin=0, vmax=2.0)
D = 6
def draw_imageV(f):
with torch.no_grad():
x = torch.linspace(-D, D, 200)
y = torch.linspace(-D, D, 200)
X, Y = torch.meshgrid(x, y)
inp = torch.stack([X, Y], dim=2)
image = f(inp)
image = image[..., 0].detach().cpu()
plt.contour(X,Y,image-0.05,0,linewidths=2, colors=colors[-3],linestyles='--')
# plt.contourf(X,Y,image,8,alpha=0.3,cmap='turbo',norm=vnorm)
plt.imshow(image, extent=[-6, 6, -6, 6], cmap='rainbow',norm=V_vnorm)
plt.xticks([-5,0,5])
plt.yticks([])
return image
def drawV(a):
vmodel.load_state_dict(torch.load('./neural_sde/hyper_b/V_b_{}.pkl'.format(a)))
draw_imageV(vmodel)
# plt.title(r'b$={}$'.format(a))
| 2,205 | 28.413333 | 83 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hyper_b/generate.py | import numpy as np
import math
import torch
import timeit
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(10)
class ControlNet(torch.nn.Module):
    """Control-gain network: a two-hidden-layer ReLU MLP.

    Re-seeding the RNG in __init__ makes every instance with the same layer
    sizes start from identical weights (reproducible runs / checkpoint loads).
    """

    def __init__(self, n_input, n_hidden, n_output):
        super(ControlNet, self).__init__()
        torch.manual_seed(2)  # reproducible initialization
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        h = torch.relu(self.layer1(x))
        h = torch.relu(self.layer2(h))
        return self.layer3(h)
D_in = 2
H1 = 6
D_out = 2
model = ControlNet(D_in,H1,D_out)
set_state0 = torch.tensor([[-5.0,5.0]])
# set_state0 = torch.tensor([[-5.0,5.0],[-3.0,4.0],[-1.0,3.0],[1.0,-3.0],[3.0,-4.0],[5.0,-5.0]])
def control_data(model,random_seed,set_state0,M=6,N=20000,dt=0.00001):
    """Euler-Maruyama simulation of the stochastic inverted pendulum under control.

    model: network mapping a state [x1, x2] to the two control gains;
    random_seed: seed for the Brownian increments; set_state0: tensor of
    initial states, one row per trajectory; M: number of trajectories;
    N: steps per trajectory; dt: step size.
    Returns (X1, X2): tensors of shape (M, N) with the angle and angular
    velocity trajectories.
    """
    start = timeit.default_timer()
    torch.manual_seed(random_seed)
    X1,X2 = torch.zeros([M,N]),torch.zeros([M,N])
    for r in range(M):
        G = 9.81 # gravity
        L = 0.5 # length of the pole
        m = 0.15 # ball mass
        b = 0.1 # friction
        z = torch.randn(N)  # Brownian increments, one scalar per step
        X1[r,0] = set_state0[r,0]
        X2[r,0] = set_state0[r,1]
        for i in range(N-1):
            x1 = X1[r,i]
            x2 = X2[r,i]
            with torch.no_grad():
                u = model(torch.tensor([x1,x2]))
            # multiplicative noise: each channel is scaled by its own state and gain
            new_x1 = x1 + x2*dt + x1*u[0]*z[i]*math.sqrt(dt)
            new_x2 = x2 + (G*math.sin(x1)/L - b*x2/(m*L**2))*dt + x2*u[1]*z[i]*math.sqrt(dt)
            X1[r,i+1] = new_x1
            X2[r,i+1] = new_x2
        print('{} done'.format(r))
    # data = {'X1':X1,'X2':X2}
    # torch.save(data,'./neural_sde/hyper_b/b_{}.pt'.format(b))
    stop = timeit.default_timer()
    print(stop-start)
    return X1,X2
'''
Generate trajectories under control with corresponding b
'''
if __name__ == '__main__':
M = 5
N = 20000
data = torch.zeros([2,10,M,N])
for r in range(10):
b = 2.0 + r*0.1
model.load_state_dict(torch.load('./data/hyper_b/b_{}.pkl'.format(b)))
# X1,X2=torch.zeros([M,N]),torch.zeros([M,N])
for i in range(M):
x1,x2 = control_data(model,i*6,set_state0,1,N,0.0001)
# X1[i,:] = x1[0,:]
# X2[i,:] = x2[0,:]
data[0,r,i,:] = x1[0,:]
data[1,r,i,:] = x2[0,:]
print('({},{})'.format(r,i))
torch.save(data,'data.pt')
# model.load_state_dict(torch.load('./neural_sde/hyper_b/b_{}.pkl'.format(1.6)))
# X1,X2 = control_data(model,6,set_state0,1,30000,0.0001)
# X1 = X1.detach().numpy()[0,:]
# print(X1.shape)
# plt.plot(np.arange(len(X1)),X1)
# plt.show() | 2,768 | 30.827586 | 96 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hyper_b/u_plot.py | import matplotlib.pyplot as plt
import torch
import numpy as np
from matplotlib import cm
import matplotlib as mpl
class ControlNet(torch.nn.Module):
    """Control network returning u(x) = MLP(x) * x elementwise.

    Multiplying the MLP output by the state guarantees the control vanishes at
    the origin. Seeded init makes equal-size instances identical.
    """

    def __init__(self, n_input, n_hidden, n_output):
        super(ControlNet, self).__init__()
        torch.manual_seed(2)  # deterministic initialization
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        h = torch.relu(self.layer1(x))
        h = torch.relu(self.layer2(h))
        # gain times state: control is zero at x = 0
        return self.layer3(h) * x
D_in = 2
H1 = 6
D_out = 2
cmodel = ControlNet(D_in,H1,D_out)
C_vnorm = mpl.colors.Normalize(vmin=-80, vmax=80)
def draw_image(f):
with torch.no_grad():
x = torch.linspace(-6, 6, 200)
y = torch.linspace(-6, 6, 200)
X, Y = torch.meshgrid(x, y)
inp = torch.stack([X, Y], dim=2)
image = f(inp)
image = image[..., 0].detach().cpu()
plt.imshow(image, extent=[-6, 6, -6, 6], cmap='rainbow',norm=C_vnorm)
# plt.xlabel(r'$\theta$')
plt.xticks([-5,0,5])
plt.yticks([])
# plt.show()
return image
def draw(a):
cmodel.load_state_dict(torch.load('./neural_sde/hyper_b/b_{}.pkl'.format(a)))
draw_image(cmodel)
# plt.title(r'b$={}$'.format(a))
| 1,389 | 27.367347 | 81 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hyper_b/calculate.py | import matplotlib.pyplot as plt
import torch
import numpy as np
# import pylustrator
# pylustrator.start()
def plot_grid():
    """Draw major (dashdot gray) and minor (solid beige) grid lines on the current axes."""
    # Pass the visibility flag positionally: the keyword was `b=` in
    # Matplotlib < 3.5 but was renamed to `visible=` (and `b=` removed) in
    # newer releases; the positional form works on every version.
    plt.grid(True, which='major', color='gray', alpha=0.5, linestyle='dashdot', lw=1.5)
    # minor grid lines
    plt.minorticks_on()
    plt.grid(True, which='minor', color='beige', alpha=0.5, ls='-', lw=1)
A = torch.load('./neural_sde/hyper_b/data.pt')
print(A.shape)
end = torch.zeros([20])
for r in range(20):
end[r] = torch.mean(A[0,r,:,-1])
print(end)
end = end.detach().numpy()
plt.scatter(np.arange(len(end)),end, s=45, c=end, marker='.',alpha=0.99,cmap='rainbow')
plot_grid()
plt.yticks([0,1,2])
plt.xticks([0.0, 4.0, 8.0, 12.0, 16.0, 20.0],["1.0", "1.4", "1.8", "2.2", "2.6", "3.0"])
plt.axvline(8.5,ls="--",linewidth=2.5,color="#dc8ff6",alpha=0.3)
plt.axvline(13.5,ls="--",linewidth=2.5,color="#dc8ff6",alpha=0.3)
plt.axhline(0.0,ls="--",linewidth=2.5,color="#dc8ff6",alpha=0.3)
plt.ylabel(r'$\theta$')
plt.xlabel(r'$b$')
plt.colorbar()
#% start: automatic generated code from pylustrator
plt.figure(1).ax_dict = {ax.get_label(): ax for ax in plt.figure(1).axes}
import matplotlib as mpl
plt.figure(1).set_size_inches(11.360000/2.54, 4.990000/2.54, forward=True)
plt.figure(1).ax_dict["<colorbar>"].set_position([0.931942, 0.234718, 0.014887, 0.679046])
plt.figure(1).axes[0].set_xlim(-0.9, 20.0)
# plt.figure(1).axes[0].set_xticks([0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0])
# plt.figure(1).axes[0].set_xticklabels(["1.0", "1.2", "1.4", "1.6", "1.8", "2.0", "2.2", "2.4", "2.6", "2.8", "3.0"], fontsize=10.0, fontweight="normal", color="black", fontstyle="normal", fontname="DejaVu Sans", horizontalalignment="center")
# plt.figure(1).axes[0].grid(False)
plt.figure(1).axes[0].set_position([0.092998, 0.225654, 0.826345, 0.697175])
#% end: automatic generated code from pylustrator
plt.show()
| 1,872 | 43.595238 | 243 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hyper_b/ES_Quadratic.py | import sys
sys.path.append('./neural_sde')
import torch
import torch.nn.functional as F
import numpy as np
import timeit
from hessian import hessian
from hessian import jacobian
# from gradient import hessian
# from gradient import jacobian
class ControlNet(torch.nn.Module):
    """Control-gain network: a two-hidden-layer MLP with ReLU activations.

    Re-seeding the RNG in __init__ makes every instance with the same layer
    sizes start from identical weights (reproducible training runs).
    """
    def __init__(self,n_input,n_hidden,n_output):
        super(ControlNet, self).__init__()
        torch.manual_seed(2)
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden,n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden,n_output)
    def forward(self,x):
        # NOTE(review): the local is named `sigmoid` but the activation is ReLU.
        sigmoid = torch.nn.ReLU()
        h_1 = sigmoid(self.layer1(x))
        h_2 = sigmoid(self.layer2(h_1))
        out = self.layer3(h_2)
        return out
class VNet(torch.nn.Module):
    """Lyapunov-feature network: one hidden tanh layer; the training script
    combines the raw output with the state (l*x*x + (x*out)**2) to build a
    positive-definite Lyapunov candidate."""
    def __init__(self,n_input,n_hidden,n_output):
        super(VNet, self).__init__()
        torch.manual_seed(2)
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden,n_output)
    def forward(self,x):
        # NOTE(review): the local is named `sigmoid` but the activation is Tanh.
        sigmoid = torch.nn.Tanh()
        h_1 = sigmoid(self.layer1(x))
        out = self.layer2(h_1)
        return out
class Net(torch.nn.Module):
    """Joint model pairing a Lyapunov-feature net (_v) with a control net (_control).

    forward returns (v, u*x): the control output is multiplied elementwise by
    the state, so the effective control vanishes at the origin.
    """
    def __init__(self,n_input,n_hidden,n_output):
        super(Net, self).__init__()
        torch.manual_seed(2)
        self._v = VNet(n_input,n_hidden,n_output)
        self._control = ControlNet(n_input,n_hidden,n_output)
    def forward(self,x):
        v = self._v(x)
        u = self._control(x)
        return v,u*x
def inverted_pendulum(x):
    """Drift term of the inverted pendulum.

    Args:
        x: tensor of shape (N, 2); column 0 is the angle theta, column 1 the
           angular velocity theta_dot.

    Returns:
        Tensor of shape (N, 2): rows are [theta_dot,
        G*sin(theta)/L - b*theta_dot/(m*L**2)].
    """
    G = 9.81 # gravity
    L = 0.5  # length of the pole
    m = 0.15 # ball mass
    b = 0.1  # friction
    # Vectorized over the batch: equivalent to the original per-row loop but
    # avoids building Python lists of 0-dim tensors (which torch.tensor()
    # deprecates) and is O(1) torch calls instead of O(N).
    return torch.stack(
        [x[:, 1], G * torch.sin(x[:, 0]) / L - b * x[:, 1] / (m * L ** 2)],
        dim=1,
    )
'''
For learning
'''
N = 500 # sample size
D_in = 2 # input dimension
H1 = 6 # hidden dimension
D_out = 2 # output dimension
torch.manual_seed(10)
x = torch.Tensor(N, D_in).uniform_(-10, 10)
l = 0.01
# valid = False
# while out_iters < 1:
for r in range(1):
b = float(format(2.1 + r*0.1,'.1f'))
start = timeit.default_timer()
model = Net(D_in,H1, D_out)
i = 0
t = 0
max_iters = 1000
learning_rate = 0.01
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
L = []
while i < max_iters:
V_net, u = model(x)
W1 = model._v.layer1.weight
W2 = model._v.layer2.weight
B1 = model._v.layer1.bias
B2 = model._v.layer2.bias
f = inverted_pendulum(x)
g = u
x = x.clone().detach().requires_grad_(True)
output = torch.mm(torch.tanh(torch.mm(x,W1.T)+B1),W2.T)+B2
# V = torch.sum(output)
num_v = torch.sum(l*x*x + ( x*output)**2,1)
# num_v = torch.sum(output,1)
V = torch.sum(l*x*x + (x*output)**2)
Vx = jacobian(V,x)
Vxx = hessian(V,x)
loss = torch.zeros(N)
for r in range(N):
L_V = torch.sum(Vx[0,2*r:2*r+2]*f[r,:]) + 0.5*torch.mm(g[r,:].unsqueeze(0),torch.mm(Vxx[2*r:2*r+2,2*r:2*r+2],g[r,:].unsqueeze(1)))
Vxg = torch.sum(Vx[0,2*r:2*r+2]*g[r,:])
v = num_v[r]
loss[r] = Vxg**2/(v**2) - b*L_V/v
Lyapunov_risk = (F.relu(-loss)).mean()
L.append(Lyapunov_risk.item())
print(i, "Lyapunov Risk=",Lyapunov_risk.item())
optimizer.zero_grad()
Lyapunov_risk.backward()
optimizer.step()
# if Lyapunov_risk < 0.12:
# optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# else:
# optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
# print(q)
# if Lyapunov_risk < 1.0:
# optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
# else:
# optimizer = torch.optim.Adam(model.parameters(), lr=0.5)
if Lyapunov_risk == 0.0:
break
i += 1
stop = timeit.default_timer()
print('\n')
print("Total time: ", stop - start)
# np.save('./neural_sde/hyper_b/b_{}.npy'.format(b), L)
# torch.save(model._control.state_dict(),'./neural_sde/hyper_b/b_{}.pkl'.format(b)) | 4,311 | 28.737931 | 142 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/hyper_b/plot.py | import numpy as np
import matplotlib.pyplot as plt
from V_plot import *
from u_plot import *
from plot_trajectory import *
# import matplotlib
# matplotlib.rcParams['font.sans-serif'] = 'NSimSun,Times New Roman'
# matplotlib.rcParams['text.usetex'] = True
font_size = 15
A = torch.load('./data/hyper_b/data.pt')[:,9:14,:,:] #pick trajectories correspond to 1.9,2.0,2.1,2.2,2.3
# print(A.shape)
def plot_grid():
    """Draw major (dashdot gray) and minor (solid beige) grid lines on the current axes."""
    # Pass the visibility flag positionally: the keyword was `b=` in
    # Matplotlib < 3.5 but was renamed to `visible=` (and `b=` removed) in
    # newer releases; the positional form works on every version.
    plt.grid(True, which='major', color='gray', alpha=0.6, linestyle='dashdot', lw=1.5)
    # minor grid lines
    plt.minorticks_on()
    plt.grid(True, which='minor', color='beige', alpha=0.8, ls='-', lw=1)
def plot_b(b):
L = np.load('./data/hyper_b/b_{}.npy'.format(b))
r_L = np.zeros(1000-len(L))
L = np.concatenate((L,r_L),axis=0)
# np.concatenate((a,b),axis=0)
plt.plot(np.arange(len(L)),L,'b')
plt.ylim(-1.0,25)
plt.title('b = {}'.format(b))
plt.xticks([0,400,800])
plt.yticks([])
for i in range(5):
plt.subplot(4, 5, i+1)
plot_b(1.9+i*0.1)
plot_grid()
if i == 0:
plt.yticks([0,10,20])
plt.ylabel('Loss',fontsize=font_size)
plt.text(-5,5,'Training',rotation=90,fontsize=font_size)
else:
plt.yticks([0, 10, 20], ['', '', ''])
if i == 2:
plt.xlabel('Iterations',fontsize=font_size)
for i in range(5):
plt.subplot(4, 5, 5 + i+1)
plot_trajec(A[0,i,:,0:10000:10],1.9+i*0.1)
plot_grid()
if i == 0:
plt.yticks([-10,-5,0,5,10])
plt.ylabel(r'$\theta$',fontsize=font_size)
plt.text(-1,-5,'Trajectory',rotation=90,fontsize=font_size)
else:
plt.yticks([-10,-5, 0,5, 10], ['', '', '','',''])
if i == 2:
plt.xlabel('Time',fontsize=font_size)
for i in range(5):
plt.subplot(4, 5, 10 + i+1)
drawV(1.9+i*0.1)
if i == 0:
plt.yticks([-5,0,5])
plt.ylabel(r'$\dot{\theta}$',fontsize=font_size)
plt.text(-15,-5,'Lyapunov V',rotation=90,fontsize=font_size)
if i == 2:
plt.xlabel(r'$\theta$',fontsize=font_size)
plt.colorbar()
for i in range(5):
plt.subplot(4, 5, 15 + i+1)
draw(1.9+i*0.1)
if i == 0:
plt.yticks([-5,0,5])
plt.ylabel(r'$\dot{\theta}$',fontsize=font_size)
plt.text(-15,-3,'Control u',rotation=90,fontsize=font_size)
if i == 2:
plt.xlabel(r'$\theta$',fontsize=font_size)
plt.colorbar()
plt.show() | 2,394 | 26.848837 | 106 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/harmonic/plot_loss.py | import numpy as np
import matplotlib.pyplot as plt
import torch
import pylustrator
pylustrator.start()
import seaborn as sns
sns.set_theme(style="whitegrid")
L1 = torch.load('./data/harmonic/loss_icnn.pt')[2:] # delete large first tow numbers
L2 = torch.load('./data/harmonic/loss_quad.pt')
L3 = torch.load('./data/harmonic/loss_AS.pt')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)
plt.subplot(231)
plt.plot(np.arange(len(L1)),L1,'b')
plt.ylim(-0.1,13)
plt.title('ES+ICNN')
plt.subplot(232)
plt.plot(np.arange(len(L2)),L2,'b')
plt.ylim(-0.1,13)
plt.title('ES+Quad')
plt.subplot(233)
plt.plot(np.arange(len(L3)),L3,'b')
plt.ylim(-0.1,13)
plt.title('AS')
plt.subplot(234)
plt.plot(np.arange(len(L1)),L1,'b')
plt.ylim(-0.1,1)
plt.subplot(235)
plt.plot(np.arange(len(L2)),L2,'b')
plt.ylim(-0.1,1)
plt.subplot(236)
plt.plot(np.arange(len(L3)),L3,'b')
plt.ylim(-0.1,1)
#% start: automatic generated code from pylustrator
plt.figure(1).ax_dict = {ax.get_label(): ax for ax in plt.figure(1).axes}
import matplotlib as mpl
plt.figure(1).set_size_inches(14.340000/2.54, 6.370000/2.54, forward=True)
plt.figure(1).axes[0].set_xlim(-8.3, 174.3)
plt.figure(1).axes[0].set_xticks([0.0, 50.0, 100.0, 150.0])
plt.figure(1).axes[0].set_xticklabels(["0", "50", "100", "150"], fontsize=11.0, fontweight="normal", color=".15", fontstyle="normal", fontname="Arial", horizontalalignment="center")
plt.figure(1).axes[0].grid(False)
plt.figure(1).axes[0].set_position([0.095838, 0.201885, 0.255811, 0.697830])
plt.figure(1).axes[0].get_xaxis().get_label().set_text("Iterations")
plt.figure(1).axes[0].get_yaxis().get_label().set_text("Loss")
plt.figure(1).axes[1].set_xlim(-7.75, 162.75)
plt.figure(1).axes[1].set_xticks([0.0, 50.0, 100.0, 150.0])
plt.figure(1).axes[1].set_xticklabels(["0", "50", "100", "150"], fontsize=11.0, fontweight="normal", color=".15", fontstyle="normal", fontname="Arial", horizontalalignment="center")
plt.figure(1).axes[1].grid(False)
plt.figure(1).axes[1].set_position([0.409361, 0.201885, 0.255811, 0.697830])
plt.figure(1).axes[1].lines[0].set_color("#4c72b0")
plt.figure(1).axes[1].lines[0].set_markeredgecolor("#4c72b0")
plt.figure(1).axes[1].lines[0].set_markerfacecolor("#4c72b0")
plt.figure(1).axes[1].get_xaxis().get_label().set_text("Iterations")
plt.figure(1).axes[2].set_xlim(-9.200000000000001, 193.2)
plt.figure(1).axes[2].set_xticks([0.0, 50.0, 100.0, 150.0])
plt.figure(1).axes[2].set_xticklabels(["0", "50", "100", "150"], fontsize=11.0, fontweight="normal", color=".15", fontstyle="normal", fontname="Arial", horizontalalignment="center")
plt.figure(1).axes[2].grid(False)
plt.figure(1).axes[2].set_position([0.722885, 0.201885, 0.255811, 0.697830])
plt.figure(1).axes[2].lines[0].set_color("#4c72b0")
plt.figure(1).axes[2].lines[0].set_markeredgecolor("#4c72b0")
plt.figure(1).axes[2].lines[0].set_markerfacecolor("#4c72b0")
plt.figure(1).axes[2].get_xaxis().get_label().set_text("Iterations")
plt.figure(1).axes[3].grid(False)
plt.figure(1).axes[3].set_position([0.198784, 0.478804, 0.152863, 0.321584])
plt.figure(1).axes[3].lines[0].set_color("#4c72b0")
plt.figure(1).axes[3].lines[0].set_markeredgecolor("#4c72b0")
plt.figure(1).axes[3].lines[0].set_markerfacecolor("#4c72b0")
plt.figure(1).axes[4].grid(False)
plt.figure(1).axes[4].set_position([0.512309, 0.478804, 0.152863, 0.321584])
plt.figure(1).axes[4].lines[0].set_color("#4c72b0")
plt.figure(1).axes[4].lines[0].set_markeredgecolor("#4c72b0")
plt.figure(1).axes[4].lines[0].set_markerfacecolor("#4c72b0")
plt.figure(1).axes[5].grid(False)
plt.figure(1).axes[5].set_position([0.828954, 0.463271, 0.149744, 0.337116])
plt.figure(1).axes[5].lines[0].set_color("#4c72b0")
plt.figure(1).axes[5].lines[0].set_markeredgecolor("#4c72b0")
plt.figure(1).axes[5].lines[0].set_markerfacecolor("#4c72b0")
#% end: automatic generated code from pylustrator
plt.show() | 3,889 | 45.86747 | 181 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/harmonic/AS.py | import torch
import torch.nn.functional as F
import timeit
class Net(torch.nn.Module):
    """Controller network: a two-hidden-layer ReLU MLP.

    The fixed seed in __init__ makes equal-size instances start from identical
    weights, so saved checkpoints reload deterministically.
    """
    def __init__(self,n_input,n_hidden,n_output):
        super(Net, self).__init__()
        torch.manual_seed(2)
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden,n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden,n_output)
    def forward(self,x):
        # NOTE(review): the local is named `sigmoid` but the activation is ReLU.
        sigmoid = torch.nn.ReLU()
        h_1 = sigmoid(self.layer1(x))
        h_2 = sigmoid(self.layer2(h_1))
        out = self.layer3(h_2)
        return out
# Drift function
def harmonic(x):
    """Drift of the damped harmonic oscillator: dx1 = x2, dx2 = -x1 - 2*beta*x2.

    Args:
        x: tensor of shape (N, 2) of states [position, velocity].

    Returns:
        Tensor of shape (N, 2) with the drift evaluated at each row.
    """
    beta = 0.5  # damping coefficient
    # Vectorized over the batch; equivalent to appending [x2, -x1-2*beta*x2]
    # per row, without the deprecated torch.tensor(list-of-tensors) pattern.
    return torch.stack([x[:, 1], -x[:, 0] - 2 * beta * x[:, 1]], dim=1)
# Add control
def harmonic_control(x, u):
    """Diffusion term: fixed linear feedback [0, k1*x1 + k2*x2] plus the learned control u.

    Args:
        x: tensor of shape (N, 2) of states.
        u: tensor of shape (N, 2), learned control evaluated at x.

    Returns:
        Tensor of shape (N, 2).
    """
    k1, k2 = -3, 2.15  # fixed destabilizing feedback gains
    # Vectorized equivalent of the original per-row loop plus column additions.
    base = torch.stack(
        [torch.zeros_like(x[:, 0]), k1 * x[:, 0] + k2 * x[:, 1]],
        dim=1,
    )
    return base + u
'''
For learning
'''
N = 500 # sample size
D_in = 2 # input dimension
H1 = 6 # hidden dimension
D_out = 2 # output dimension
torch.manual_seed(10)
x = torch.Tensor(N, D_in).uniform_(-6, 6)
x_0 = torch.zeros_like(x)
theta = 0.75
out_iters = 0
while out_iters < 1:
# break
start = timeit.default_timer()
model = Net(D_in,H1, D_out)
i = 0
t = 0
max_iters = 200
learning_rate = 0.05
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
L = []
while i < max_iters:
# start = timeit.default_timer()
out = model(x)
u = out*x
f = harmonic(x)
g = harmonic_control(x,u)
# Both loss are efficient
# loss = (2-theta)*torch.diagonal(torch.mm(x,g.T))**2-torch.diagonal(torch.mm(x,x.T))*torch.diagonal(2*torch.mm(x,f.T)+torch.mm(g,g.T))
loss = (2-theta)*((x*g)**2)-x**2*(2*x*f+g**2)
Lyapunov_risk = (F.relu(-loss)).mean()
L.append(Lyapunov_risk)
print(i, "Lyapunov Risk=",Lyapunov_risk.item())
optimizer.zero_grad()
Lyapunov_risk.backward()
optimizer.step()
if Lyapunov_risk < 2.0:
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
else:
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
if Lyapunov_risk == 0:
break
# stop = timeit.default_timer()
# print('per :', stop-start)
i += 1
stop = timeit.default_timer()
print('\n')
print("Total time: ", stop - start)
out_iters+=1
# torch.save(torch.tensor(L), './data/harmonic/loss_AS.pt')
# torch.save(model.state_dict(), './data/harmonic/algo2_net.pkl') | 2,766 | 26.39604 | 143 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/harmonic/generate.py | import numpy as np
import math
import torch
import numpy as np
import timeit
from AS import *
from Control_Nonlinear_Icnn import *
start = timeit.default_timer()
# Harmonic linear oscillator
model = Net(D_in,H1,D_out)
# Generate trajectory with nonlinear AS control
def algo2(z,X,N,dt):
    """Euler-Maruyama rollout of the damped oscillator under the learned AS control.

    z: noise increments (one per step); X: list holding the initial state
    [x1, x2] (extended in place); N: number of steps; dt: step size.
    Returns the whole trajectory as a tensor of shape (N+1, 2).
    """
    model = Net(D_in,H1,D_out)
    model.load_state_dict(torch.load('./data/harmonic/algo2_net.pkl'))
    beta = 0.5
    for i in range(N):
        x = X[i]
        with torch.no_grad():
            u = model(torch.tensor(x))
        # -model((torch.tensor([0.0,0.0])))
        x1,x2 = x[0],x[1]
        # multiplicative control: u[k] scales its own state component in the noise term
        new_x1 = x1 + x2*dt + math.sqrt(dt)*z[i]*u[0]*x1
        new_x2 = x2 + (-x1-2*beta*x2)*dt + z[i]*(-3*x1+2.15*x2+u[1]*x2)*math.sqrt(dt)
        X.append([new_x1,new_x2])
    X = torch.tensor(X)
    return X
# Generate trajectory with linear ES(+Quadratic) control
def algo1(z, X, N, dt, a, b, c, d):
    """Euler-Maruyama rollout of the damped oscillator under a linear stochastic control.

    z: noise increments (one per step); X: list holding the initial state
    [x1, x2] (extended in place); N: number of steps; dt: step size;
    a, b, c, d: gains of the linear control (a*x1+b*x2 on channel 1,
    c*x1+d*x2 on channel 2). Returns the trajectory as a (N+1, 2) tensor.
    """
    beta = 0.5
    sqrt_dt = math.sqrt(dt)  # invariant, hoisted out of the loop
    for step in range(N):
        p, v = X[step][0], X[step][1]
        noise = z[step]
        diff_p = sqrt_dt * noise * (a * p + b * v)
        diff_v = noise * (-3 * p + 2.15 * v + c * p + d * v) * sqrt_dt
        X.append([p + v * dt + diff_p,
                  v + (-p - 2 * beta * v) * dt + diff_v])
    return torch.tensor(X)
# Generate trajectory with nonlinear ES(+ICNN) control
def algo_icnn(z,X,N,dt):
model2 = ControlNet(D_in,H1,D_out)
model2.load_state_dict(torch.load('./data/harmonic/icnn_net.pkl'))
beta = 0.5
for i in range(N):
x = X[i]
with torch.no_grad():
u = model2(torch.tensor(x))
x1,x2 = x[0],x[1]
new_x1 = x1 + x2*dt + math.sqrt(dt)*z[i]*u[0]*x1
new_x2 = x2 + (-x1-2*beta*x2)*dt + z[i]*(-3*x1+2.15*x2+u[1]*x2)*math.sqrt(dt)
X.append([new_x1,new_x2])
X = torch.tensor(X)
return X
def generate(m,N,dt):
X,Y,Z,W = torch.zeros(m,N+1,2),torch.zeros(m,N+1,2),torch.zeros(m,N+1,2),torch.zeros(m,N+1,2)
for r in range(m):
# x0 = [0.3,0.5] #Fixed initial
x0 = [np.random.uniform(-2,2),np.random.uniform(-2,2)] #random initial
np.random.seed(12*r)
z = np.random.normal(0,1,N)
X[r,:] = algo1(z,[x0],N,dt,0,0,0,0) # Without control
Y[r,:] = algo_icnn(z,[x0],N,dt)
Z[r,:] = algo1(z,[x0],N,dt,1.726,-0.4946,2.0548,0.3159) #Quadratic 2.2867,0.3492,1.593,-0.4191 61.6973088
W[r,:] = algo2(z,[x0],N,dt)
print(r)
return {'X':X,'Y':Y,'Z':Z,'W':W}
# Sample numbers, Iterations in per trajectory, and sample time interval : 20,400000,0.00001
torch.save(generate(20,400000,0.00001),'./data/harmonic/data_long.pt')
torch.save(generate(20,400000,0.00001),'./data/harmonic/data_long_random.pt')
stop = timeit.default_timer()
print('total time:',stop-start)
| 2,835 | 33.585366 | 113 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/harmonic/ES_ICNN.py | import torch
import torch.nn.functional as F
import timeit
from hessian import hessian
from hessian import jacobian
from Control_Nonlinear_Icnn import *
# Drift function
def harmonic(x):
    """Drift of the damped harmonic oscillator: rows are [x2, -x1 - 2*beta*x2] for each row of x (shape (N, 2))."""
    y = []
    beta = 0.5
    for i in range(0,len(x)):
        f = [x[i,1],-x[i,0]-2*beta*x[i,1]]
        y.append(f)
    y = torch.tensor(y)
    return y
# Add stochastic control
def harmonic_control(x,u):
    """Diffusion term: fixed feedback [0, k1*x1 + k2*x2] per row plus the learned control u (both shape (N, 2))."""
    y = []
    k1,k2 = -3,2.15
    for i in range(0,len(x)):
        f = [0.0,k1*x[i,0]+k2*x[i,1]]
        y.append(f)
    y = torch.tensor(y)
    y[:,0] = y[:,0] + u[:,0]
    y[:,1] = y[:,1] + u[:,1]
    return y
'''
For learning
'''
N = 500 # sample size
D_in = 2 # input dimension
H1 = 6 # hidden dimension
D_out = 2 # output dimension
torch.manual_seed(10)
x = torch.Tensor(N, D_in).uniform_(-6, 6)
eps = 0.001
out_iters = 0
while out_iters < 1:
# break
start = timeit.default_timer()
model = LyapunovFunction(D_in,H1,D_out,(D_in,),0.1,[6,6,1],eps)
i = 0
t = 0
max_iters = 200
learning_rate = 0.1
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
L = []
while i < max_iters:
# start = timeit.default_timer()
output, u = model(x)
f = harmonic(x)
g = harmonic_control(x,u)
x = x.clone().detach().requires_grad_(True)
ws = model._icnn._ws
bs = model._icnn._bs
us = model._icnn._us
smooth = model.smooth_relu
input_shape = (D_in,)
V1 = lya(ws,bs,us,smooth,x,input_shape)
V0 = lya(ws,bs,us,smooth,torch.zeros_like(x),input_shape)
num_V = smooth(V1-V0)+eps*x.pow(2).sum(dim=1)
V = torch.sum(smooth(V1-V0)+eps*x.pow(2).sum(dim=1))
Vx = jacobian(V,x)
Vxx = hessian(V,x)
loss = torch.zeros(N)
for r in range(N):
L_V = torch.sum(Vx[0,2*r:2*r+2]*f[r,:]) + 0.5*torch.mm(g[r,:].unsqueeze(0),torch.mm(Vxx[2*r:2*r+2,2*r:2*r+2],g[r,:].unsqueeze(1)))
Vxg = torch.sum(Vx[0,2*r:2*r+2]*g[r,:])
v = num_V[0,r]
loss[r] = Vxg**2/(v**2) - 2.1*L_V/v
Lyapunov_risk = (F.relu(-loss)).mean()
L.append(Lyapunov_risk)
print(i, "Lyapunov Risk=",Lyapunov_risk.item())
optimizer.zero_grad()
Lyapunov_risk.backward()
optimizer.step()
if Lyapunov_risk < 2.0:
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
else:
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
if Lyapunov_risk < 0.001:
break
# stop = timeit.default_timer()
# print('per:',stop-start)
i += 1
# torch.save(torch.tensor(L),'./data/harmonic/loss_icnn.pt')
# torch.save(model._control.state_dict(),'./data/harmonic/icnn_net.pkl')
stop = timeit.default_timer()
print('\n')
print("Total time: ", stop - start)
out_iters+=1
| 2,972 | 26.527778 | 142 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/harmonic/ES_Quadratic.py | import torch
import torch.nn.functional as F
import timeit
from hessian import hessian
from hessian import jacobian
class Net(torch.nn.Module):
    """Joint model: a one-hidden-layer tanh MLP for the Lyapunov features and a
    bias-free linear layer for the 2-d control. Seeded init keeps equal-size
    instances identical.
    """

    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        torch.manual_seed(2)  # deterministic weights
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_output)
        self.control = torch.nn.Linear(n_input, 2, bias=False)

    def forward(self, x):
        features = self.layer2(torch.tanh(self.layer1(x)))
        u = self.control(x)
        return features, u
def harmonic(x):
    """Drift of the damped harmonic oscillator: dx1 = x2, dx2 = -x1 - 2*beta*x2.

    Args:
        x: tensor of shape (N, 2) of states [position, velocity].

    Returns:
        Tensor of shape (N, 2) with the drift evaluated at each row.
    """
    beta = 0.5  # damping coefficient
    # Vectorized over the batch; equivalent to appending [x2, -x1-2*beta*x2]
    # per row, without the deprecated torch.tensor(list-of-tensors) pattern.
    return torch.stack([x[:, 1], -x[:, 0] - 2 * beta * x[:, 1]], dim=1)
def harmonic_control(x, u):
    """Diffusion term: fixed linear feedback [0, k1*x1 + k2*x2] plus the learned control u.

    Args:
        x: tensor of shape (N, 2) of states.
        u: tensor of shape (N, 2), learned control evaluated at x.

    Returns:
        Tensor of shape (N, 2).
    """
    k1, k2 = -3, 2.15  # fixed destabilizing feedback gains
    # Vectorized equivalent of the original per-row loop plus column additions.
    base = torch.stack(
        [torch.zeros_like(x[:, 0]), k1 * x[:, 0] + k2 * x[:, 1]],
        dim=1,
    )
    return base + u
'''
For learning
'''
N = 500 # sample size
D_in = 2 # input dimension
H1 = 6 # hidden dimension
D_out = 2 # output dimension
torch.manual_seed(10)
x = torch.Tensor(N, D_in).uniform_(-6, 6)
l = 0.01
x_0 = torch.zeros([1,2])
out_iters = 0
# valid = False
while out_iters < 1:
start = timeit.default_timer()
model = Net(D_in,H1, D_out)
i = 0
max_iters = 200
learning_rate = 0.03
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
L = []
while i < max_iters:
# start = timeit.default_timer()
V_net, u = model(x)
W1 = model.layer1.weight
W2 = model.layer2.weight
B1 = model.layer1.bias
B2 = model.layer2.bias
X0,u0 = model(x_0)
f = harmonic(x)
g = harmonic_control(x,u)
x = x.clone().detach().requires_grad_(True)
output = torch.mm(torch.tanh(torch.mm(x,W1.T)+B1),W2.T)+B2
# V = torch.sum(output)
num_v = torch.sum(l*x*x + ( x*output)**2,1)
# num_v = torch.sum(output,1)
V = torch.sum(l*x*x + (x*output)**2)
Vx = jacobian(V,x)
Vxx = hessian(V,x)
loss = torch.zeros(N)
for r in range(N):
L_V = torch.sum(Vx[0,2*r:2*r+2]*f[r,:]) + 0.5*torch.mm(g[r,:].unsqueeze(0),torch.mm(Vxx[2*r:2*r+2,2*r:2*r+2],g[r,:].unsqueeze(1)))
Vxg = torch.sum(Vx[0,2*r:2*r+2]*g[r,:])
v = num_v[r]
loss[r] = Vxg**2/(v**2) - 2.1*L_V/v
Lyapunov_risk = (F.relu(-loss)).mean()
L.append(Lyapunov_risk)
print(i, "Lyapunov Risk=",Lyapunov_risk.item())
optimizer.zero_grad()
Lyapunov_risk.backward()
optimizer.step()
if Lyapunov_risk < 2.0:
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
else:
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
if Lyapunov_risk < 0.001:
break
# stop = timeit.default_timer()
# print('per:',stop-start)
q = model.control.weight.data.numpy()
i += 1
print(q)
stop = timeit.default_timer()
print('\n')
print("Total time: ", stop - start)
out_iters+=1
# torch.save(torch.tensor(L),'./data/harmonic/loss_quad.pt') | 3,376 | 26.016 | 142 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/harmonic/plot.py | import numpy as np
import matplotlib.pyplot as plt
import torch
import matplotlib
matplotlib.rcParams['font.sans-serif'] = 'NSimSun,Times New Roman'
matplotlib.rcParams['text.usetex'] = True
import sys
sys.path.append('./data/harmonic')
'''
Data is a dictionary {'X', 'Y', 'Z', 'W'} holding 20 sample trajectories of
the original system and of the systems under ES+ICNN, ES+Quad, and AS control.
Euler-Maruyama integration uses dt = 1e-5 with N = 400000 steps, so the data
size for each system is [20, 400001, 2].
'''
# Shared plotting configuration and sub-sampled trajectory data.
colors = [
    [233/256, 110/256, 236/256], # #e96eec
    # [0.6, 0.6, 0.2], # olive
    # [0.5333333333333333, 0.13333333333333333, 0.3333333333333333], # wine
    [255/255, 165/255, 0],
    # [0.8666666666666667, 0.8, 0.4666666666666667], # sand
    # [223/256, 73/256, 54/256], # #df4936
    [107/256, 161/256,255/256], # #6ba1ff
    [0.6, 0.4, 0.8], # amethyst
    [0.0, 0.0, 1.0], # ao
    [0.55, 0.71, 0.0], # applegreen
    # [0.4, 1.0, 0.0], # brightgreen
    [0.99, 0.76, 0.8], # bubblegum
    [0.93, 0.53, 0.18], # cadmiumorange
    [11/255, 132/255, 147/255], # deblue
    [204/255, 119/255, 34/255], # {ocra}
]
colors = np.array(colors)
alpha = 0.1
methods = ['ES+ICNN', 'ES+Quad', 'AS']
fontsize = 35
fontsize_legend = 17
framealpha = 0.7
legend_loc = "lower right"
shade_color = colors[0]
labelpad=-30
linewidth = 3
sc_step = 10            # sub-sampling factor applied to the stored time axis
delt_step = 10 * sc_step
data = torch.load('./data/harmonic/data_long.pt') # fixed initial (0.3,0.5)
# data = torch.load('./data/harmonic/data_long_random.pt') # random initial
X = data['X'][:,0:300001:delt_step,:]#Original system
Y = data['Y'][:,0:300001:delt_step,:]#ES+ICNN
Z = data['Z'][:,0:300001:delt_step,:]#ES+Quad
W = data['W'][torch.tensor([0,1,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19]),0:300001:delt_step,:]
# Two trajectories diverge due to the dt in Euler method is not small enough,we test these two trajectories
# with random seed 12*2 and 12*12 with dt = 1e-6, and the corresponding trajectories converge to zero.
mid_init = 24000 // sc_step #Start of small time
mid_end = 30000 // sc_step #End of small time
target_big_X_lim = [24000.0 / sc_step, 30000.0 /sc_step]
# target_small_X_lim = [-300.0, 6300.0]
target_small_X_lim = [0.0, 6000.0 / sc_step]
x1_ylim = [-20, 20]
def plot_grid():
    """Draw major and minor grid lines on the current axes.

    matplotlib renamed ``grid``'s first parameter from ``b`` to ``visible``
    in 3.5 and removed ``b`` in 3.6, so ``plt.grid(b=True, ...)`` raises on
    modern matplotlib; passing the flag positionally works on all versions.
    """
    # major grid lines
    plt.grid(True, which='major', color='gray', alpha=0.6, linestyle='dashdot', lw=1.5)
    # minor grid lines
    plt.minorticks_on()
    plt.grid(True, which='minor', color='beige', alpha=0.8, ls='-', lw=1)
def plt_x1_ylim():
    """Clamp the current axes to the shared x1 y-range."""
    plt.ylim(*x1_ylim)
def plt_tick_time_0_to_3():
    """Set x-axis limits/ticks for the full time window [0, 3.0]."""
    plt.xlim(-1500.0 / sc_step, 31500.0 / sc_step)
    positions = [step / sc_step for step in (0.0, 10000.0, 20000.0, 30000.0)]
    plt.xticks(positions, ["$0$", "$1.0$", "$2.0$", "$3.0$"])
def plt_tick_time_24_to_30(case=1):
    """Set axes for the zoom window, time [2.4, 3.0]; ``case`` picks the y-ticks."""
    plt.xlim(0, 6000/sc_step)
    positions = [step / sc_step for step in (0.0, 2000.0, 4000.0, 6000.0)]
    plt.xticks(positions, ["$2.4$", "$2.6$", "$2.8$", "$3.0$"])
    if case == 1:
        plt.ylim(-0.23, 0.23)
        plt.yticks([-0.2, -0.1, 0, 0.1, 0.2], ["$-0.2$", "$~$", "$0$", "$~$", "$0.2$"])
    if case == 2:
        plt.ylim(-0.23, 0.23)
        plt.yticks([-0.2, -0.1, 0, 0.1, 0.2], ["$-0.2$", "$~$", "$0$", "$~$", "$0.2$"])
def plot_x1(subfig=1):
    """Plot the x1 component: subfig 1 = uncontrolled system, 2 = all three
    controlled systems over [0, 3], 3 = zoom on [2.4, 3.0].

    Uses module-level trajectory tensors X, Y, Z, W (mean +/- std over seeds).
    """
    X1 = X[:,:,0]#x1 component of original system
    mean_x1 = torch.mean(X1,0)
    std_x1 = torch.std(X1,0)
    Y1 = Y[:,:,0]#x1 component of ES(+ICNN)
    mean_y1 = torch.mean(Y1,0)
    std_y1 = torch.std(Y1,0)
    Z1 = Z[:,:,0]#x1 component of ES(+Quadratic)
    mean_z1 = torch.mean(Z1,0)
    std_z1 = torch.std(Z1,0)
    W1 = W[:,:,0]#x1 component of AS
    mean_w1 = torch.mean(W1,0)
    std_w1 = torch.std(W1,0)
    # plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)
    if subfig==1:
        # plt.subplot(251)
        plt.fill_between(np.arange(X1.size(1)),mean_x1-std_x1,mean_x1+std_x1,color='r',alpha=alpha)
        plt.plot(np.arange(X1.size(1)),mean_x1,color='r',alpha=0.5,label=r"$x_1$", linewidth=linewidth)
        # plt.title('Original System', fontsize=fontsize)
        plt_tick_time_0_to_3()
        # plt.ylabel(r"$x_1$", fontsize=fontsize)
        # plt.xlabel("Time", fontsize=fontsize)
        plt.tick_params(labelsize=fontsize)
    if subfig==2:
        # plt.subplot(232)
        plt.fill_between(np.arange(Y1.size(1)),mean_y1-std_y1,mean_y1+std_y1,color='r',alpha=alpha)
        plt.plot(np.arange(Y1.size(1)),mean_y1,color='r',alpha=0.5,label=methods[0], linewidth=linewidth)
        plt.fill_between(np.arange(mean_z1.size(0)),mean_z1-std_z1,mean_z1+std_z1,color='b',alpha=alpha)
        plt.plot(np.arange(mean_z1.size(0)),mean_z1,color='b',alpha=0.5,label=methods[1], linewidth=linewidth)
        plt.fill_between(np.arange(W1.size(1)),mean_w1-std_w1,mean_w1+std_w1,color='g',alpha=alpha)
        plt.plot(np.arange(W1.size(1)),mean_w1,color='g',alpha=0.5,label=methods[2], linewidth=linewidth)
        plt.legend(fontsize=fontsize_legend, framealpha=framealpha, loc=legend_loc)
        # plt.title('ES(ICNN), ES(Quad), AS')
        plt.xlabel("Time", fontsize=fontsize)
        plt_tick_time_0_to_3()
        plt_x1_ylim()
        plt.tick_params(labelsize=fontsize)
    if subfig==3:
        # plt.subplot(233)
        #Tune time
        # restrict to the zoom window [mid_init, mid_end)
        mean_y1 = mean_y1[mid_init:mid_end]
        std_y1 = std_y1[mid_init:mid_end]
        mean_z1 = mean_z1[mid_init:mid_end]
        std_z1 = std_z1[mid_init:mid_end]
        mean_w1 = mean_w1[mid_init:mid_end]
        std_w1 = std_w1[mid_init:mid_end]
        plt.fill_between(np.arange(mean_y1.size(0)),mean_y1-std_y1,mean_y1+std_y1,color='r',alpha=alpha)
        plt.plot(np.arange(mean_y1.size(0)),mean_y1,color='r',alpha=0.5,label='mean value', linewidth=linewidth)
        plt.fill_between(np.arange(mean_z1.size(0)),mean_z1-std_z1,mean_z1+std_z1,color='b',alpha=alpha)
        plt.plot(np.arange(mean_z1.size(0)),mean_z1,color='b',alpha=0.5,label='mean value', linewidth=linewidth)
        plt.fill_between(np.arange(mean_w1.size(0)),mean_w1-std_w1,mean_w1+std_w1,color='g',alpha=alpha)
        plt.plot(np.arange(mean_w1.size(0)),mean_w1,color='g',alpha=0.5,label='mean value', linewidth=linewidth)
        # plt.title('Time Magnify : [2.4,3.0]')
        plt_tick_time_24_to_30(case=1)
        plt.tick_params(labelsize=fontsize)
def plot_x2(subfig=1):
    """Plot the x2 component; same subfig layout as :func:`plot_x1`."""
    #Plot x2 component
    mean_x1 = torch.mean(X[:,:,1],0)
    std_x1 = torch.std(X[:,:,1],0)
    mean_y1 = torch.mean(Y[:,:,1],0)
    std_y1 = torch.std(Y[:,:,1],0)
    mean_z1 = torch.mean(Z[:,:,1],0)
    std_z1 = torch.std(Z[:,:,1],0)
    mean_w1 = torch.mean(W[:,:,1],0)
    std_w1 = torch.std(W[:,:,1],0)
    if subfig==1:
        # plt.subplot(256)
        plt.fill_between(np.arange(mean_x1.size(0)),mean_x1-std_x1,mean_x1+std_x1,color='g',alpha=alpha)
        plt.plot(np.arange(mean_x1.size(0)),mean_x1,color='g',alpha=0.5,label=r"$x_2$", linewidth=linewidth)
        # plt.ylabel(r"$x_2$", fontsize=fontsize)
        # plt.xlabel("Time", fontsize=fontsize)
        plt_tick_time_0_to_3()
        plt.tick_params(labelsize=fontsize)
    if subfig==2:
        # plt.subplot(235)
        plt.fill_between(np.arange(mean_y1.size(0)),mean_y1-std_y1,mean_y1+std_y1,color='r',alpha=alpha)
        plt.plot(np.arange(mean_y1.size(0)),mean_y1,color='r',alpha=0.5,label=methods[0], linewidth=linewidth)
        plt.fill_between(np.arange(mean_z1.size(0)),mean_z1-std_z1,mean_z1+std_z1,color='b',alpha=alpha)
        plt.plot(np.arange(mean_z1.size(0)),mean_z1,color='b',alpha=0.5,label=methods[1], linewidth=linewidth)
        plt.fill_between(np.arange(mean_w1.size(0)),mean_w1-std_w1,mean_w1+std_w1,color='g',alpha=alpha)
        plt.plot(np.arange(mean_w1.size(0)),mean_w1,color='g',alpha=0.5,label=methods[2], linewidth=linewidth)
        plt.xlabel("Time", fontsize=fontsize)
        plt_tick_time_0_to_3()
        plt.legend(fontsize=fontsize_legend, framealpha=framealpha, loc=legend_loc)
        plt_x1_ylim()
        plt.tick_params(labelsize=fontsize)
    if subfig==3:
        # plt.subplot(236)
        #Tune time
        # restrict to the zoom window [mid_init, mid_end)
        mean_y1 = mean_y1[mid_init:mid_end]
        std_y1 = std_y1[mid_init:mid_end]
        mean_z1 = mean_z1[mid_init:mid_end]
        std_z1 = std_z1[mid_init:mid_end]
        mean_w1 = mean_w1[mid_init:mid_end]
        std_w1 = std_w1[mid_init:mid_end]
        plt.fill_between(np.arange(mean_y1.size(0)),mean_y1-std_y1,mean_y1+std_y1,color='r',alpha=alpha)
        plt.plot(np.arange(mean_y1.size(0)),mean_y1,color='r',alpha=0.5,label='mean value', linewidth=linewidth)
        plt.fill_between(np.arange(mean_z1.size(0)),mean_z1-std_z1,mean_z1+std_z1,color='b',alpha=alpha)
        plt.plot(np.arange(mean_z1.size(0)),mean_z1,color='b',alpha=0.5,label='mean value', linewidth=linewidth)
        plt.fill_between(np.arange(mean_w1.size(0)),mean_w1-std_w1,mean_w1+std_w1,color='g',alpha=alpha)
        plt.plot(np.arange(mean_w1.size(0)),mean_w1,color='g',alpha=0.5,label='mean value', linewidth=linewidth)
        # plt.xlabel("Time", fontsize=fontsize)
        plt_tick_time_24_to_30(case=2)
        plt.tick_params(labelsize=fontsize)
from matplotlib.patches import ConnectionPatch
import matplotlib.gridspec as gridspec
# Assemble the composite figure: big panels for the full trajectories and
# small panels zooming on [2.4, 3.0], linked by ConnectionPatch lines.
gs = gridspec.GridSpec(16, 16)
fig = plt.figure(figsize=(18, 12))
# plt.subplot(231)
plt.subplot(gs[0:7, 0:4])
plot_x1(subfig=1)
plot_x2(subfig=1)
plt.legend(fontsize=fontsize_legend, framealpha=framealpha)
plot_grid()
plt.ylim(-10, 10)
plt.yticks([-10, -5, 0, 5, 10], ["$-10$", "$~$", "$0$", "$~$", "$10$"])
# sub_x1 = plt.subplot(223)
sub_x1 = plt.subplot(gs[9:16, 0:7])
plot_x1(subfig=2)
plt.ylabel('$x_1$', fontsize=fontsize, labelpad=labelpad)
# shaded band marks the region magnified in the small panel
sub_x1.fill_between((target_big_X_lim[0],target_big_X_lim[1]), -20, 30, facecolor=shade_color, alpha=0.2)
plot_grid()
plt.ylim(-20, 20)
plt.yticks([-20, -10, 0, 10, 20], ["$-20$", "$~$", "$0$", "$~$", "$20$"])
a = 0.47
# sub_x1_small = fig.add_axes([a, 0.58, 0.1, 0.1])
# sub_x1_small = plt.subplot(232)
sub_x1_small = plt.subplot(gs[0:7, 6:10])
plt.ylabel('$x_1$', fontsize=fontsize, labelpad=labelpad)
# a, b = 5, 10
# sub_x1_small = plt.subplot(a, b, 2*b + b//2)
plot_x1(subfig=3)
plot_grid()
# connector lines from the shaded band to the zoom panel
con1 = ConnectionPatch(xyA=(target_big_X_lim[0], 0), coordsA=sub_x1.transData,
                       xyB=(target_small_X_lim[0], -0.23/2), coordsB=sub_x1_small.transData, color =shade_color)
fig.add_artist(con1)
con2 = ConnectionPatch(xyA=(target_big_X_lim[1], 0), coordsA=sub_x1.transData,
                       xyB=(target_small_X_lim[1], -0.23/2), coordsB=sub_x1_small.transData, color =shade_color)
fig.add_artist(con2)
# plt.subplot(256)
# plot_x2(subfig=1)
# sub_x1 = plt.subplot(224)
sub_x1 = plt.subplot(gs[9:16, 9:16])
plot_x2(subfig=2)
sub_x1.fill_between((target_big_X_lim[0],target_big_X_lim[1]), -20, 30, facecolor=shade_color, alpha=0.2)
plot_grid()
plt.ylabel('$x_2$', fontsize=fontsize, labelpad=labelpad)
plt.ylim(-20, 20)
plt.yticks([-20, -10, 0, 10, 20], ["$-20$", "$~$", "$0$", "$~$", "$20$"])
# sub_x1_small = plt.subplot(233)
sub_x1_small = plt.subplot(gs[0:7, 12:16])
plot_x2(subfig=3)
plot_grid()
plt.ylabel('$x_2$', fontsize=fontsize, labelpad=labelpad)
con1 = ConnectionPatch(xyA=(target_big_X_lim[0], 0), coordsA=sub_x1.transData,
                       xyB=(target_small_X_lim[0], -0.23), coordsB=sub_x1_small.transData, color =shade_color)
fig.add_artist(con1)
con2 = ConnectionPatch(xyA=(target_big_X_lim[1], 0), coordsA=sub_x1.transData,
                       xyB=(target_small_X_lim[1], -0.23), coordsB=sub_x1_small.transData, color =shade_color)
fig.add_artist(con2)
plt.show() | 11,933 | 39.317568 | 114 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/harmonic/table1.py | import numpy as np
import torch
# Load the simulated trajectories used for the statistics in Table 1.
data = torch.load('./data/harmonic/data_long.pt')
# Calculate the data in table1
def L2_norm(st, a):
    """Print the minimum mean L2 trajectory norm of ``data[st]`` and the first
    time (dt = 1e-5 per step) that the mean norm drops below 0.05.

    Seeds 2 and 12 are excluded from the average (diverging trajectories).
    """
    kept = torch.tensor([0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19])
    traj = data[st][kept, :, :].detach().numpy()
    mean_norm = np.mean(np.linalg.norm(traj, axis=2), 0)
    below = np.where(mean_norm < 0.05)
    print('{} min :'.format(a), np.min(mean_norm))
    print('{} convergence time of 0.05:'.format(a), format(below[0][0] * 1e-5, '.3f'))
# Report statistics for each control scheme.
L2_norm('Y','ICNN')
L2_norm('Z','Quad')
L2_norm('W','AS')
| 513 | 24.7 | 83 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/Neural Stochastic Control/harmonic/Control_Nonlinear_Icnn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class ICNN(nn.Module):
    """Input-Convex Neural Network (Amos et al. style).

    Layer weights ``_ws`` act on the raw input, ``_us`` (passed through
    softplus, hence non-negative) act on the previous layer's activations,
    which preserves convexity in the input when the activation is convex
    and non-decreasing.
    """
    def __init__(self, input_shape, layer_sizes, activation_fn):
        super(ICNN, self).__init__()
        self._input_shape = input_shape
        self._layer_sizes = layer_sizes
        self._activation_fn = activation_fn
        ws = []
        bs = []
        us = []
        prev_layer = input_shape
        # first layer: input weight + bias only (no skip term yet)
        w = torch.empty(layer_sizes[0], *input_shape)
        nn.init.xavier_normal_(w)
        ws.append(nn.Parameter(w))
        b = torch.empty([layer_sizes[0], 1])
        nn.init.xavier_normal_(b)
        bs.append(nn.Parameter(b))
        for i in range(len(layer_sizes))[1:]:
            # each later layer gets an input weight, a bias, and a
            # non-negative weight on the previous hidden layer
            w = torch.empty(layer_sizes[i], *input_shape)
            nn.init.xavier_normal_(w)
            ws.append(nn.Parameter(w))
            b = torch.empty([layer_sizes[i], 1])
            nn.init.xavier_normal_(b)
            bs.append(nn.Parameter(b))
            u = torch.empty([layer_sizes[i], layer_sizes[i-1]])
            nn.init.xavier_normal_(u)
            us.append(nn.Parameter(u))
        self._ws = nn.ParameterList(ws)
        self._bs = nn.ParameterList(bs)
        self._us = nn.ParameterList(us)
    def forward(self, x):
        # x: [batch, data]
        if len(x.shape) < 2:
            x = x.unsqueeze(0)
        else:
            # move the batch axis last so addmm(b, w, x) works column-wise
            data_dims = list(range(1, len(self._input_shape) + 1))
            x = x.permute(*data_dims, 0)
        z = self._activation_fn(torch.addmm(self._bs[0], self._ws[0], x))
        for i in range(len(self._us)):
            # softplus keeps the hidden-to-hidden weights non-negative
            u = F.softplus(self._us[i])
            w = self._ws[i + 1]
            b = self._bs[i + 1]
            z = self._activation_fn(torch.addmm(b, w, x) + torch.mm(u, z))
        return z
class ControlNet(torch.nn.Module):
    """Three-layer ReLU MLP producing the raw control signal u(x)."""

    def __init__(self, n_input, n_hidden, n_output):
        super(ControlNet, self).__init__()
        torch.manual_seed(2)  # fixed seed -> reproducible initial weights
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        act = torch.nn.ReLU()
        hidden = act(self.layer2(act(self.layer1(x))))
        return self.layer3(hidden)
class LyapunovFunction(nn.Module):
    """ICNN-based Lyapunov candidate paired with a learned feedback control.

    V(x) = smooth_relu(ICNN(x) - ICNN(0)) + eps * ||x||^2, so V(0) = 0 and
    V(x) > 0 for x != 0. ``forward`` returns (V(x), u(x) * x).
    """
    def __init__(self, n_input, n_hidden, n_output, input_shape,
                 smooth_relu_thresh=0.1, layer_sizes=(64, 64), lr=3e-4, eps=1e-3):
        # Fix: the default for ``layer_sizes`` used to be the mutable list
        # [64, 64]; an immutable tuple default avoids the shared-mutable-
        # default pitfall while staying backward compatible (converted to a
        # list before use).
        super(LyapunovFunction, self).__init__()
        torch.manual_seed(2)  # reproducible initialisation
        self._d = smooth_relu_thresh
        self._icnn = ICNN(input_shape, list(layer_sizes), self.smooth_relu)
        self._eps = eps
        # ``lr`` is accepted for interface compatibility; it is not used here.
        self._control = ControlNet(n_input, n_hidden, n_output)
    def forward(self, x):
        g = self._icnn(x)
        g0 = self._icnn(torch.zeros_like(x))
        u = self._control(x)
        u0 = self._control(torch.zeros_like(x))  # used by the alternative return below
        return self.smooth_relu(g - g0) + self._eps * x.pow(2).sum(dim=1), u*x
        # return self.smooth_relu(g - g0) + self._eps * x.pow(2).sum(dim=1), u-u0
    def smooth_relu(self, x):
        """C^1 smoothing of ReLU: polynomial on [0, d], linear beyond d."""
        relu = x.relu()
        # TODO: Is there a clean way to avoid computing both of these on all elements?
        sq = (2*self._d*relu.pow(3) -relu.pow(4)) / (2 * self._d**3)
        lin = x - self._d/2
        return torch.where(relu < self._d, sq, lin)
def lya(ws, bs, us, smooth, x, input_shape):
    """Evaluate an ICNN forward pass from externally supplied parameter lists.

    ``ws``/``bs`` hold input weights/biases per layer, ``us`` the (softplus-
    positivised) hidden-to-hidden weights; ``smooth`` is the activation.
    """
    if x.dim() < 2:
        x = x.unsqueeze(0)
    else:
        # batch axis last so addmm(b, w, x) broadcasts over the batch
        perm = list(range(1, len(input_shape) + 1)) + [0]
        x = x.permute(*perm)
    z = smooth(torch.addmm(bs[0], ws[0], x))
    for u_raw, w, b in zip(us, ws[1:], bs[1:]):
        z = smooth(torch.addmm(b, w, x) + F.softplus(u_raw).mm(z))
    return z
| 3,754 | 34.424528 | 122 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/code_rebuttal/model_free/functions.py | import torch
import torch.nn.functional as F
import numpy as np
import timeit
import argparse
import matplotlib.pyplot as plt
# Shared colour palette for the model-free plots (RGB triples in [0, 1]).
colors = [
    [233/256, 110/256, 236/256], # #e96eec
    # [0.6, 0.6, 0.2], # olive
    # [0.5333333333333333, 0.13333333333333333, 0.3333333333333333], # wine
    [255/255, 165/255, 0],
    # [0.8666666666666667, 0.8, 0.4666666666666667], # sand
    # [223/256, 73/256, 54/256], # #df4936
    [107/256, 161/256,255/256], # #6ba1ff
    [0.6, 0.4, 0.8], # amethyst
    [0.0, 0.0, 1.0], # ao
    [0.55, 0.71, 0.0], # applegreen
    # [0.4, 1.0, 0.0], # brightgreen
    [0.99, 0.76, 0.8], # bubblegum
    [0.93, 0.53, 0.18], # cadmiumorange
    [11/255, 132/255, 147/255], # deblue
    [204/255, 119/255, 34/255], # {ocra}
]
colors = np.array(colors)
def plot_grid():
    """Draw major and minor grid lines on the current axes.

    matplotlib renamed ``grid``'s first parameter from ``b`` to ``visible``
    in 3.5 and removed ``b`` in 3.6, so ``plt.grid(b=True, ...)`` raises on
    modern matplotlib; passing the flag positionally works on all versions.
    """
    # major grid lines
    plt.grid(True, which='major', color='gray', alpha=0.6, linestyle='dashdot', lw=1.5)
    # minor grid lines
    plt.minorticks_on()
    plt.grid(True, which='minor', color='beige', alpha=0.8, ls='-', lw=1)
class ControlNet(torch.nn.Module):
    """MLP controller acting on the second state component only.

    The network maps ``data[:, 1:2]`` through three layers and multiplies
    the result by that same component, so the control vanishes at x2 = 0.
    """

    def __init__(self, n_input, n_hidden, n_output):
        super(ControlNet, self).__init__()
        torch.manual_seed(2)  # fixed seed -> reproducible initial weights
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, data):
        x = data[:, 1:2]
        act = torch.nn.ReLU()
        hidden = act(self.layer2(act(self.layer1(x))))
        # return out*x*torch.tensor([0.0,1.0,1.0,0.0,0.0,0.0])
        return self.layer3(hidden) * x
class Net(torch.nn.Module):
    """Thin wrapper exposing only the stochastic controller sub-network."""

    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        # only the stochastic branch is active in this experiment
        self._scontrol = ControlNet(n_input, n_hidden, n_output)

    def forward(self, data):
        return self._scontrol(data)
Neural-Stochastic-Control | Neural-Stochastic-Control-main/code_rebuttal/model_free/run.py | import numpy as np
from scipy import integrate
import torch
import matplotlib.pyplot as plt
import math
import timeit
from scipy.integrate import odeint
from functions import *
def f(x, u=0):
    """Drift of the six-dimensional cell-fate gene network, expressed in
    coordinates shifted so the equilibrium of interest sits at the origin.

    ``u`` is accepted for interface compatibility but unused here.
    Returns the 6-vector of time derivatives.
    """
    a, b, c = 1, 1, 1
    # equilibrium offset: the vector field is evaluated at x + U2
    U2 = np.array([0.5, 0.74645887, 1.05370735, 0.38154169, 1.68833014, 0.83746371])
    x1, x2, x3, x4, x5, x6 = x + U2
    return np.array([
        0.5 - a * x1,
        5 * x1 / ((1 + x1) * (1 + x3**4)) - b * x2,
        5 * x4 / ((1 + x4) * (1 + x2**4)) - c * x3,
        0.5 / (1 + x2**4) - a * x4,
        (x1 * x4 / (1 + x1 * x4) + 4 * x3 / (1 + x3)) / (1 + x2**4) - a * x5,
        (x1 * x4 / (1 + x1 * x4) + 4 * x2 / (1 + x2)) / (1 + x3**4) - a * x6,
    ])
# Load the trained stochastic controller (weights from NSC_train.py).
models = ControlNet(1,6,1)
models.load_state_dict(torch.load('./data/node_S_2.pkl'))
# models = ControlNet(2,12,2)
# models.load_state_dict(torch.load('./data/node_S.pkl'))
def run_0(n,dt,case,seed):
    """Euler-Maruyama simulation of the cell-fate system for ``n`` steps.

    case == 0 runs the uncontrolled drift; case == 'S' additionally injects
    the learned stochastic control into the second component. Returns the
    [n, 6] trajectory array.
    """
    np.random.seed(seed)
    U2 = np.array([0.5, 0.74645887, 1.05370735, 0.38154169, 1.68833014, 0.83746371])
    x0 = np.array([0.5,-0.9,0.6,-0.6,-0.9,0.5])
    X = np.zeros([n,6])
    DU = np.zeros([n-1,6])
    SU = np.zeros([n-1,6])
    X[0,:]=x0
    z = np.random.normal(0,1,n) # common noise
    # z = np.random.normal(0, 1, [n,6]) # common noise
    for i in range(n-1):
        x = X[i,:]
        df = f(x)
        if case == 0:
            X[i+1,:] = x+df*dt
        if case == 'S':
            # evaluate the learned controller without building a graph
            with torch.no_grad():
                input = torch.from_numpy(x).to(torch.float32).unsqueeze(0)
                u = models(input).detach().numpy()
            X[i+1,:]=x+df*dt
            # X[i+1:i+2,1:3] += np.sqrt(dt)*z[i]*(u)
            # stochastic control enters only the x2 component
            X[i + 1:i + 2, 1:2] += np.sqrt(dt) * z[i] * (u)
    return X
'''
data generate
'''
# Simulation configuration (data generation is kept commented out below).
seed = 3
n = 50000
# dt = 0.00001
dt = 0.0003
m = 10
# X = np.zeros([11,5000,6])
# X[0,:] = run_0(5000,0.001,0,0)
# for i in range(10):
#     X[i+1,:] = run_0(50000,dt,'S',i)[0:50000:10,:]
#     print(i)
# np.save('./data/pin_control_2',X)
'''
test
'''
# X = run_0(n,dt,'S',1)
# for i in range(6):
#     plt.plot(np.arange(len(X))*dt,X[:,i],label=r'$x_{}$'.format(i))
# plt.legend()
'''
plot
'''
font_size = 20
def subplot(X,xticks1,xticks2,yticks1,yticks2,ylim,title):
    """Plot mean +/- std bands of the first two components of X over seeds.

    X is assumed [seeds, time, >=2]; tick/limit/title arguments configure
    the current axes.
    """
    alpha = 0.5
    mean_x,std_x,mean_y,std_y=np.mean(X[:,:,0],axis=0),np.std(X[:,:,0],axis=0),np.mean(X[:,:,1],axis=0),np.std(X[:,:,1],axis=0)
    length = len(mean_x)
    plt.fill_between(np.arange(length),mean_x-std_x,mean_x+std_x,color=colors[0],alpha=alpha)
    plt.plot(np.arange(length),mean_x,color=colors[0],label=r'$x$')
    plt.fill_between(np.arange(length),mean_y-std_y,mean_y+std_y,color=colors[1],alpha=alpha)
    plt.plot(np.arange(length),mean_y,color=colors[1],label=r'$y$')
    plot_grid()
    plt.legend(fontsize=font_size)
    plt.xticks(xticks1,xticks2,fontsize=font_size)
    plt.yticks(yticks1,yticks2,fontsize=font_size)
    plt.ylim(ylim)
    plt.title('{}'.format(title),fontsize=font_size)
    plt.xlabel('Time',fontsize=font_size)
def plot(alpha=0.5):
    """Two-panel figure: one uncontrolled run (left) and the mean +/- std band
    of the controlled runs (right), per state variable.

    Fix: the band half-width was computed with ``np.mean`` instead of
    ``np.std`` — the variable is named ``std_x`` and is used as a mean +/- std
    envelope, matching :func:`subplot` above.
    """
    data = np.load('./data/pin_control_2.npy')
    plt.subplot(121)
    X=data[0,:]
    for i in range(6):
        plt.plot(np.arange(len(X))*0.001,X[:,i],color=colors[i],label=r'$x_{}$'.format(i))
    # plt.legend(fontsize=font_size*0.7,ncol=3)
    plt.ylabel('State variables',fontsize=font_size)
    plt.xlabel('Time', fontsize=font_size)
    plt.yticks([-2, 0, 2],fontsize=font_size)
    plt.xticks([0, 2.5, 5.0], fontsize=font_size)
    plot_grid()
    # plt.legend(fontsize=font_size*0.7 , ncol=6, bbox_to_anchor=(1.5, 1.1))
    plt.subplot(122)
    X=data[1:,:]
    for i in range(6):
        x = X[:,:,i]
        mean_x = np.mean(x,axis=0)
        std_x = np.std(x,axis=0)  # was np.mean, which made the band collapse onto the mean
        length = len(mean_x)
        plt.fill_between(np.arange(length)*0.003, mean_x - std_x, mean_x + std_x, color=colors[i], alpha=alpha)
        plt.plot(np.arange(length)*0.003, mean_x, color=colors[i], label=r'$x_{}$'.format(i))
    plt.xticks([0,15],fontsize=font_size)
    plt.yticks([-2,0,2],fontsize=font_size)
    plt.ylim(-2,2)
    # plt.ylabel('state variables',fontsize=font_size)
    plt.xlabel('Time', fontsize=font_size)
    plot_grid()
plt.show() | 4,155 | 29.335766 | 127 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/code_rebuttal/model_free/NODE.py | # import sys
# sys.path.append('./neural_sde/NODE')
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
# Command-line configuration for the neural-ODE fitting demo.
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
parser.add_argument('--data_size', type=int, default=1000)
parser.add_argument('--batch_time', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=20)
parser.add_argument('--niters', type=int, default=2000)
parser.add_argument('--test_freq', type=int, default=20)
parser.add_argument('--viz', action='store_true')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--adjoint', action='store_true')
args = parser.parse_args()
# adjoint solver trades memory for extra compute during backprop
if args.adjoint:
    from torchdiffeq import odeint_adjoint as odeint
else:
    from torchdiffeq import odeint
# device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
device = torch.device('cpu')
# fixed initial condition and evaluation grid for the ground-truth trajectory
true_y0 = torch.tensor([[0.5, -0.9, 0.6, -0.6, -0.9, 0.5]]).to(device)
# true_y0 = torch.Tensor(10,6).uniform_(-2,2).to(device)
t = torch.linspace(0., 15., args.data_size).to(device)
class Cell_Fate_ODEFunc(nn.Module):
    """Ground-truth drift of the six-gene cell-fate circuit, written in
    coordinates shifted by the equilibrium offset (x = 0 is the fixed point).
    """
    dim = 6
    a, b, c = 1, 1, 1

    def forward(self, t, x):
        # x shape: [1, 6]; the dynamics are autonomous (t unused)
        out = torch.zeros_like(x)
        shift = torch.tensor([[0.5, 0.74645887, 1.05370735, 0.38154169, 1.68833014, 0.83746371]])
        y = x + shift
        x1, x2, x3, x4, x5, x6 = (y[:, k] for k in range(6))
        out[:, 0] = 0.5 - self.a * x1
        out[:, 1] = 5 * x1 / ((1 + x1) * (1 + x3**4)) - self.b * x2
        out[:, 2] = 5 * x4 / ((1 + x4) * (1 + x2**4)) - self.c * x3
        out[:, 3] = 0.5 / (1 + x2**4) - self.a * x4
        out[:, 4] = (x1 * x4 / (1 + x1 * x4) + 4 * x3 / (1 + x3)) / (1 + x2**4) - self.a * x5
        out[:, 5] = (x1 * x4 / (1 + x1 * x4) + 4 * x2 / (1 + x2)) / (1 + x3**4) - self.a * x6
        return out
# Generate the ground-truth trajectory once (no gradients needed).
with torch.no_grad():
    # true_y = odeint(Lambda(), true_y0, t, method='dopri5')
    true_y = odeint(Cell_Fate_ODEFunc(), true_y0, t)
def get_batch():
    """Sample a mini-batch of short trajectory segments from ``true_y``.

    Returns (batch_y0, batch_t, batch_y): initial states, the shared time
    grid of length ``batch_time``, and the stacked target segments.
    """
    s = torch.from_numpy(
        np.random.choice(np.arange(args.data_size - args.batch_time, dtype=np.int64), args.batch_size, replace=False))
    batch_y0 = true_y[s]  # (M, D)
    batch_t = t[:args.batch_time]  # (T)
    batch_y = torch.stack([true_y[s + i] for i in range(args.batch_time)], dim=0)  # (T, M, D)
    return batch_y0.to(device), batch_t.to(device), batch_y.to(device)
class ODEFunc(nn.Module):
    """Learnable drift approximator: a small tanh MLP from R^6 to R^6."""

    def __init__(self):
        super(ODEFunc, self).__init__()
        layers = [nn.Linear(6, 50), nn.Tanh(), nn.Linear(50, 6)]
        self.net = nn.Sequential(*layers)
        # match the original initialisation: N(0, 0.1) weights, zero biases
        for module in self.net.modules():
            if isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, mean=0, std=0.1)
                nn.init.constant_(module.bias, val=0)

    def forward(self, t, y):
        # autonomous dynamics: t is accepted by the odeint interface but unused
        return self.net(y)
class RunningAverageMeter(object):
    """Exponential moving average of a scalar stream.

    ``avg`` is seeded with the first observed value and thereafter updated as
    ``avg <- momentum * avg + (1 - momentum) * val``; ``val`` always holds the
    most recent observation.
    """

    def __init__(self, momentum=0.99):
        self.momentum = momentum
        self.reset()

    def reset(self):
        # ``val is None`` marks the meter as empty until the first update
        self.val = None
        self.avg = 0

    def update(self, val):
        if self.val is None:
            self.avg = val
        else:
            self.avg = self.momentum * self.avg + (1 - self.momentum) * val
        self.val = val
if __name__ == '__main__':
    # Fit ODEFunc to the ground-truth trajectory by matching short segments,
    # then save the fitted drift evaluated along true_y for downstream use.
    ii = 0
    func = ODEFunc().to(device)
    # optimizer = optim.RMSprop(func.parameters(), lr=1e-3)
    optimizer = optim.Adam(func.parameters(), lr=1e-2)
    end = time.time()
    time_meter = RunningAverageMeter(0.97)
    loss_meter = RunningAverageMeter(0.97)
    for itr in range(1, args.niters + 1):
        optimizer.zero_grad()
        batch_y0, batch_t, batch_y = get_batch()
        pred_y = odeint(func, batch_y0, batch_t).to(device)
        # mean absolute error over the predicted segments
        loss = torch.mean(torch.abs(pred_y - batch_y))
        loss.backward()
        optimizer.step()
        time_meter.update(time.time() - end)
        loss_meter.update(loss.item())
        print(itr, loss)
        # if itr % args.test_freq == 0:
        #     with torch.no_grad():
        #         pred_y = odeint(func, true_y0, t)
        #         loss = torch.mean(torch.abs(pred_y - true_y))
        #         print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
        #         visualize(true_y, pred_y, func, ii)
        #         ii += 1
    # torch.save(func.state_dict(),'./neural_sde/NODE/symmetry.pkl')
    end = time.time()
    # evaluate the fitted drift along the ground-truth trajectory and persist it
    data = func(1.0, true_y)
    torch.save(data[:,0,:], './data/node1.pt')
print(data.shape) | 4,670 | 31.213793 | 118 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/code_rebuttal/model_free/NSC_train.py | import torch
import torch.nn.functional as F
import numpy as np
import timeit
import argparse
# Training hyper-parameters for the stochastic-control learner.
# NOTE(review): --N is declared type=float although it names a sample size;
# it is only used in a commented-out line below, so this is benign here.
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--N', type=float, default=1000)
parser.add_argument('--num', type=float, default=6)
parser.add_argument('--lr', type=float, default=0.05)
args = parser.parse_args()
class ControlNet(torch.nn.Module):
    """MLP controller acting on the second state component only.

    Maps ``data[:, 1:2]`` through three layers and multiplies the result by
    that same component, so the control vanishes at x2 = 0.
    """

    def __init__(self, n_input, n_hidden, n_output):
        super(ControlNet, self).__init__()
        torch.manual_seed(2)  # fixed seed -> reproducible initial weights
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, data):
        x = data[:, 1:2]
        act = torch.nn.ReLU()
        hidden = act(self.layer2(act(self.layer1(x))))
        # return out*x*torch.tensor([0.0,1.0,1.0,0.0,0.0,0.0])
        return self.layer3(hidden) * x
class Net(torch.nn.Module):
    """Thin wrapper exposing only the stochastic controller sub-network."""

    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        # only the stochastic branch is active in this experiment
        self._scontrol = ControlNet(n_input, n_hidden, n_output)

    def forward(self, data):
        return self._scontrol(data)
def f_(data):
    """Drift of the six-gene cell-fate network, evaluated on a batch of
    states shifted by the equilibrium offset U2.

    ``data`` is [batch, 6]; returns a tensor of the same shape.
    """
    a, b, c = 1, 1, 1
    U2 = torch.tensor([[0.5, 0.74645887, 1.05370735, 0.38154169, 1.68833014, 0.83746371]])
    x = data + U2
    x1, x2, x3, x4, x5, x6 = x.unbind(dim=1)
    z = torch.zeros_like(data)
    z[:, 0] = 0.5 - a * x1
    z[:, 1] = 5 * x1 / ((1 + x1) * (1 + x3 ** 4)) - b * x2
    z[:, 2] = 5 * x4 / ((1 + x4) * (1 + x2 ** 4)) - c * x3
    z[:, 3] = 0.5 / (1 + x2 ** 4) - a * x4
    z[:, 4] = (x1 * x4 / (1 + x1 * x4) + 4 * x3 / (1 + x3)) / (1 + x2 ** 4) - a * x5
    z[:, 5] = (x1 * x4 / (1 + x1 * x4) + 4 * x2 / (1 + x2)) / (1 + x3 ** 4) - a * x6
    return z
def g_(data, u):
    """Diffusion term: broadcast control ``u[i]`` across each state row."""
    z = torch.zeros_like(data)
    for idx in range(data.shape[0]):
        z[idx, :] = u[idx] + 0.0
    return z
'''
For learning
'''
# Train the stochastic controller on states sampled from the fitted
# neural-ODE trajectory (node1.pt), minimising the AS stability loss.
N = args.N # sample size
D_in = 1 # input dimension
H1 = 6 * D_in # hidden dimension
D_out = 1 # output dimension
torch.manual_seed(10)
# Data = torch.Tensor(N,6).uniform_(-5,5)
Data = torch.load('./data/node1.pt')
# print(Data.shape)
theta = 0.9
out_iters = 0
while out_iters < 1:
    # break
    start = timeit.default_timer()
    model = Net(D_in, H1, D_out)
    i = 0
    t = 0
    max_iters = 200
    learning_rate = args.lr
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    while i < max_iters:
        s_u = model(Data)
        # only the second component of the drift is controlled
        f = f_(Data)[:,1:2]
        # g = g_(Data,s_u)[:,1:3]
        g = s_u
        x = Data[:,1:2]
        # loss = (2-theta)*torch.diagonal(torch.mm(x, g.T))**2-torch.diagonal(torch.mm(x,x.T))*torch.diagonal(
        #     2*torch.mm(x,f.T)+torch.mm(g,g.T))
        # AS stability condition; negative entries are penalised via relu below
        loss = (2-theta)*((x*g)**2)-x**2*(2*x*f+g**2)
        # L_B = 2*(v-M/2)*f[:,3:4]/h(v)**2+g[:,3:4]**2/h(v)**2+4*g[:,3:4]**2*(v-M/2)**2/h(v)**3 - gamma*torch.log(1+torch.abs(h(v))) # barrier function 1
        # L_B = (2*(v-M/2)*f[:,3:4]/h(v)**2+g[:,3:4]**2/h(v)**2+4*g[:,3:4]**2*(v-M/2)**2/h(v)**3)
        # lossB = 2*L_B/h(v)-(1-theta)*(2*(v-M/2)*g[:,3:4])**2/h(v)**4
        AS_loss = (F.relu(-loss)).mean()
        print(i, "AS loss=", AS_loss.item())
        optimizer.zero_grad()
        AS_loss.backward()
        optimizer.step()
        if AS_loss < 1e-8:
            break
        # if AS_loss<0.5:
        #     optimizer=torch.optim.Adam(model.parameters(),lr=0.005)
        i += 1
    stop = timeit.default_timer()
    print('\n')
    print("Total time: ", stop - start)
    print("Verified time: ", t)
    out_iters += 1
torch.save(model._scontrol.state_dict(),'./data/node_S_2.pkl')
| 4,004 | 31.04 | 153 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/code_rebuttal/multiple_k/AS.py | import torch
import torch.nn.functional as F
import timeit
import math
class Net(torch.nn.Module):
    """Two-layer ReLU network used as a scalar state-feedback gain k(x)."""

    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        torch.manual_seed(2)  # fixed seed -> reproducible initial weights
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        act = torch.nn.ReLU()
        return self.layer2(act(self.layer1(x)))
def f_value(x):
    """Drift f(x) = x * log(1 + |x|) evaluated element-wise.

    ``x`` is [N, 1]; returns a fresh (detached) [N, 1] tensor.
    """
    rows = [[xi * math.log(1 + abs(xi))] for xi in x]
    return torch.tensor(rows)
'''
For learning
'''
# Train a scalar gain k(x) so that dx = x log(1+|x|) dt + k(x) x dW is
# almost-surely stabilised (AS loss from the theta-condition below).
N = 4000 # sample size
D_in = 1 # input dimension
H1 = 6 # hidden dimension
D_out = 1 # output dimension
torch.manual_seed(10)
x = torch.Tensor(N, D_in).uniform_(0,50)
theta = 0.9
out_iters = 0
while out_iters < 1:
    start = timeit.default_timer()
    model = Net(D_in,H1, D_out)
    i = 0
    t = 0
    max_iters = 50
    learning_rate = 0.1
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    while i < max_iters:
        out = model(x)
        # multiplicative control g(x) = k(x) * x
        g = out*x
        f = f_value(x)
        # AS stability condition; negative entries are penalised via relu
        loss = (2-theta)*((x*g)**2)-x**2*(2*x*f+g**2)
        Lyapunov_risk = (F.relu(-loss)).mean()
        print(i, "Lyapunov Risk=",Lyapunov_risk.item())
        optimizer.zero_grad()
        Lyapunov_risk.backward()
        optimizer.step()
        i += 1
    stop = timeit.default_timer()
    print('\n')
    print("Total time: ", stop - start)
    print("Verified time: ", t)
    out_iters+=1
torch.save(model.state_dict(), './data/theta0.9_1d_log_net_100.pkl') | 1,716 | 21.012821 | 72 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/code_rebuttal/multiple_k/functions.py | import numpy as np
import math
import torch
import timeit
from scipy import integrate
import matplotlib.pyplot as plt
# Wall-clock timer and base RNG seed for the energy-sweep experiments.
start = timeit.default_timer()
np.random.seed(1)
class Net(torch.nn.Module):
    """Two-layer ReLU network used as a scalar state-feedback gain k(x)."""

    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        torch.manual_seed(2)  # fixed seed -> reproducible initial weights
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        act = torch.nn.ReLU()
        return self.layer2(act(self.layer1(x)))
# Load the learned gain network and set the simulation grid.
log_model = Net(1,6,1)
log_model.load_state_dict(torch.load('./data/theta0.9_1d_log_net.pkl'))
N = 100000        # Euler-Maruyama steps
dt = 0.00001      # integration step size
m = 20            # number of noise realisations per gain
T = 50            # number of constant gains swept (k = 0.2 .. 10.0)
def k_list(N, dt, k, m):
    """Euler-Maruyama simulation of dx = x log(1+|x|) dt + k x dW from x0 = 20,
    repeated for ``m`` seeded noise realisations (seed 4r+1 for column r).

    Returns a [N+1, m] tensor of trajectories.
    """
    data = torch.zeros([N + 1, m])
    for r in range(m):
        np.random.seed(r * 4 + 1)
        noise = np.random.normal(0, 1, N)
        traj = [[20.0]]
        for i in range(N):
            x = traj[i][0]
            drift = x * math.log(1 + abs(x)) * dt
            diffusion = k * x * math.sqrt(dt) * noise[i]
            traj.append([x + drift + diffusion])
        data[:, r] = torch.tensor(traj)[:, 0]
    return data
def learning_control(N,dt,m):
    """Euler-Maruyama simulation with the learned state-dependent gain
    ``log_model`` instead of a constant k; ``m`` seeded realisations.

    Returns a [N+1, m] tensor of trajectories starting at x0 = 20.
    """
    x0 = [20.0]
    data = torch.zeros([N+1,m])
    for r in range(m):
        X = []
        X.append(x0)
        np.random.seed(r*4+1)
        z = np.random.normal(0,1,N)
        for i in range(N):
            x = X[i][0]
            # state-dependent gain evaluated at the current state
            k = log_model(torch.tensor([X[i]]))
            new_x = x + x*math.log(1+abs(x))*dt + k[0]*x*math.sqrt(dt)*z[i]
            X.append([new_x])
        X = torch.tensor(X)
        data[:,r] = X[:,0]
        print(r)
    return data
def multiple_k(T,N,dt,m):
    """Sweep T constant gains k = 0.2, 0.4, ..., 0.2*T; simulate each with
    ``m`` seeded realisations (from x0 = 50) and save the full tensor.

    Returns (and persists to ./data/k_table_x0_20.pt) a [T, N+1, m] tensor.
    """
    x0 = [50.0]
    data = torch.zeros([T, N + 1, m])
    def generate(k):
        # one [N+1, m] block of Euler-Maruyama trajectories for gain k
        data = torch.zeros([N+1,m])
        for r in range(m):
            X = []
            X.append(x0)
            np.random.seed(r * 4 + 1)
            z = np.random.normal(0, 1, N)
            for i in range(N):
                x = X[i][0]
                new_x = x + x * math.log(1 + abs(x)) * dt + k * x * math.sqrt(dt) * z[i]
                X.append([new_x])
            X = torch.tensor(X)
            data[:, r] = X[:, 0]
            print(r)
        return data
    for j in range(T):
        k = 0.2*(j+1)
        data[j,:]=generate(k)
    torch.save(data,'./data/k_table_x0_20.pt')
    return data
def stopping_time(j):
    """Average first-hitting time of |x| < 0.1 over 20 realisations for the
    j-th gain in the stored sweep (dt = 0.0001 per stored sample).
    """
    data = torch.load('./data/k_table_x0_20_100.pt').numpy()
    X = data[j,:]
    t_x = 0.0
    dt = 0.0001
    for i in range(20):
        norm_x = np.abs(X[:, i])
        # first index below threshold, or the final index if never reached
        ind = np.where(norm_x < 0.1)[0][0] if np.min(norm_x) < 0.1 else int(len(X)) - 1
        t_x += ind*dt
    print(t_x/20)
    return t_x/20
def single_k_energy(j):
    # Control energy of the j-th constant-gain controller: integrates
    # g(x) = k^2 * x^2 up to the stopping time, averaged over 20 paths.
    # NOTE(review): integrate.trapz was removed in SciPy 1.14 — this call
    # needs integrate.trapezoid on modern SciPy.
    data = torch.load('./data/k_table_x0_20_100.pt').numpy()
    # data = Data['data']
    # X = data[i,:75001,:]
    # N = int(len(X))-1
    X = data[j,:]
    # dt = 0.00001
    dt = 0.00001
    k = ((j+1)*0.2)**2
    gx = k*X**2
    # a = np.linspace(0, dt*N, N+1)
    v_x = 0
    max_norm = 0.0
    for i in range(20):
        norm_x = np.abs(X[:, i])
        ind = np.where(norm_x < 0.1)[0][0] if np.min(norm_x) < 0.1 else int(len(X))-1
        a = np.linspace(0, dt * ind, ind + 1)
        g_x = gx[:,i]
        v_x += integrate.trapz(g_x[0:ind + 1], a)
        # NOTE(review): this uses the global max over ALL trajectories (gx),
        # not the per-trajectory slice g_x — confirm whether that is intended.
        max_norm += np.sqrt(np.max(gx))
        # v_x += integrate.trapz(np.array(g_x), a)
        # print(i)
    print(v_x/20,max_norm/20)
    return v_x/20
'''
generate energy_list for different k
'''
# Compute the mean energy for each of the T constant gains and plot
# log(energy) against the gain index.  The cached-save lines are kept
# commented for reference.
T = 50
energy_list = np.zeros(T)
# time_list = np.zeros(T)
for i in range(T):
    energy_list[i] = single_k_energy(i)
    # time_list[i] = stopping_time(i)
# np.save('./data/energy_list',energy_list)
# np.save('./data/time_list',time_list)
# energy_list = np.load('./data/energy_list.npy')
plt.plot(np.arange(T),np.log(energy_list))
# plt.axhline(np.log(1438))
# plt.axhline(0.38)
# plt.show()
# Data = torch.load('./data/20seed_learning_control.pt')
# data = Data['data'].detach().numpy()
# Y = data[0,:][:,np.delete(np.arange(20),15)]# Delete the diverge trajectory due to the dt is not small enough in Euler method
# max_norm = 0.0
# for i in range(19):
#     g_y = (log_model(torch.tensor(Y[:, i]).unsqueeze(1))[:, 0].detach().numpy() * Y[:, i])**2
#     max_norm+=np.sqrt(np.max(g_y))
# print(max_norm)
def k_data():
    # Generate the full (T, N+1, m) table of constant-gain trajectories
    # plus the mean endpoint per gain, and save both to disk.
    endpoint = torch.zeros(T)
    Data = torch.zeros(T,N+1,m)
    for i in range(T):
        k = i*0.2+0.2
        data = k_list(N,dt,k,m)
        endpoint[i] = data[-1].mean()
        Data[i,:] = data
        print(i)
    torch.save({'data':Data,'end':endpoint},'./data/k_table_x0_20.pt')
def learning_data():
    # Generate and save 20 seeded trajectories under the learned control.
    # data = learning_control(200000,dt,10)
    data = learning_control(100000,dt,20)
    # torch.save({'data':data},'./neural_sde/Energy/20_learning_control.pt')
    torch.save({'data':data},'./data/20seed_learning_control.pt')
def k_energy_cost():
    # Energy of one constant-gain controller (table row 29) over the first
    # 75000 steps, averaged over the 20 trajectories.
    # NOTE(review): 6*2 gives 12*x^2 here whereas energy_cost() below uses
    # 36*x^2 for the same gain — confirm which coefficient is intended.
    # NOTE(review): integrate.trapz was removed in SciPy 1.14 — use
    # integrate.trapezoid on modern SciPy.
    Data = torch.load('k_table.pt')
    data = Data['data']
    X = data[29,:75001,:]
    N = 75000
    dt = 0.00001
    gx = 6*2*X**2
    a = np.linspace(0, dt*N, N+1)
    print(a.shape)
    v_x = 0
    for i in range(20):
        g_x = gx[:,i]
        v_x += integrate.trapz(np.array(g_x), a)
        print(i)
    print(v_x/20)
def energy_cost():
    # Compare the control energy of a constant-gain run (X, g = 36 x^2)
    # with the learned controller's run (Y, g = (k(y) * y)^2), both
    # integrated up to their respective first-passage times.
    # NOTE(review): integrate.trapz was removed in SciPy 1.14 — use
    # integrate.trapezoid on modern SciPy.
    Data = torch.load('./data/20seed_learning_control.pt')
    data = Data['data'].detach().numpy()
    X = data[1,:]
    Y = data[0,:][:,np.delete(np.arange(20),15)]# Delete the diverge trajectory due to the dt is not small enough in Euler method
    N = 100000
    dt = 0.00001
    v_x = 0
    v_y = 0
    # a = np.linspace(0, dt*N, N+1)
    for i in range(Y.shape[1]):
        g_x = 36*X[:,i]**2
        g_y = (log_model(torch.tensor(Y[:,i]).unsqueeze(1))[:,0].detach().numpy()*Y[:,i])**2
        norm_x = np.abs(X[:,i])
        norm_y = np.abs(Y[:,i])
        ind1 = np.where(norm_x<0.1)[0][0]
        ind2 = np.where(norm_y<0.1)[0][0]
        a1 = np.linspace(0, dt*ind1, ind1+1)
        a2 = np.linspace(0, dt*ind2, ind2+1)
        v_x += integrate.trapz(g_x[0:ind1+1], a1)
        v_y += integrate.trapz(g_y[0:ind2+1], a2)
        print(i)
    print(v_x/20,v_y/19)
# X = multiple_k(T,n,dt,m) # generate data
# k_energy_cost()
# energy_cost()
# learning_data()
# k_data()
# learning_data()
# Report total wall-clock time for whichever sections above were enabled.
stop= timeit.default_timer()
print('time:',stop-start)
| 6,281 | 26.432314 | 129 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/code_rebuttal/multiple_k/plot_appendix.py | import numpy as np
import matplotlib.pyplot as plt
import torch
# import matplotlib
# matplotlib.rcParams['font.sans-serif'] = 'NSimSun,Times New Roman'
# matplotlib.rcParams['text.usetex'] = True
def plot_grid():
    """Draw a two-level (major + minor) background grid on the current axes."""
    # Pass the on/off flag positionally: the keyword alias ``b=`` was
    # removed in Matplotlib 3.6 (renamed to ``visible``), so ``b=True``
    # raises a TypeError on current Matplotlib.
    plt.grid(True, which='major', color='gray', alpha=0.6, linestyle='dashdot', lw=1.5)
    # minor grid lines
    plt.minorticks_on()
    plt.grid(True, which='minor', color='beige', alpha=0.8, ls='-', lw=1)
# Six-panel figure: panels 1-5 show mean +/- std bands for five constant
# gains; panel 6 shows the learned controller's trajectories.
energy = np.load('./data/energy_list.npy')
dt = 0.00001*10
# dt = 0.0001
fontsize = 15
data = torch.load('./data/k_table_x0_20.pt')
print(data.shape)
for i in range(5):
    plt.subplot(1,6,i+1)
    k=(i+1)*2
    # Every 10th sample of the first 50000 steps for gain index 10*(i+1)-1.
    X=data[10*(i+1)-1,0:50000:10,:]
    mean_data = torch.mean(X,1)
    std_data = torch.std(X,1)
    plt.fill_between(np.arange(len(X)) * dt,mean_data-std_data,mean_data+std_data,color='r',alpha=0.2)
    plt.plot(np.arange(len(X)) * dt,mean_data,color='r',alpha=0.9,label='k={}'.format(k))
    # plt.title('ME:{}'.format(38418))
    plt.ylim([-100, 200])
    plt.xlabel(r'Time', fontsize=fontsize)
    if i == 0:
        plt.ylabel(r'$x$', fontsize=fontsize)
    plt.xticks([0, 0.125, 0.25, 0.375, 0.5],
               ["$0$", "$~$","$0.25$","$~$", "$0.5$"]
               )
    plt.yticks([-100, 0, 100, 200])
    plt.legend(fontsize=fontsize)
    plot_grid()
    plt.title('ME:{}'.format(int(energy[10*(i+1)-1])))
    plt.tick_params(labelsize=fontsize)
# Panel 6: learned control trajectories.
Data = torch.load('./data/20seed_learning_control.pt')
data = Data['data'].detach().numpy()
dt = 0.00001
fig3 = plt.subplot(166)
Y = data[0,:]
Y = Y[:14000,:]
mean_data = np.mean(Y,1)
std_data = np.std(Y,1)
plt.fill_between(np.arange(len(Y))*dt,mean_data-std_data,mean_data+std_data,color='g',alpha=0.2)
plt.plot(np.arange(len(Y))*dt,mean_data,color='g',alpha=0.9,label='Learned control')
# plt.ylim([-100, 200])
plt.xlabel(r'Time', fontsize=fontsize)
plt.xticks([0, 0.075/2, 0.075, (0.075 + 0.15)/2, 0.15],
           ["$0$", "$~$","$0.075$", "$~$", "$0.15$"]
           )
plt.ylabel(r'$x$', fontsize=fontsize)
# NOTE(review): the y tick positions are relabelled with unrelated values
# ("0".."0.15") — confirm this rescaling is intentional.
plt.yticks([-20, 0, 20, 40],
           ["0", "0.05","0.1", "0.15"]
           )
plt.legend(fontsize=fontsize * 0.7)
plot_grid()
plt.tick_params(labelsize=fontsize)
plt.title('ME:{}'.format(1438))
plt.show() | 2,306 | 30.60274 | 102 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/code_rebuttal/mixed_control/functions.py | import numpy as np
from scipy import integrate
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import math
import timeit
from scipy.integrate import odeint
# Shared RGB colour palette (components in [0, 1]); unused candidate
# colours are kept commented for reference.
colors = [
    [233/256, 110/256, 236/256], # #e96eec
    # [0.6, 0.6, 0.2],  # olive
    # [0.5333333333333333, 0.13333333333333333, 0.3333333333333333],  # wine
    [255/255, 165/255, 0],
    # [0.8666666666666667, 0.8, 0.4666666666666667], # sand
    # [223/256, 73/256, 54/256], # #df4936
    [107/256, 161/256,255/256], # #6ba1ff
    [0.6, 0.4, 0.8], # amethyst
    [0.0, 0.0, 1.0], # ao
    [0.55, 0.71, 0.0], # applegreen
    # [0.4, 1.0, 0.0], # brightgreen
    [0.99, 0.76, 0.8], # bubblegum
    [0.93, 0.53, 0.18], # cadmiumorange
    [11/255, 132/255, 147/255], # deblue
    [204/255, 119/255, 34/255], # {ocra}
]
colors = np.array(colors)
def plot_grid():
    """Draw a two-level (major + minor) background grid on the current axes."""
    # Pass the on/off flag positionally: the keyword alias ``b=`` was
    # removed in Matplotlib 3.6 (renamed to ``visible``), so ``b=True``
    # raises a TypeError on current Matplotlib.
    plt.grid(True, which='major', color='gray', alpha=0.6, linestyle='dashdot', lw=1.5)
    # minor grid lines
    plt.minorticks_on()
    plt.grid(True, which='minor', color='beige', alpha=0.8, ls='-', lw=1)
class Net(torch.nn.Module):
    """Three-layer ReLU MLP whose output is gated by (multiplied with)
    the first four input coordinates, so the output vanishes at x = 0."""

    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        torch.manual_seed(2)  # reproducible initial weights
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, data):
        relu = torch.nn.ReLU()
        out = self.layer3(relu(self.layer2(relu(self.layer1(data)))))
        # Multiply by the state itself: guarantees u(0) = 0.
        return out * data[:, 0:4]
class ControlNet(torch.nn.Module):
    """Small MLP controller: Linear-ReLU-Linear-ReLU-Linear.

    Weights are initialised near zero (N(0, 0.001)) and biases at exactly
    zero, so the untrained network is essentially the zero map.
    """
    # NOTE: the original final line of this class was corrupted with fused
    # dataset-metadata text (a syntax error); the stray text is removed here.

    def __init__(self, n_input, n_hidden, n_output):
        super(ControlNet, self).__init__()
        # torch.manual_seed(2)
        self.net = nn.Sequential(
            nn.Linear(n_input, n_hidden),
            nn.ReLU(),
            nn.Linear(n_hidden, n_hidden),
            nn.ReLU(),
            nn.Linear(n_hidden, n_output)
        )
        for m in self.net.modules():
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, mean=0, std=0.001)
                nn.init.constant_(m.bias, val=0)

    def forward(self, x):
        return self.net(x)
Neural-Stochastic-Control | Neural-Stochastic-Control-main/code_rebuttal/mixed_control/run.py | import numpy as np
from scipy import integrate
import torch
import matplotlib.pyplot as plt
import math
import timeit
from scipy.integrate import odeint
from functions import *
from cvxopt import solvers,matrix
def f(x, u=0):
    """Drift of the inverted-pendulum model.

    Args:
        x: state pair (angle, angular velocity).
        u: control input — currently unused; kept for interface
           compatibility with callers that pass it.

    Returns:
        np.array([d_angle, d_velocity]).
    """
    # BUG FIX: the original unpacked the state into `u, v`, silently
    # shadowing the control argument `u`.  Use distinct local names.
    angle, velocity = x
    G = 9.81  # gravity
    L = 0.5   # length of the pole
    m = 0.15  # ball mass
    b = 0.1   # friction
    return np.array([velocity,
                     G * np.sin(angle) / L + (-b * velocity) / (m * L ** 2)])
# Pre-trained controllers loaded from disk: stochastic (S), deterministic
# (D) and the mixed pair (MD = deterministic part, MS = stochastic part).
models = Net(2,6,2)
models.load_state_dict(torch.load('./data/S.pkl'))
modeld = Net(2,6,2)
modeld.load_state_dict(torch.load('./data/D.pkl'))
modelmd = Net(2,6,2)
modelmd.load_state_dict(torch.load('./data/MD.pkl'))
modelms = Net(2,6,2)
modelms.load_state_dict(torch.load('./data/MS.pkl'))
def run_0(n,dt,case,seed):
    # Simulate one seeded pendulum trajectory of n Euler-Maruyama steps.
    # case selects the controller: 0 = uncontrolled, 'S' = stochastic,
    # 'D' = deterministic, 'M' = mixed (both).  Returns the state path X
    # plus the deterministic (DU) and stochastic (SU) control histories.
    np.random.seed(seed)
    x0 = np.array([3.0,-4.0])
    X = np.zeros([n,2])
    DU = np.zeros([n-1,2])
    SU = np.zeros([n-1,2])
    X[0,:]=x0
    z = np.random.normal(0,1,n) # common noise
    # z = np.random.normal(0,1,[n,4]) # uncorrelated noise
    for i in range(n-1):
        x = X[i,:]
        df = f(x)
        if case == 0:
            X[i+1,:] = x+df*dt#+()*(dt*z[i]**2-dt)/(2*np.sqrt(dt))
        if case == 'S':
            with torch.no_grad():
                input = torch.from_numpy(x).to(torch.float32).unsqueeze(0)
                u = models(input).detach().numpy()
            X[i+1,:]=x+df*dt+np.sqrt(dt)*z[i]*(u)
            SU[i,:] = u
        if case == 'D':
            with torch.no_grad():
                input = torch.from_numpy(x).to(torch.float32).unsqueeze(0)
                u = modeld(input).detach().numpy()
            X[i + 1, :] = x + (df+u) * dt
            DU[i, :] = u
        if case == 'M':
            with torch.no_grad():
                input = torch.from_numpy(x).to(torch.float32).unsqueeze(0)
                d_u = modelmd(input).detach().numpy()
                s_u = modelms(input).detach().numpy()
            X[i+1,:]=x+(df+d_u)*dt+np.sqrt(dt)*z[i]*(s_u)
            DU[i,:] = d_u
            SU[i,:] = s_u
    return X,DU,SU
'''
data generate
'''
# Monte-Carlo settings.  The generation loop below is commented out;
# the cached .npy files saved earlier are used by plot() instead.
seed = 3
n = 50000
dt = 0.00001
m = 10
# X,DU,SU = np.zeros([m,n,2]),np.zeros([m,n-1,2]),np.zeros([m,n-1,2])
# for i in range(m):
#     X[i,:],DU[i,:],SU[i,:] = run_0(n,dt,'D',2*i+1)
#     print(i)
# np.save('./data/S.npy',{'X':X,'DU':DU,'SU':SU}) # (5000,0.0001)
# np.save('./data/M.npy',{'X':X,'DU':DU,'SU':SU}) # throw out 2nd trajectory (5000,0.0001)
# np.save('./data/D.npy',{'X':X,'DU':DU,'SU':SU})
def energy(U, n=5000, dt=0.0001):
    """Mean control energy  integral |u(t)|^2 dt  averaged over trajectories.

    Args:
        U: array of shape (num_traj, n-1, dim) of control signals.
        n: number of time points of the underlying simulation grid.
        dt: time-step of the simulation grid.

    Returns:
        The trapezoidal-rule estimate, averaged over trajectories.
    """
    steps = n - 1
    a = np.linspace(0, dt * (steps - 1), steps)
    e = 0.0
    for i in range(len(U)):
        # FIX: integrate.trapz was removed in SciPy 1.14; trapezoid is the
        # drop-in replacement (available since SciPy 1.6).
        e += integrate.trapezoid(np.sum(U[i, :] ** 2, axis=1), a)
    return e / float(len(U))
def stop_time(X, delta=0.001, dt=0.0001):
    """Average first-passage time into the ball ||x|| < delta.

    X has shape (num_traj, T, 2).  Raises IndexError when a trajectory
    never enters the ball (same behaviour as the original).
    """
    total_index = 0
    for traj in X:
        radius = np.sqrt(traj[:, 0] ** 2 + traj[:, 1] ** 2)
        total_index += np.flatnonzero(radius < delta)[0]
    return total_index / float(len(X)) * dt
def minima(X):
    """Average, over trajectories, of the minimum ||x(t)|| along each path.

    Also prints each trajectory's index and minimum norm as a progress log.
    """
    acc = 0.0
    for idx, traj in enumerate(X):
        smallest = np.min(np.sqrt(traj[:, 0] ** 2 + traj[:, 1] ** 2))
        acc += smallest
        print(idx, smallest)
    return acc / float(len(X))
'''
plot
'''
font_size = 20
def subplot(X,xticks1,xticks2,yticks1,yticks2,ylim,title):
    # Draw mean +/- std bands of both state components of the trajectory
    # batch X (shape: runs x time x 2) on the current axes, with the given
    # tick positions/labels, y-limits and panel title.
    alpha = 0.5
    mean_x,std_x,mean_y,std_y=np.mean(X[:,:,0],axis=0),np.std(X[:,:,0],axis=0),np.mean(X[:,:,1],axis=0),np.std(X[:,:,1],axis=0)
    length = len(mean_x)
    plt.fill_between(np.arange(length),mean_x-std_x,mean_x+std_x,color=colors[0],alpha=alpha)
    plt.plot(np.arange(length),mean_x,color=colors[0],label=r'$x$')
    plt.fill_between(np.arange(length),mean_y-std_y,mean_y+std_y,color=colors[1],alpha=alpha)
    plt.plot(np.arange(length),mean_y,color=colors[1],label=r'$y$')
    plot_grid()
    plt.legend(fontsize=font_size)
    plt.xticks(xticks1,xticks2,fontsize=font_size)
    plt.yticks(yticks1,yticks2,fontsize=font_size)
    plt.ylim(ylim)
    plt.title('{}'.format(title),fontsize=font_size)
    plt.xlabel('Time',fontsize=font_size)
def plot():
    # Three-panel comparison (deterministic / mixed / stochastic control):
    # each panel shows the state bands and its mean control energy (ME).
    plt.subplot(131)
    data = np.load('./data/D.npy',allow_pickle=True).item()
    X,DU,SU = data['X'],data['DU'],data['SU']
    X = X[:, 0:n:10, :]
    subplot(X,[0,2000,4000],[0,0.2,0.4],[-2,0,2,4],[-2,0,2,4],[-2,5],'deterministic')
    plt.ylabel('state variables',fontsize=font_size)
    plt.title('ME:{}'.format(int(energy(DU+SU,n,dt))),fontsize=font_size)
    plt.subplot(132)
    data = np.load('./data/M.npy',allow_pickle=True).item()
    X,DU,SU = data['X'],data['DU'],data['SU']
    X = X[:,0:31000:10,:]
    subplot(X,[0,1500,3000],[0,0.15,0.3],[0,1,2],[0,'',2],[-0.2,2.5],'mix')
    plt.title('ME:{}'.format(int(energy(DU+SU,n,dt))),fontsize=font_size)
    plt.subplot(133)
    data = np.load('./data/S.npy',allow_pickle=True).item()
    X,DU,SU = data['X'],data['DU'],data['SU']
    X = X[:,0:31000:10,:]
    subplot(X,[0,1500,3000],[0,0.15,0.3],[0,1,2,3],[0,1,2,3],[-0.2,2.5],'stochastic')
    plt.title('ME:{}'.format(int(energy(DU+SU,n,dt))),fontsize=font_size)
plot()
plt.show() | 4,956 | 29.598765 | 127 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/code_rebuttal/mixed_control/NSC_train.py | import torch
import torch.nn.functional as F
import numpy as np
import timeit
import argparse
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--N', type=float, default=1000)
parser.add_argument('--num', type=float, default=2)
parser.add_argument('--lr', type=float, default=0.05)
args = parser.parse_args()
class ControlNet(torch.nn.Module):
    """MLP control law u(x) = phi(x) * x, which guarantees u(0) = 0."""

    def __init__(self, n_input, n_hidden, n_output):
        super(ControlNet, self).__init__()
        torch.manual_seed(2)  # deterministic initialisation
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, data):
        act = torch.nn.ReLU()
        gain = self.layer3(act(self.layer2(act(self.layer1(data)))))
        # Gate by the state itself so the control vanishes at the origin.
        return gain * data
class Net(torch.nn.Module):
    """Bundle of two ControlNets: one producing the deterministic control
    term, one producing the stochastic (diffusion) control term."""

    def __init__(self, n_input, n_hidden, n_output):
        super(Net, self).__init__()
        self._scontrol = ControlNet(n_input, n_hidden, n_output)
        self._dcontrol = ControlNet(n_input, n_hidden, n_output)

    def forward(self, data):
        # Return the pair (deterministic control, stochastic control).
        return self._dcontrol(data), self._scontrol(data)
def f_(data, u):
    """Row-wise drift of the pendulum dynamics.

    data: (batch, 2) tensor of (angle, angular velocity) states.
    u: unused here — the control enters through g_ instead (the original
    `+u[i]` term is kept commented out).
    """
    G = 9.81  # gravity
    L = 0.5   # length of the pole
    m = 0.15  # ball mass
    b = 0.1   # friction
    out = torch.zeros_like(data)
    for i, (angle, vel) in enumerate(data):
        out[i, :] = torch.tensor(
            [vel, G * np.sin(angle) / L + (-b * vel) / (m * L ** 2)])  # +u[i]
    return out
def g_(data, u):
    """Diffusion coefficient of the controlled SDE.

    The uncontrolled system has zero diffusion, so this simply copies the
    stochastic control u into a tensor shaped like data, row by row.
    """
    out = torch.zeros_like(data)
    for row in range(len(data)):
        out[row, :] = u[row] + 0.0
    return out
'''
For learning
'''
# Training data: N states sampled uniformly from [-10, 10]^2, with a fixed
# seed so runs are reproducible.
N = args.N # sample size
D_in = 2 # input dimension
H1 = 3 * D_in # hidden dimension
D_out = 2 # output dimension
torch.manual_seed(10)
Data = torch.Tensor(N,2).uniform_(-10,10)
theta = 0.8
out_iters = 0
while out_iters < 1:
    # break
    start = timeit.default_timer()
    model = Net(D_in, H1, D_out)
    i = 0
    t = 0
    max_iters = 200
    learning_rate = args.lr
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    while i < max_iters:
        # Evaluate the controlled drift/diffusion on the sample batch and
        # penalise violations of the stochastic-stability condition via
        # the hinge loss below (older loss variants kept commented).
        d_u,s_u = model(Data)
        f = f_(Data,d_u)
        g = g_(Data,s_u)
        x = Data
        # loss = (2-theta)*torch.diagonal(torch.mm(x, g.T))**2-torch.diagonal(torch.mm(x,x.T))*torch.diagonal(
        #     2*torch.mm(x,f.T)+torch.mm(g,g.T))
        loss = (2-theta)*((x*g)**2)-x**2*(2*x*f+g**2)
        # L_B = 2*(v-M/2)*f[:,3:4]/h(v)**2+g[:,3:4]**2/h(v)**2+4*g[:,3:4]**2*(v-M/2)**2/h(v)**3 - gamma*torch.log(1+torch.abs(h(v))) # barrier function 1
        # L_B = (2*(v-M/2)*f[:,3:4]/h(v)**2+g[:,3:4]**2/h(v)**2+4*g[:,3:4]**2*(v-M/2)**2/h(v)**3)
        # lossB = 2*L_B/h(v)-(1-theta)*(2*(v-M/2)*g[:,3:4])**2/h(v)**4
        AS_loss = (F.relu(-loss)).mean()
        print(i, "AS loss=", AS_loss.item())
        optimizer.zero_grad()
        AS_loss.backward()
        optimizer.step()
        if AS_loss < 1e-8:
            break
        # if AS_loss<0.5:
        #     optimizer=torch.optim.Adam(model.parameters(),lr=0.005)
        i += 1
    stop = timeit.default_timer()
    print('\n')
    print("Total time: ", stop - start)
    print("Verified time: ", t)
    out_iters += 1
torch.save(model._scontrol.state_dict(),'./data/node_S.pkl')
# torch.save(model._dcontrol.state_dict(),'./data/D.pkl')
| 3,379 | 28.137931 | 153 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/code_rebuttal/comparison/lqr.py | import numpy as np
from cvxopt import solvers,matrix
import matplotlib.pyplot as plt
import torch
def harmonic(n,dt):
    """Euler-Maruyama path of the LQR-controlled noisy oscillator.

    Starts at (2, 2) and draws n standard-normal increments from the
    caller's current NumPy RNG state.  Returns an (n, 2) array.
    """
    X = np.zeros([n, 2])
    X[0, :] = np.array([2.0, 2.0])
    z = np.random.normal(0, 1, n)
    sqrt_dt = np.sqrt(dt)
    for i in range(n - 1):
        x1, x2 = X[i, :]
        X[i + 1, 0] = x1 + (x2 - 4.45 * x1 - 0.09 * x2) * dt
        X[i + 1, 1] = (x2 + (-x1 - x2 - 0.09 * x1 - 3.6 * x2) * dt
                       + (-3 * x1 + 2.15 * x2) * sqrt_dt * z[i])
    return X
# Generate ten seeded LQR rollouts and save them for the comparison plots.
n = 6000
dt = 0.0001
X = np.zeros([10,n,2])
for i in range(10):
    np.random.seed(20*i)
    X[i,:] = harmonic(n,dt)
np.save('lqr.npy',X)
# X = harmonic(n,dt)
# plt.plot(np.arange(len(X)),X[:,0])
# plt.plot(np.arange(len(X)),X[:,1])
# plt.show()
| 662 | 21.1 | 83 | py |
Neural-Stochastic-Control | Neural-Stochastic-Control-main/code_rebuttal/comparison/run.py | import numpy as np
from cvxopt import solvers,matrix
import matplotlib.pyplot as plt
import torch
import seaborn as sns
class ControlNet(torch.nn.Module):
    """Plain three-layer ReLU MLP (no state gating), seeded so every
    instantiation starts from identical weights."""

    def __init__(self, n_input, n_hidden, n_output):
        super(ControlNet, self).__init__()
        torch.manual_seed(2)  # identical weights on every construction
        self.layer1 = torch.nn.Linear(n_input, n_hidden)
        self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
        self.layer3 = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        relu = torch.nn.ReLU()
        return self.layer3(relu(self.layer2(relu(self.layer1(x)))))
def qp(x1,x2,epi=0.1,p=10.0):
    # CLF-QP baseline (HDSCLF): minimum-norm control (u1, u2) plus a
    # relaxation variable d penalised by p, subject to a stochastic CLF
    # decrease condition for V = (x1^2 + x2^2)/2.  Returns sol['x'] as an
    # array [u1, u2, d].
    P = matrix(np.diag([2.0,2.0,2*p]))
    q = matrix([0.0,0.0,0.0])
    G = matrix(np.array([[x1,x2,-1.0]]))
    h = matrix([(-3.0*x1+2.15*x2)**2/2-x2**2-(x1**2+x2**2)/(2*epi)]) # include the V/epi term inside the Lie-derivative bound
    # h = matrix([(-3.0*x1+2.15*x2)**2/2-x2**2])
    solvers.options['show_progress']=False
    sol=solvers.qp(P,q,G,h) # solve the QP with cvxopt's solvers.qp
    u =np.array(sol['x'])
    return u
def osqp(x1,x2,epi=0.1,p=10.0):
    # BALSA-style QP baseline: same structure as qp() but with a different
    # CLF (cross terms 3x1+x2, x1+3x2) in the constraint row and bound.
    P = matrix(np.diag([2.0,2.0,2*p]))
    q = matrix([0.0,0.0,0.0])
    G = matrix(np.array([[3*x1+x2,x1+3*x2,-1.0]]))
    h = matrix([x1**2+x1*x2+2*x2**2-(3*x1**2+2*x1*x2+3*x2**2)/(2*epi)-3*(-3.0*x1+2.15*x2)**2/2])
    solvers.options['show_progress']=False
    sol=solvers.qp(P,q,G,h) # solve the QP with cvxopt's solvers.qp
    u =np.array(sol['x'])
    return u
# ES+ICNN controller weights (trained elsewhere), used by case 3 below.
model = ControlNet(2,6,2)
model.load_state_dict(torch.load('icnn_net.pkl'))
def harmonic(n,dt,case):
    # Simulate one oscillator trajectory under the selected controller:
    # case 0 = uncontrolled, 1 = CLF-QP (HDSCLF), 2 = BALSA QP,
    # 3 = learned ES+ICNN stochastic control.  Returns an (n, 2) path.
    x0 = np.array([-2.0,2.0])
    X = np.zeros([n,2])
    X[0,:]=x0
    z = np.random.normal(0,1,n)
    for i in range(n-1):
        x1,x2 = X[i,:]
        if case != 3:
            if case == 0:
                u1,u2,d = np.zeros(3)
            if case == 1:
                u1,u2,d = qp(x1,x2)
            if case == 2:
                u1,u2,d=osqp(x1,x2)
            X[i+1,0] = x1 + (x2+u1)*dt
            X[i+1,1] = x2 + (-x1-x2+u2)*dt+(-3*x1+2.15*x2)*np.sqrt(dt)*z[i]
        if case == 3:
            with torch.no_grad():
                u = model(torch.from_numpy(X[i,:]).to(torch.float32))
                u = u.detach().numpy()
                u1,u2 = u[0],u[1]
            X[i+1,0]=x1+(x2)*dt + np.sqrt(dt)*z[i]*u1*x1
            X[i+1,1]=x2+(-x1-x2)*dt+(-3*x1+2.15*x2+u2*x2)*np.sqrt(dt)*z[i]
        if i%3000 == 0:
            print(i,u1,u2)
    return X
# Plot settings; the regeneration loop is kept commented — the cached
# .npy files are plotted instead.
n = 4000
dt = 0.00001
font_size=20
X = np.zeros([10,n,2])
# for i in range(10):
#     np.random.seed(20*i)
#     X[i,:] = harmonic(n,dt,3)
# # np.save('qp.npy',X)
# # X = np.load('ES.npy')
# plt.plot(np.arange(n),np.mean(X[:,:,0],axis=0))
# plt.plot(np.arange(n),np.mean(X[:,:,1],axis=0))
def plot_grid():
    """Draw a two-level (major + minor) background grid on the current axes."""
    # Pass the on/off flag positionally: the keyword alias ``b=`` was
    # removed in Matplotlib 3.6 (renamed to ``visible``), so ``b=True``
    # raises a TypeError on current Matplotlib.
    plt.grid(True, which='major', color='gray', alpha=0.6, linestyle='dashdot', lw=1.5)
    # minor grid lines
    plt.minorticks_on()
    plt.grid(True, which='minor', color='beige', alpha=0.8, ls='-', lw=1)
# Shared RGB colour palette (components in [0, 1]); unused candidate
# colours are kept commented for reference.
colors = [
    [233/256, 110/256, 236/256], # #e96eec
    # [0.6, 0.6, 0.2],  # olive
    # [0.5333333333333333, 0.13333333333333333, 0.3333333333333333],  # wine
    [255/255, 165/255, 0],
    # [0.8666666666666667, 0.8, 0.4666666666666667], # sand
    # [223/256, 73/256, 54/256], # #df4936
    [107/256, 161/256,255/256], # #6ba1ff
    [0.6, 0.4, 0.8], # amethyst
    [0.0, 0.0, 1.0], # ao
    [0.55, 0.71, 0.0], # applegreen
    # [0.4, 1.0, 0.0], # brightgreen
    [0.99, 0.76, 0.8], # bubblegum
    [0.93, 0.53, 0.18], # cadmiumorange
    [11/255, 132/255, 147/255], # deblue
    [204/255, 119/255, 34/255], # {ocra}
]
colors = np.array(colors)
def plot1(alpha=0.1):
    # Four-panel layout (LQR / HDSCLF / BALSA / ES+ICNN): each panel shows
    # the mean +/- std band of both state components for one method.
    X1 = np.load('ES.npy')
    X1 = X1[:, 0:40000:10, :]
    X2 = np.load('qp.npy')[:, :4000, :]
    X3 = np.load('osqp.npy')[:, :4000, :]
    X4 = np.load('lqr.npy')[:, :4000, :]
    plt.subplot(144)
    plt.fill_between(np.arange(n), np.mean(X1[:, :, 0], 0) - np.std(X1[:, :, 0], 0),
                     np.mean(X1[:, :, 0], 0) + np.std(X1[:, :, 0], 0),
                     color='r', alpha=alpha)
    plt.plot(np.arange(n), np.mean(X1[:, :, 0], axis=0), color='r', label=r'$x_1$')
    plt.fill_between(np.arange(n), np.mean(X1[:, :, 1], 0) - np.std(X1[:, :, 1], 0),
                     np.mean(X1[:, :, 1], 0) + np.std(X1[:, :, 1], 0),
                     color='r', alpha=alpha)
    plt.plot(np.arange(n), np.mean(X1[:, :, 1], axis=0), color='orange', label=r'$x_2$')
    plt.xticks([0, 2000, 4000], [0, 0.2, 0.4])
    plt.xlabel(r'$t$', fontsize=font_size)
    plt.ylabel(r'$x_1$', fontsize=font_size)
    plt.ylim(-4, 4.0)
    plt.legend(loc=4)
    plt.title('ES+ICNN', fontsize=font_size)
    plot_grid()
    plt.subplot(142)
    plt.fill_between(np.arange(n), np.mean(X2[:, :, 0], 0) - np.std(X2[:, :, 0], 0),
                     np.mean(X2[:, :, 0], 0) + np.std(X2[:, :, 0], 0),
                     color='b', alpha=alpha)
    plt.plot(np.arange(n), np.mean(X2[:, :, 0], axis=0), color='r', label=r'$x_1$')
    plt.fill_between(np.arange(n), np.mean(X2[:, :, 1], 0) - np.std(X2[:, :, 1], 0),
                     np.mean(X2[:, :, 1], 0) + np.std(X2[:, :, 1], 0),
                     color='b', alpha=alpha)
    plt.plot(np.arange(n), np.mean(X2[:, :, 1], axis=0), color='orange', label=r'$x_2$')
    plt.xticks([0, 2000, 4000], [0, 0.2, 0.4])
    plt.xlabel(r'$t$', fontsize=font_size)
    plt.ylabel(r'$x_1$', fontsize=font_size)
    plt.ylim(-4, 4.0)
    plt.legend(loc=4)
    plt.title('HDSCLF',fontsize=font_size)
    plot_grid()
    plt.subplot(143)
    plt.fill_between(np.arange(n), np.mean(X3[:, :, 0], 0) - np.std(X3[:, :, 0], 0),
                     np.mean(X3[:, :, 0], 0) + np.std(X3[:, :, 0], 0),
                     color='g', alpha=alpha)
    plt.plot(np.arange(n), np.mean(X3[:, :, 0], axis=0), color='r', label=r'$x_1$')
    plt.fill_between(np.arange(n), np.mean(X3[:, :, 1], 0) - np.std(X3[:, :, 1], 0),
                     np.mean(X3[:, :, 1], 0) + np.std(X3[:, :, 1], 0),
                     color='g', alpha=alpha)
    plt.plot(np.arange(n), np.mean(X3[:, :, 1], axis=0), color='orange', label=r'$x_2$')
    plt.xticks([0, 2000, 4000], [0, 0.2, 0.4])
    plt.xlabel(r'$t$', fontsize=font_size)
    plt.ylabel(r'$x_1$', fontsize=font_size)
    plt.ylim(-4, 4.0)
    plt.legend(loc=4)
    plt.title('BALSA', fontsize=font_size)
    plot_grid()
    plt.subplot(141)
    plt.fill_between(np.arange(n), np.mean(X4[:, :, 0], 0) - np.std(X4[:, :, 0], 0),
                     np.mean(X4[:, :, 0], 0) + np.std(X4[:, :, 0], 0),
                     color='orange', alpha=alpha)
    plt.plot(np.arange(n), np.mean(X4[:, :, 0], axis=0), color='r', label=r'$x_1$')
    plt.fill_between(np.arange(n), np.mean(X4[:, :, 1], 0) - np.std(X4[:, :, 1], 0),
                     np.mean(X4[:, :, 1], 0) + np.std(X4[:, :, 1], 0),
                     color='orange', alpha=alpha)
    plt.plot(np.arange(n), np.mean(X4[:, :, 1], axis=0), color='orange', label=r'$x_2$')
    plt.xticks([0, 2000, 4000], [0, 0.2, 0.4])
    plt.xlabel(r'$t$', fontsize=font_size)
    plt.ylabel(r'$x_1$', fontsize=font_size)
    plt.ylim(-4, 4.0)
    plt.legend(loc=4)
    plt.title('LQR', fontsize=font_size)
    plot_grid()
def plot2(alpha=0.1):
    # Two-panel overlay: x1 (left) and x2 (right) mean +/- std bands of all
    # four methods on shared axes, using the shared colour palette.
    X1 = np.load('ES.npy')
    X1 = X1[:,0:40000:10,:]
    X2 = np.load('qp.npy')[:,:4000,:]
    X3 = np.load('osqp.npy')[:,:4000,:]
    X4 = np.load('lqr.npy')[:,:4000,:]
    plt.subplot(121)
    plt.fill_between(np.arange(n),np.mean(X1[:,:,0],0)-np.std(X1[:,:,0],0),np.mean(X1[:,:,0],0)+np.std(X1[:,:,0],0),
                     color=colors[0],alpha=alpha)
    plt.plot(np.arange(n),np.mean(X1[:,:,0],axis=0),color=colors[0],label='ES+ICNN')
    plt.fill_between(np.arange(n),np.mean(X2[:,:,0],0)-np.std(X2[:,:,0],0),np.mean(X2[:,:,0],0)+np.std(X2[:,:,0],0),
                     color=colors[1],alpha=alpha)
    plt.plot(np.arange(n),np.mean(X2[:,:,0],axis=0),color=colors[1],label='HDSCLF')
    plt.fill_between(np.arange(n),np.mean(X3[:,:,0],0)-np.std(X3[:,:,0],0),np.mean(X3[:,:,0],0)+np.std(X3[:,:,0],0),
                     color=colors[2],alpha=alpha)
    plt.plot(np.arange(n),np.mean(X3[:,:,0],axis=0),color=colors[2],label='BALSA')
    plt.fill_between(np.arange(n),np.mean(X4[:,:,0],0)-np.std(X4[:,:,0],0),np.mean(X4[:,:,0],0)+np.std(X4[:,:,0],0),
                     color=colors[5],alpha=alpha)
    plt.plot(np.arange(n),np.mean(X4[:,:,0],axis=0),color=colors[5],label='LQR')
    plt.xticks([0,2000,4000],[0,0.2,0.4], fontsize=font_size)
    plt.xlabel('Time',fontsize=font_size)
    plt.ylabel(r'$x_1$',fontsize=font_size)
    plt.yticks([-3,0,3],fontsize=font_size)
    plt.ylim(-3,3.0)
    # plt.legend(loc=4, fontsize=font_size*0.6,)
    # plt.legend(fontsize=font_size * 0.7, ncol=4, bbox_to_anchor=(1.5, 1.1))
    plot_grid()
    plt.subplot(122)
    plt.fill_between(np.arange(n),np.mean(X1[:,:,1],0)-np.std(X1[:,:,1],0),np.mean(X1[:,:,1],0)+np.std(X1[:,:,1],0),
                     color=colors[0],alpha=alpha)
    plt.plot(np.arange(n),np.mean(X1[:,:,1],axis=0),color=colors[0],label='ES+ICNN')
    plt.fill_between(np.arange(n),np.mean(X2[:,:,1],0)-np.std(X2[:,:,1],0),np.mean(X2[:,:,1],0)+np.std(X2[:,:,1],0),
                     color=colors[1],alpha=alpha)
    plt.plot(np.arange(n),np.mean(X2[:,:,1],axis=0),color=colors[1],label='HDSCLF')
    plt.fill_between(np.arange(n),np.mean(X3[:,:,1],0)-np.std(X3[:,:,1],0),np.mean(X3[:,:,1],0)+np.std(X3[:,:,1],0),
                     color=colors[2],alpha=alpha)
    plt.plot(np.arange(n),np.mean(X3[:,:,1],axis=0),color=colors[2],label='BALSA')
    plt.fill_between(np.arange(n),np.mean(X4[:,:,1],0)-np.std(X4[:,:,1],0),np.mean(X4[:,:,1],0)+np.std(X4[:,:,1],0),
                     color=colors[5],alpha=alpha)
    plt.plot(np.arange(n),np.mean(X4[:,:,1],axis=0),color=colors[5],label='LQR')
    plt.xticks([0,2000,4000],[0,0.2,0.4], fontsize=font_size)
    # plt.legend(loc=1, fontsize=font_size*0.6)
    plt.xlabel('Time',fontsize=font_size)
    plt.ylabel(r'$x_2$',fontsize=font_size)
    plt.yticks([ 0, 6], fontsize=font_size)
    plt.ylim(-1,6)
    plot_grid()
# plot1()
# Render the two-panel comparison figure (the figure window is opened by
# the plt.show() call that follows).
plot2()
plt.show() | 10,101 | 40.572016 | 116 | py |
MixLacune | MixLacune-main/process-lacunes.py | # -*- coding: utf-8 -*-
import os
import torch
import torchvision
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import SimpleITK as sitk
import glob
import torch.nn as nn
import nibabel as nib
import shutil
# Select GPU when available.
# NOTE(review): torch.cuda.set_device(0) will fail on CPU-only machines —
# confirm whether a guard is needed.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.cuda.set_device(0)
# One sub-directory per subject under input_data/.
test_data_path = glob.glob(f'input_data/**/')
for x in range(len(test_data_path)):
    # Per-subject modality file paths (T1, T2, FLAIR).
    t1_path = glob.glob(test_data_path[x]+'/*T1*')
    t2_path = glob.glob(test_data_path[x]+'/*T2*')
    flair_path = glob.glob(test_data_path[x]+'/*_FLAIR*')
    # Subject id = first 7 characters of the T1 filename.
    sub_no = str(t1_path[0])
    sub_no = sub_no.rsplit('/', 1)[-1][0:7]
    print("Loading: T1, T2, Flair\n")
    im = sitk.ReadImage(t1_path[0])
# NOTE(review): code after this loop reuses t1_path/t2_path/flair_path via
# Python's loop-variable leakage, i.e. only the LAST subject is processed —
# confirm whether the rest of the pipeline was meant to live inside the loop.
#-------------------Functions------------------------------
def zscore_normalize(img, mask=None):
    """Z-score normalise an MR brain image over a mask.

    Args:
        img: nibabel image to normalise.
        mask: nibabel brain mask for img, the string 'nomask' (use every
            voxel), or None (threshold the image at its own mean).

    Returns:
        A new nibabel Nifti1Image with zero mean / unit std inside the mask.
    """
    data = img.get_fdata()
    if mask is not None and not isinstance(mask, str):
        mask_data = mask.get_fdata()
    elif mask == 'nomask':
        mask_data = data == data
    else:
        mask_data = data > data.mean()
    # Force the mask to be logical before indexing.
    voxels = data[mask_data > 0.]
    mean = voxels.mean()
    std = voxels.std()
    return nib.Nifti1Image((data - mean) / std, img.affine, img.header)
def read_img(path):
    # Load a NIfTI volume, z-score normalise it, and return it as a
    # float32 tensor of shape (slices, 1, H, W): axes reordered z,y,x
    # with a singleton channel axis inserted.
    nib_img = nib.load(path)
    normal = zscore_normalize(nib_img)
    normal = normal.get_fdata()
    normal = normal.astype(np.float32)
    img_as_tensor = torch.from_numpy(normal)
    img_as_tensor = img_as_tensor.permute(2,1,0)
    img_as_tensor = img_as_tensor.unsqueeze(1)
    return img_as_tensor
def extract_patches_2d(img,patch_shape,step=[1.0,1.0],batch_first=False):
    """Cut a (B, C, H, W) tensor into overlapping 2-D patches.

    step entries given as floats are interpreted as fractions of the patch
    size; ints are absolute strides.  Images smaller than a patch are
    zero-padded (centred).  A trailing partial row/column of patches is
    completed with patches flush against the bottom/right edge.

    Returns (num_patches, B, C, pH, pW); with batch_first=True only the
    FIRST batch element is kept, giving (num_patches, C, pH, pW).
    """
    ph, pw = patch_shape[0], patch_shape[1]
    if img.size(2) < ph:
        pad_top = (ph - img.size(2)) // 2
        pad_bottom = ph - img.size(2) - pad_top
        img = nn.ConstantPad2d((0, 0, pad_top, pad_bottom), 0)(img)
    if img.size(3) < pw:
        pad_left = (pw - img.size(3)) // 2
        pad_right = pw - img.size(3) - pad_left
        img = nn.ConstantPad2d((pad_left, pad_right, 0, 0), 0)(img)
    stride_h = int(ph * step[0]) if isinstance(step[0], float) else step[0]
    stride_w = int(pw * step[1]) if isinstance(step[1], float) else step[1]
    folded_h = img.unfold(2, ph, stride_h)
    if (img.size(2) - ph) % stride_h != 0:
        # Append one extra row of patches aligned with the bottom edge.
        edge = img[:, :, -ph:, ].permute(0, 1, 3, 2).unsqueeze(2)
        folded_h = torch.cat((folded_h, edge), dim=2)
    folded_hw = folded_h.unfold(3, pw, stride_w)
    if (img.size(3) - pw) % stride_w != 0:
        # Append one extra column of patches aligned with the right edge.
        edge = folded_h[:, :, :, -pw:, :].permute(0, 1, 2, 4, 3).unsqueeze(3)
        folded_hw = torch.cat((folded_hw, edge), dim=3)
    patches = folded_hw.permute(2, 3, 0, 1, 4, 5)
    patches = patches.reshape(-1, img.size(0), img.size(1), ph, pw)
    if batch_first:
        # NOTE: after moving batch first, only element 0 is kept — this
        # mode effectively assumes batch size 1.
        patches = patches.permute(1, 0, 2, 3, 4)[0, :, :, :, :]
    return patches
def reconstruct_from_patches_2d(patches,img_shape,step=[1.0,1.0],batch_first=False):
    # Inverse of extract_patches_2d: paste patches back onto their grid
    # positions and average wherever patches overlap.  The extra edge
    # rows/columns created by extract_patches_2d are handled by the three
    # trailing if-blocks; the final crops undo any zero-padding applied
    # when the image was smaller than one patch.
    patches = patches.unsqueeze(1)  # re-insert the channel axis
    if(batch_first):
        patches = patches.permute(1,0,2,3,4)
    patch_H, patch_W = patches.size(3), patches.size(4)
    img_size = (patches.size(1), patches.size(2),max(img_shape[0], patch_H), max(img_shape[1], patch_W))
    # Float steps are fractions of the patch size, ints are absolute strides
    # (same convention as extract_patches_2d).
    step_int = [0,0]
    step_int[0] = int(patch_H*step[0]) if(isinstance(step[0], float)) else step[0]
    step_int[1] = int(patch_W*step[1]) if(isinstance(step[1], float)) else step[1]
    nrow, ncol = 1 + (img_size[-2] - patch_H)//step_int[0], 1 + (img_size[-1] - patch_W)//step_int[1]
    r_nrow = nrow + 1 if((img_size[2] - patch_H) % step_int[0] != 0) else nrow
    r_ncol = ncol + 1 if((img_size[3] - patch_W) % step_int[1] != 0) else ncol
    patches = patches.reshape(r_nrow,r_ncol,img_size[0],img_size[1],patch_H,patch_W)
    img = torch.zeros(img_size, device = patches.device)
    overlap_counter = torch.zeros(img_size, device = patches.device)
    # Accumulate the regular grid of patches.
    for i in range(nrow):
        for j in range(ncol):
            img[:,:,i*step_int[0]:i*step_int[0]+patch_H,j*step_int[1]:j*step_int[1]+patch_W] += patches[i,j,]
            overlap_counter[:,:,i*step_int[0]:i*step_int[0]+patch_H,j*step_int[1]:j*step_int[1]+patch_W] += 1
    # Accumulate the bottom-edge row, right-edge column and corner patch.
    if((img_size[2] - patch_H) % step_int[0] != 0):
        for j in range(ncol):
            img[:,:,-patch_H:,j*step_int[1]:j*step_int[1]+patch_W] += patches[-1,j,]
            overlap_counter[:,:,-patch_H:,j*step_int[1]:j*step_int[1]+patch_W] += 1
    if((img_size[3] - patch_W) % step_int[1] != 0):
        for i in range(nrow):
            img[:,:,i*step_int[0]:i*step_int[0]+patch_H,-patch_W:] += patches[i,-1,]
            overlap_counter[:,:,i*step_int[0]:i*step_int[0]+patch_H,-patch_W:] += 1
    if((img_size[2] - patch_H) % step_int[0] != 0 and (img_size[3] - patch_W) % step_int[1] != 0):
        img[:,:,-patch_H:,-patch_W:] += patches[-1,-1,]
        overlap_counter[:,:,-patch_H:,-patch_W:] += 1
    img /= overlap_counter  # average overlapping contributions
    # Undo the centred zero-padding applied for images smaller than a patch.
    if(img_shape[0]<patch_H):
        num_padded_H_Top = (patch_H - img_shape[0])//2
        num_padded_H_Bottom = patch_H - img_shape[0] - num_padded_H_Top
        img = img[:,:,num_padded_H_Top:-num_padded_H_Bottom,]
    if(img_shape[1]<patch_W):
        num_padded_W_Left = (patch_W - img_shape[1])//2
        num_padded_W_Right = patch_W - img_shape[1] - num_padded_W_Left
        img = img[:,:,:,num_padded_W_Left:-num_padded_W_Right]
    return img
# Nearest-neighbour 4x upsampler / 0.25x downsampler used later in the pipeline.
m = nn.Upsample(scale_factor=4, mode='nearest')
d = nn.Upsample(scale_factor=0.25, mode='nearest')
#-------------------Load volume------------------------------
# NOTE(review): t1_path/t2_path/flair_path here are values leaked from the
# subject loop above, i.e. only the LAST subject is loaded — confirm intent.
t1 = read_img(t1_path[0])
t2 = read_img(t2_path[0])
flair = read_img(flair_path[0])
height = t1.shape[2]
width = t1.shape[3]
# Stack the three modalities on the channel axis: (slices, 3, H, W).
tensor = torch.cat(( t1,t2,flair),1)
print("Volume created\n")
#-------------------Prevalence map------------------------------
print("Starting the registration\n")
def register():
    """Register the MNI atlas to the subject T1 and warp the prevalence map.

    Runs elastix (affine + b-spline) with the subject T1 as the fixed image,
    switches the final transform to nearest-neighbour interpolation so the
    warped mask stays label-valued, applies it to the prevalence map with
    transformix, and writes the binarized result to
    'results/prevalence_map.nii.gz'.

    Requires the elastix/transformix binaries under 'elastix-5.0.1-linux/bin'
    and reads `t1_path` from the enclosing script scope.
    """
    import os
    import elastix
    import SimpleITK as sitk

    def change_parameter(input_path, old_text, new_text, output_path):
        """Replace every line exactly equal to ``old_text`` with ``new_text``.

        Parameters
        ----------
        input_path : str
            Parameter file to read.
        old_text : str
            Exact line (including trailing newline) to replace.
        new_text : str
            Replacement line.
        output_path : str
            Where to write the changed parameter file.
        """
        if not os.path.exists(input_path):
            print(input_path + ' does not exist.')
        # Context managers close the handles (the original leaked both).
        with open(input_path) as src:
            list_of_lines = src.readlines()
        for line in range(0, len(list_of_lines)):
            if (list_of_lines[line] == old_text):
                list_of_lines[line] = new_text
        with open(output_path, 'w') as dst:
            dst.writelines(list_of_lines)

    # IMPORTANT: these paths may differ on your system, depending on where
    # Elastix has been installed. Please set accordingly.
    ELASTIX_PATH = os.path.join('elastix-5.0.1-linux/bin/elastix')
    TRANSFORMIX_PATH = os.path.join('elastix-5.0.1-linux/bin/transformix')
    if not os.path.exists(ELASTIX_PATH):
        raise IOError('Elastix cannot be found, please set the correct ELASTIX_PATH.')
    if not os.path.exists(TRANSFORMIX_PATH):
        raise IOError('Transformix cannot be found, please set the correct TRANSFORMIX_PATH.')
    # Make a results directory if none exists
    if os.path.exists('results') is False:
        os.mkdir('results')
    # Fixed image: the subject T1. Moving image: the MNI template.
    target_dir = os.path.join(t1_path[0])
    moving_dir = os.path.join( 'example_data', 'mni.nii')
    moving_mask_dir = os.path.join('example_data', 'Prevalence_map-csv.nii.gz')
    output_dir='results'
    # Register the moving image to the target image.
    el = elastix.ElastixInterface(elastix_path=ELASTIX_PATH)
    el.register(
        fixed_image=target_dir,
        moving_image=moving_dir,
        parameters=[os.path.join( 'example_data', 'affine.txt'), os.path.join('example_data', 'bspline.txt')],
        output_dir=os.path.join('results'))
    # NOTE: two TransformParameters files come out of this; the second one
    # chains the first, so only it is handed to transformix.
    transform_path = os.path.join(output_dir, 'TransformParameters.1.txt')
    # Force nearest-neighbour interpolation so the warped mask stays binary.
    # Fix: the original looped `for i in range(len(param_path))`, i.e. over each
    # *character* of the path, re-running the identical in-place rewrite
    # len(path) times; one call has exactly the same effect.
    change_parameter(transform_path,
                     '(FinalBSplineInterpolationOrder 3)\n',
                     '(FinalBSplineInterpolationOrder 0)\n',
                     transform_path)
    # Feed the registration transform to a transformix wrapper and warp the
    # prevalence-map mask into subject space. (Originally executed twice; the
    # second run only overwrote the same output file, so one call suffices.)
    tr = elastix.TransformixInterface(parameters=transform_path,
                                      transformix_path=TRANSFORMIX_PATH)
    transformed_image_path = tr.transform_image(moving_mask_dir, output_dir=r'results')
    moving_img_mask = sitk.GetArrayFromImage(sitk.ReadImage(transformed_image_path))
    # Binarize the warped map: every voxel >= 1 becomes 1.
    # NOTE(review): reads 'results/result.nii' rather than `transformed_image_path`;
    # transformix writes its output there, so these are presumably the same file -- verify.
    img1= sitk.ReadImage('results/result.nii')
    Im = img1
    BinThreshImFilt = sitk.BinaryThresholdImageFilter()
    BinThreshImFilt.SetLowerThreshold(1)
    BinThreshImFilt.SetOutsideValue(0)
    BinThreshImFilt.SetInsideValue(1)
    BinIm = BinThreshImFilt.Execute(Im)
    sitk.WriteImage(BinIm, 'results/prevalence_map.nii.gz')
# Build the prevalence map in subject space, then load it back as an array.
register()
print("Registration done\n")
map_path = 'results/prevalence_map.nii.gz'
prev_map_itk = sitk.ReadImage(map_path)
prev_map_arr = sitk.GetArrayFromImage(prev_map_itk)
#-------------------Prediction RCNN------------------------------
# Load the fully pickled Mask R-CNN model (not just a state_dict) onto `device`.
model = torch.load('model_RCNN.pt', map_location=device)
model.to(device)
print("Model Mask RCNN loaded\n")
print("Predicting with Mask RCNN......\n")
# Run prediction on all 64 patches that make up one slice.
def pred_patches(upsample_patch):
    """Run Mask R-CNN on each up-sampled 256x256 patch and fuse its instance masks.

    Fix: the original ignored its parameter and overwrote it with the global
    `upsample` (the caller passes that same global, so using the parameter is
    behavior-identical); the inner mask loop also shadowed the outer loop
    variable `f`.

    Parameters
    ----------
    upsample_patch : torch.Tensor
        Patches of one slice, shape (n_patches, C, 256, 256).

    Returns
    -------
    torch.Tensor
        Binary prediction reconstructed to full slice size via the global
        down-sampler `d` and `reconstruct_from_patches_2d`.
    """
    patch_pred = torch.zeros(0, 1, 256, 256)
    model.eval()  # loop-invariant; hoisted out of the per-patch loop
    for f in range(len(upsample_patch)):
        one_patch = upsample_patch[f, :, :, :]
        with torch.no_grad():
            prediction = model([one_patch.to(device)])
        mask = prediction[0]['masks'].cpu()
        threshold, upper, lower = 0.1, 1, 0
        bmask = np.where(mask > threshold, upper, lower)
        if len(mask) != 0:
            # Union of all detected instance masks. (The first mask is counted
            # twice, as in the original; harmless because the sum is
            # re-binarized below.)
            mm0 = bmask[0, :, :, :]
            for g in range(len(bmask)):
                mm0 = mm0 + bmask[g, :, :, :]
            fuse = np.where(mm0 > threshold, upper, lower)
            fuse = torch.from_numpy(fuse)
            fuse = fuse.unsqueeze(0)
        else:
            # No detections: emit an empty mask for this patch.
            fuse = torch.zeros(1, 256, 256)
            fuse = fuse.unsqueeze(0)
        patch_pred = torch.cat((patch_pred, fuse), 0)
    # Downsample 256x256 predictions back to 64x64 and reassemble the slice.
    downsample = d(patch_pred)
    vol = reconstruct_from_patches_2d(downsample, [height, width], batch_first=False)
    return vol
# ---- Mask R-CNN inference, slice by slice ----
slices = torch.zeros(0,1,height,width)
for f in range(len(tensor)):
    one_slice = tensor[f,:,:,:]
    one_slice = one_slice.unsqueeze(0)
    # Cut the slice into 64x64 patches and upsample each to 256x256 for the detector.
    patches = extract_patches_2d(one_slice, [64,64], batch_first=True)
    m = nn.Upsample(scale_factor=4, mode='nearest')  # NOTE(review): shadows the module-level `m`
    upsample = m(patches)
    slice_pred = pred_patches(upsample)
    slices = torch.cat((slices,slice_pred),0)
print("Prediction done\n")
foo = slices.squeeze(1)
it_img = sitk.GetImageFromArray(foo)
# NOTE(review): `im` is only assigned below (after reading rcnn_pred-script.nii.gz);
# presumably an `im` from earlier in the script is already in scope here -- verify.
it_img.CopyInformation(im)
sitk.WriteImage(it_img, 'results/rcnn_pred-script.nii.gz')
rcnn_pred_itk = it_img
rcnn_pred_arr = foo
#-------------------Prediction - map------------------------------
print("Prediction from Mask RCNN - Prevalence map in progress\n")
im = sitk.ReadImage('results/rcnn_pred-script.nii.gz')
arr = sitk.GetArrayFromImage(im)
im2 = sitk.ReadImage('results/prevalence_map.nii.gz')
arr2 = sitk.GetArrayFromImage(im2)
#arr = rcnn_pred_arr
#arr2 = prev_map_arr
# Sum prediction and prevalence map: voxels predicted AND inside the map get value 2.
out_arr = arr + arr2
out_im = sitk.GetImageFromArray(out_arr)
out_im.CopyInformation(im)
Im = out_im
# Keep only voxels where both agree: value 2 falls inside [1.1, 2].
BinThreshImFilt = sitk.BinaryThresholdImageFilter()
BinThreshImFilt.SetLowerThreshold(1.1)
BinThreshImFilt.SetUpperThreshold(2)
BinThreshImFilt.SetOutsideValue(0)
BinThreshImFilt.SetInsideValue(1)
BinIm = BinThreshImFilt.Execute(Im)
sitk.WriteImage(BinIm, 'results/rcnn_pred-map.nii.gz')
rcnn_pred_map_itk = BinIm
rcnn_pred_map_arr = sitk.GetArrayFromImage(rcnn_pred_map_itk)
#-------------------Prediction UNet ------------------------------
print("Prediction with Unet\n")
from torchvision.models import resnext50_32x4d
class ConvRelu(nn.Module):
    """A Conv2d immediately followed by an in-place ReLU."""

    def __init__(self, in_channels, out_channels, kernel, padding):
        super().__init__()
        # Bundle the two ops so they appear as one module in the state dict.
        self.convrelu = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel, padding=padding),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.convrelu(x)
class DecoderBlock(nn.Module):
    """Decoder stage: 1x1 channel squeeze, 2x transposed-conv upsample, 1x1 expand."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        squeezed = in_channels // 4  # channel count around the upsampling step
        self.conv1 = ConvRelu(in_channels, squeezed, 1, 0)
        self.deconv = nn.ConvTranspose2d(squeezed, squeezed, kernel_size=4,
                                         stride=2, padding=1, output_padding=0)
        self.conv2 = ConvRelu(squeezed, out_channels, 1, 0)

    def forward(self, x):
        return self.conv2(self.deconv(self.conv1(x)))
class ResNeXtUNet(nn.Module):
    """UNet-style segmentation network on a ResNeXt-50 (32x4d) encoder.

    The pretrained torchvision backbone provides the encoder stages; four
    DecoderBlocks upsample back with additive skip connections, and a small
    conv head emits ``n_classes`` sigmoid probability maps.
    """

    def __init__(self, n_classes):
        super().__init__()
        self.base_model = resnext50_32x4d(pretrained=True)
        self.base_layers = list(self.base_model.children())
        filters = [4*64, 4*128, 4*256, 4*512]
        # Encoder: reuse the backbone stem and its four residual stages.
        self.encoder0 = nn.Sequential(*self.base_layers[:3])
        self.encoder1 = nn.Sequential(*self.base_layers[4])
        self.encoder2 = nn.Sequential(*self.base_layers[5])
        self.encoder3 = nn.Sequential(*self.base_layers[6])
        self.encoder4 = nn.Sequential(*self.base_layers[7])
        # Decoder: mirror the encoder widths on the way back up.
        self.decoder4 = DecoderBlock(filters[3], filters[2])
        self.decoder3 = DecoderBlock(filters[2], filters[1])
        self.decoder2 = DecoderBlock(filters[1], filters[0])
        self.decoder1 = DecoderBlock(filters[0], filters[0])
        # Classifier head.
        self.last_conv0 = ConvRelu(256, 128, 3, 1)
        self.last_conv1 = nn.Conv2d(128, n_classes, 3, padding=1)

    def forward(self, x):
        # Encoder path.
        stem = self.encoder0(x)
        e1 = self.encoder1(stem)
        e2 = self.encoder2(e1)
        e3 = self.encoder3(e2)
        e4 = self.encoder4(e3)
        # Decoder path with additive skip connections.
        d4 = self.decoder4(e4) + e3
        d3 = self.decoder3(d4) + e2
        d2 = self.decoder2(d3) + e1
        d1 = self.decoder1(d2)
        # Per-pixel class probabilities.
        out = self.last_conv0(d1)
        out = self.last_conv1(out)
        return torch.sigmoid(out)
# Load the fully pickled UNet model (depends on the classes defined above).
rx50 = torch.load('model_UNet32.pt', map_location=device)
rx50.to(device)
print("Model rx50 loaded\n")
# Combined Mask R-CNN + prevalence-map mask, used as a 4th input channel.
mask_path = sitk.ReadImage('results/rcnn_pred-map.nii.gz')
mask_img = sitk.GetArrayFromImage(mask_path)
mask = torch.from_numpy(mask_img)
#mask = torch.from_numpy(rcnn_pred_map_arr)
mask = mask.unsqueeze(1)
# Volume layout: channels [t1, t2, flair, candidate-mask] per slice.
volume = torch.cat((tensor, mask),1)
print("Predicting with UNet rx50\n")
# Run prediction on all 256 patches that make up one slice.
def pred_patches_UNet(patches):
    """Run the rx50 UNet on each 32x32 patch of one slice.

    Patches whose candidate-mask channel (index 3) is empty skip the network
    and contribute their (all-zero) mask directly; only patches with mask
    support are segmented.

    Parameters
    ----------
    patches : torch.Tensor
        Patches of one slice, shape (n_patches, 4, 32, 32); the first three
        channels are the MR modalities, the fourth the candidate mask.

    Returns
    -------
    torch.Tensor
        Per-patch probability maps, shape (n_patches, 1, 32, 32).
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Fix: the original called torch.cuda.set_device(0) unconditionally, which
    # raises on CPU-only machines despite the CPU fallback chosen above.
    if torch.cuda.is_available():
        torch.cuda.set_device(0)
    train_dataloader = DataLoader(patches, batch_size=1, num_workers=0, shuffle=False)
    inp_tensor = torch.zeros(0, 1, 32, 32)
    for i, data in enumerate(train_dataloader):
        if data[:, 3, :, :].max() == 0:
            # No candidate voxels: pass the empty mask channel through unchanged.
            empty = data[:, 3, :, :].unsqueeze(0)
            inp_tensor = torch.cat((inp_tensor, empty), 0)
        else:
            # Candidate patch: feed the three image channels (scaled to [0, 1])
            # through the UNet.
            x = data[:, :3, :, :]
            bla2 = x / 255
            pred = rx50(bla2.to(device))
            pred = pred.detach().cpu().numpy()[0, 0, :, :]
            pred_tensor = torch.from_numpy(pred)
            pred_tensor = pred_tensor.unsqueeze(0)
            pred_tensor = pred_tensor.unsqueeze(0)
            inp_tensor = torch.cat((inp_tensor, pred_tensor), 0)
    return inp_tensor
# ---- UNet inference, slice by slice ----
slices = torch.zeros(0,1,height,width)
for f in range(len(volume)):
    one_slice = volume[f,:,:,:]
    one_slice = one_slice.unsqueeze(0)
    patches = extract_patches_2d(one_slice, [32,32], batch_first=True)
    bla = pred_patches_UNet(patches)
    vol = reconstruct_from_patches_2d(bla, [height,width], batch_first=False)
    slices = torch.cat((slices,vol),0)
#a = np.array(slices)
#threshold, upper, lower = 0.7, 1, 0
#mask=np.where(a>threshold, upper, lower)
foo = slices.squeeze(1)
#foo = mask.squeeze(1)
it_img = sitk.GetImageFromArray(foo)
it_img.CopyInformation(im)
sitk.WriteImage(it_img, 'results/unet_pred.nii.gz')
unet_pred_itk = it_img
unet_pred_arr = foo
print("Done\n")
#-------------------UNet pred - Map ------------------------------
print("Prediction from UNet - Prevalence map.....\n")
im = sitk.ReadImage('results/unet_pred.nii.gz')
arr = sitk.GetArrayFromImage(im)
#arr = unet_pred_arr
im2 = sitk.ReadImage('results/prevalence_map.nii.gz')
arr2 = sitk.GetArrayFromImage(im2)
# Sum UNet probabilities and prevalence map, then keep voxels above 1.1
# (i.e. positive prediction inside the map).
out_arr = arr + arr2
out_im = sitk.GetImageFromArray(out_arr)
out_im.CopyInformation(im)
Im = out_im
BinThreshImFilt = sitk.BinaryThresholdImageFilter()
BinThreshImFilt.SetLowerThreshold(1.1)
#BinThreshImFilt.SetUpperThreshold(2)
BinThreshImFilt.SetOutsideValue(0)
BinThreshImFilt.SetInsideValue(1)
BinIm = BinThreshImFilt.Execute(Im)
# NOTE(review): os.path.join is given a single pre-concatenated argument here;
# works, but it relies on `end` starting with '/'.
end = '/'+ sub_no + '_space-T1_binary_prediction.nii.gz'
pred_path = os.path.join('output_data' + end)
sitk.WriteImage(BinIm, pred_path)
print("final prediction done \n")
# Clean up all intermediate files.
rem_path = ('results')
shutil.rmtree(rem_path)
print("results removed \n")
| 21,262 | 36.173077 | 155 | py |
SimCSE | SimCSE-main/setup.py | import io
from setuptools import setup, find_packages
# Read the README once so it can be shipped as the PyPI long description.
with io.open('./README.md', encoding='utf-8') as f:
    readme = f.read()

# Package metadata for the `simcse` distribution.
setup(
    name='simcse',
    packages=['simcse'],
    version='0.4',
    license='MIT',
    description='A sentence embedding tool based on SimCSE',
    # Fix: `readme` was read above but never used; surface it on PyPI.
    long_description=readme,
    long_description_content_type='text/markdown',
    author='Tianyu Gao, Xingcheng Yao, Danqi Chen',
    author_email='tianyug@cs.princeton.edu',
    url='https://github.com/princeton-nlp/SimCSE',
    download_url='https://github.com/princeton-nlp/SimCSE/archive/refs/tags/0.4.tar.gz',
    keywords=['sentence', 'embedding', 'simcse', 'nlp'],
    install_requires=[
        "tqdm",
        "scikit-learn",
        "scipy>=1.5.4,<1.6",
        "transformers",
        "torch",
        "numpy>=1.19.5,<1.20",
        "setuptools"
    ]
)
| 767 | 26.428571 | 88 | py |
SimCSE | SimCSE-main/evaluation.py | import sys
import io, os
import numpy as np
import logging
import argparse
from prettytable import PrettyTable
import torch
import transformers
from transformers import AutoModel, AutoTokenizer
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
# Set PATHs
PATH_TO_SENTEVAL = './SentEval'
PATH_TO_DATA = './SentEval/data'
# Import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
def print_table(task_names, scores):
    """Render one row of ``scores`` under ``task_names`` headers as an ASCII table."""
    table = PrettyTable()
    table.field_names = task_names
    table.add_row(scores)
    print(table)
def main():
    """CLI entry point: evaluate a transformers checkpoint with SentEval.

    Loads the model/tokenizer named by --model_name_or_path, builds a SentEval
    batcher around the pooling strategy chosen with --pooler, runs the
    requested STS and/or transfer tasks, and prints the scores as tables.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name_or_path", type=str,
            help="Transformers' model name or path")
    parser.add_argument("--pooler", type=str,
            choices=['cls', 'cls_before_pooler', 'avg', 'avg_top2', 'avg_first_last'],
            default='cls',
            help="Which pooler to use")
    parser.add_argument("--mode", type=str,
            choices=['dev', 'test', 'fasttest'],
            default='test',
            help="What evaluation mode to use (dev: fast mode, dev results; test: full mode, test results); fasttest: fast mode, test results")
    parser.add_argument("--task_set", type=str,
            choices=['sts', 'transfer', 'full', 'na'],
            default='sts',
            help="What set of tasks to evaluate on. If not 'na', this will override '--tasks'")
    parser.add_argument("--tasks", type=str, nargs='+',
            default=['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
                     'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'TREC', 'MRPC',
                     'SICKRelatedness', 'STSBenchmark'],
            help="Tasks to evaluate on. If '--task_set' is specified, this will be overridden")

    args = parser.parse_args()

    # Load transformers' model checkpoint
    model = AutoModel.from_pretrained(args.model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    # Set up the tasks ('na' keeps whatever --tasks provided)
    if args.task_set == 'sts':
        args.tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16', 'STSBenchmark', 'SICKRelatedness']
    elif args.task_set == 'transfer':
        args.tasks = ['MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'TREC', 'MRPC']
    elif args.task_set == 'full':
        args.tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16', 'STSBenchmark', 'SICKRelatedness']
        args.tasks += ['MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'TREC', 'MRPC']

    # Set params for SentEval
    if args.mode == 'dev' or args.mode == 'fasttest':
        # Fast mode: smaller k-fold and a cheaper downstream classifier.
        params = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
        params['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
                                'tenacity': 3, 'epoch_size': 2}
    elif args.mode == 'test':
        # Full mode: SentEval's standard (slower) settings.
        params = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 10}
        params['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': 64,
                                'tenacity': 5, 'epoch_size': 4}
    else:
        raise NotImplementedError

    # SentEval prepare and batcher
    def prepare(params, samples):
        # No task-specific preparation is needed for a plain transformer encoder.
        return

    def batcher(params, batch, max_length=None):
        """Encode one SentEval batch (lists of tokens) into sentence embeddings."""
        # Handle rare token encoding issues in the dataset
        if len(batch) >= 1 and len(batch[0]) >= 1 and isinstance(batch[0][0], bytes):
            batch = [[word.decode('utf-8') for word in s] for s in batch]

        sentences = [' '.join(s) for s in batch]

        # Tokenization
        if max_length is not None:
            batch = tokenizer.batch_encode_plus(
                sentences,
                return_tensors='pt',
                padding=True,
                max_length=max_length,
                truncation=True
            )
        else:
            batch = tokenizer.batch_encode_plus(
                sentences,
                return_tensors='pt',
                padding=True,
            )

        # Move to the correct device
        for k in batch:
            batch[k] = batch[k].to(device)

        # Get raw embeddings
        with torch.no_grad():
            outputs = model(**batch, output_hidden_states=True, return_dict=True)
            last_hidden = outputs.last_hidden_state
            pooler_output = outputs.pooler_output
            hidden_states = outputs.hidden_states

        # Apply different poolers
        if args.pooler == 'cls':
            # There is a linear+activation layer after CLS representation
            return pooler_output.cpu()
        elif args.pooler == 'cls_before_pooler':
            return last_hidden[:, 0].cpu()
        elif args.pooler == "avg":
            # Mean over non-padding tokens of the last layer.
            return ((last_hidden * batch['attention_mask'].unsqueeze(-1)).sum(1) / batch['attention_mask'].sum(-1).unsqueeze(-1)).cpu()
        elif args.pooler == "avg_first_last":
            # Average of the first and last layers, mean-pooled over tokens.
            first_hidden = hidden_states[1]
            last_hidden = hidden_states[-1]
            pooled_result = ((first_hidden + last_hidden) / 2.0 * batch['attention_mask'].unsqueeze(-1)).sum(1) / batch['attention_mask'].sum(-1).unsqueeze(-1)
            return pooled_result.cpu()
        elif args.pooler == "avg_top2":
            # Average of the last two layers, mean-pooled over tokens.
            second_last_hidden = hidden_states[-2]
            last_hidden = hidden_states[-1]
            pooled_result = ((last_hidden + second_last_hidden) / 2.0 * batch['attention_mask'].unsqueeze(-1)).sum(1) / batch['attention_mask'].sum(-1).unsqueeze(-1)
            return pooled_result.cpu()
        else:
            raise NotImplementedError

    results = {}

    for task in args.tasks:
        se = senteval.engine.SE(params, batcher, prepare)
        result = se.eval(task)
        results[task] = result

    # Print evaluation results
    if args.mode == 'dev':
        print("------ %s ------" % (args.mode))

        # STS dev Spearman correlations (x100).
        task_names = []
        scores = []
        for task in ['STSBenchmark', 'SICKRelatedness']:
            task_names.append(task)
            if task in results:
                scores.append("%.2f" % (results[task]['dev']['spearman'][0] * 100))
            else:
                scores.append("0.00")
        print_table(task_names, scores)

        # Transfer dev accuracies plus their average.
        task_names = []
        scores = []
        for task in ['MR', 'CR', 'SUBJ', 'MPQA', 'SST2', 'TREC', 'MRPC']:
            task_names.append(task)
            if task in results:
                scores.append("%.2f" % (results[task]['devacc']))
            else:
                scores.append("0.00")
        task_names.append("Avg.")
        scores.append("%.2f" % (sum([float(score) for score in scores]) / len(scores)))
        print_table(task_names, scores)

    elif args.mode == 'test' or args.mode == 'fasttest':
        print("------ %s ------" % (args.mode))

        # STS test Spearman correlations (x100); STS12-16 aggregate over subsets.
        task_names = []
        scores = []
        for task in ['STS12', 'STS13', 'STS14', 'STS15', 'STS16', 'STSBenchmark', 'SICKRelatedness']:
            task_names.append(task)
            if task in results:
                if task in ['STS12', 'STS13', 'STS14', 'STS15', 'STS16']:
                    scores.append("%.2f" % (results[task]['all']['spearman']['all'] * 100))
                else:
                    scores.append("%.2f" % (results[task]['test']['spearman'].correlation * 100))
            else:
                scores.append("0.00")
        task_names.append("Avg.")
        scores.append("%.2f" % (sum([float(score) for score in scores]) / len(scores)))
        print_table(task_names, scores)

        # Transfer test accuracies plus their average.
        task_names = []
        scores = []
        for task in ['MR', 'CR', 'SUBJ', 'MPQA', 'SST2', 'TREC', 'MRPC']:
            task_names.append(task)
            if task in results:
                scores.append("%.2f" % (results[task]['acc']))
            else:
                scores.append("0.00")
        task_names.append("Avg.")
        scores.append("%.2f" % (sum([float(score) for score in scores]) / len(scores)))
        print_table(task_names, scores)
if __name__ == "__main__":
main()
| 8,127 | 38.456311 | 165 | py |
SimCSE | SimCSE-main/simcse_to_huggingface.py | """
Convert SimCSE's checkpoints to Huggingface style.
"""
import argparse
import torch
import os
import json
def main():
    """Convert a SimCSE checkpoint in-place to a Huggingface-loadable one.

    Renames state-dict keys ("mlp" -> "pooler", strips the "bert."/"roberta."
    encoder prefix) and rewrites the config.json architectures from *ForCL to
    *Model, overwriting both files under --path.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", type=str, help="Path of SimCSE checkpoint folder")
    args = parser.parse_args()

    print("SimCSE checkpoint -> Huggingface checkpoint for {}".format(args.path))

    state_dict = torch.load(os.path.join(args.path, "pytorch_model.bin"), map_location=torch.device("cpu"))
    new_state_dict = {}
    for key, param in state_dict.items():
        # Replace "mlp" to "pooler"
        if "mlp" in key:
            key = key.replace("mlp", "pooler")

        # Delete "bert" or "roberta" prefix
        if "bert." in key:
            key = key.replace("bert.", "")
        if "roberta." in key:
            key = key.replace("roberta.", "")

        new_state_dict[key] = param

    torch.save(new_state_dict, os.path.join(args.path, "pytorch_model.bin"))

    # Change architectures in config.json.
    # Fix: both file handles were opened without ever being closed; use
    # context managers so they are released deterministically.
    config_path = os.path.join(args.path, "config.json")
    with open(config_path) as f:
        config = json.load(f)
    for i in range(len(config["architectures"])):
        config["architectures"][i] = config["architectures"][i].replace("ForCL", "Model")
    with open(config_path, "w") as f:
        json.dump(config, f, indent=2)
main()
| 1,327 | 29.181818 | 107 | py |
SimCSE | SimCSE-main/train.py | import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional, Union, List, Dict, Tuple
import torch
import collections
import random
from datasets import load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorWithPadding,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
EvalPrediction,
BertModel,
BertForPreTraining,
RobertaModel
)
from transformers.tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTrainedTokenizerBase
from transformers.trainer_utils import is_main_process
from transformers.data.data_collator import DataCollatorForLanguageModeling
from transformers.file_utils import cached_property, torch_required, is_torch_available, is_torch_tpu_available
from simcse.models import RobertaForCL, BertForCL
from simcse.trainers import CLTrainer
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    # Huggingface's original arguments
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": "The model checkpoint for weights initialization."
            "Don't set if you want to train a model from scratch."
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )

    # SimCSE's arguments
    # temp: softmax temperature applied in the contrastive objective.
    temp: float = field(
        default=0.05,
        metadata={
            "help": "Temperature for softmax."
        }
    )
    # pooler_type: how sentence embeddings are pooled from token states.
    pooler_type: str = field(
        default="cls",
        metadata={
            "help": "What kind of pooler to use (cls, cls_before_pooler, avg, avg_top2, avg_first_last)."
        }
    )
    hard_negative_weight: float = field(
        default=0,
        metadata={
            "help": "The **logit** of weight for hard negatives (only effective if hard negatives are used)."
        }
    )
    do_mlm: bool = field(
        default=False,
        metadata={
            "help": "Whether to use MLM auxiliary objective."
        }
    )
    mlm_weight: float = field(
        default=0.1,
        metadata={
            "help": "Weight for MLM auxiliary objective (only effective if --do_mlm)."
        }
    )
    mlp_only_train: bool = field(
        default=False,
        metadata={
            "help": "Use MLP only during training"
        }
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    # Huggingface's original arguments.
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )

    # SimCSE's arguments
    train_file: Optional[str] = field(
        default=None,
        metadata={"help": "The training data file (.txt or .csv)."}
    )
    max_seq_length: Optional[int] = field(
        default=32,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated."
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch."
        },
    )
    mlm_probability: float = field(
        default=0.15,
        metadata={"help": "Ratio of tokens to mask for MLM (only effective if --do_mlm)"}
    )

    def __post_init__(self):
        """Validate that a data source was given and has a supported extension.

        Fix: the original condition also read `self.validation_file`, an
        attribute this dataclass never defines, so reaching the no-data branch
        raised AttributeError instead of the intended ValueError.
        """
        if self.dataset_name is None and self.train_file is None:
            raise ValueError("Need either a dataset name or a training/validation file.")
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
@dataclass
class OurTrainingArguments(TrainingArguments):
    """TrainingArguments with SimCSE extras and a custom device-setup override."""

    # Evaluation
    ## By default, we evaluate STS (dev) during training (for selecting best checkpoints) and evaluate
    ## both STS and transfer tasks (dev) at the end of training. Using --eval_transfer will allow evaluating
    ## both STS and transfer tasks (dev) during training.
    eval_transfer: bool = field(
        default=False,
        metadata={"help": "Evaluate transfer task dev sets (in validation)."}
    )

    @cached_property
    @torch_required
    def _setup_devices(self) -> "torch.device":
        """Pick the torch device (CPU / TPU / single-node GPU / distributed) and set self._n_gpu."""
        logger.info("PyTorch: setting up devices")
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_torch_tpu_available():
            import torch_xla.core.xla_model as xm
            device = xm.xla_device()
            self._n_gpu = 0
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            #
            # deepspeed performs its own DDP internally, and requires the program to be started with:
            # deepspeed ./program.py
            # rather than:
            # python -m torch.distributed.launch --nproc_per_node=2 ./program.py
            if self.deepspeed:
                from .integrations import is_deepspeed_available

                if not is_deepspeed_available():
                    raise ImportError("--deepspeed requires deepspeed: `pip install deepspeed`.")
                import deepspeed

                deepspeed.init_distributed()
            else:
                torch.distributed.init_process_group(backend="nccl")
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, OurTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty."
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN,
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub
#
# For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this
# behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
if extension == "csv":
datasets = load_dataset(extension, data_files=data_files, cache_dir="./data/", delimiter="\t" if "tsv" in data_args.train_file else ",")
else:
datasets = load_dataset(extension, data_files=data_files, cache_dir="./data/")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
if 'roberta' in model_args.model_name_or_path:
model = RobertaForCL.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
model_args=model_args
)
elif 'bert' in model_args.model_name_or_path:
model = BertForCL.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
model_args=model_args
)
if model_args.do_mlm:
pretrained_model = BertForPreTraining.from_pretrained(model_args.model_name_or_path)
model.lm_head.load_state_dict(pretrained_model.cls.predictions.state_dict())
else:
raise NotImplementedError
else:
raise NotImplementedError
logger.info("Training new model from scratch")
model = AutoModelForMaskedLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Prepare features
column_names = datasets["train"].column_names
sent2_cname = None
if len(column_names) == 2:
# Pair datasets
sent0_cname = column_names[0]
sent1_cname = column_names[1]
elif len(column_names) == 3:
# Pair datasets with hard negatives
sent0_cname = column_names[0]
sent1_cname = column_names[1]
sent2_cname = column_names[2]
elif len(column_names) == 1:
# Unsupervised datasets
sent0_cname = column_names[0]
sent1_cname = column_names[0]
else:
raise NotImplementedError
    def prepare_features(examples):
        """Tokenize a batch of examples into grouped per-example sentence features.

        Each output feature value is a list of per-sentence encodings:
        ``[sent0, sent1]`` for pair / unsupervised data, or
        ``[sent0, sent1, hard_negative]`` when ``sent2_cname`` is set.
        Relies on the enclosing scope for ``tokenizer``, ``data_args`` and
        the ``sent0/1/2_cname`` column names.
        """
        # padding = longest (default)
        #   If no sentence in the batch exceeds the max length, then use
        #   the max sentence length in the batch, otherwise use the
        #   max sentence length in the argument and truncate those that
        #   exceed the max length.
        # padding = max_length (when pad_to_max_length, for pressure test)
        #   All sentences are padded/truncated to data_args.max_seq_length.
        total = len(examples[sent0_cname])
        # Avoid "None" fields: the tokenizer cannot handle None entries.
        for idx in range(total):
            if examples[sent0_cname][idx] is None:
                examples[sent0_cname][idx] = " "
            if examples[sent1_cname][idx] is None:
                examples[sent1_cname][idx] = " "
        # Concatenate all sentences so a single tokenizer call handles them;
        # they are regrouped per example below.
        sentences = examples[sent0_cname] + examples[sent1_cname]
        # If hard negative exists
        if sent2_cname is not None:
            for idx in range(total):
                if examples[sent2_cname][idx] is None:
                    examples[sent2_cname][idx] = " "
            sentences += examples[sent2_cname]
        sent_features = tokenizer(
            sentences,
            max_length=data_args.max_seq_length,
            truncation=True,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        features = {}
        if sent2_cname is not None:
            # Regroup the flat encodings into per-example triplets
            # (sentence i, its pair at i+total, its hard negative at i+2*total).
            for key in sent_features:
                features[key] = [[sent_features[key][i], sent_features[key][i+total], sent_features[key][i+total*2]] for i in range(total)]
        else:
            # Regroup into per-example pairs (sentence i and its pair at i+total).
            for key in sent_features:
                features[key] = [[sent_features[key][i], sent_features[key][i+total]] for i in range(total)]
        return features
if training_args.do_train:
train_dataset = datasets["train"].map(
prepare_features,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
# Data collator
    @dataclass
    class OurDataCollatorWithPadding:
        """Pad SimCSE features where each example holds multiple sentences.

        Each incoming feature stores lists of per-sentence encodings
        ([sent0, sent1] or [sent0, sent1, hard_neg]). They are flattened,
        padded in one ``tokenizer.pad`` call, then reshaped back to
        (batch, num_sent, seq_len). Optionally also prepares MLM
        inputs/labels when ``model_args.do_mlm`` is set.
        """

        tokenizer: PreTrainedTokenizerBase
        padding: Union[bool, str, PaddingStrategy] = True
        max_length: Optional[int] = None
        pad_to_multiple_of: Optional[int] = None
        mlm: bool = True
        # Default captured from the enclosing main()'s data_args at class-creation time.
        mlm_probability: float = data_args.mlm_probability

        def __call__(self, features: List[Dict[str, Union[List[int], List[List[int]], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
            # Keys whose values are per-sentence and must keep the extra
            # num_sent dimension after the reshape below.
            special_keys = ['input_ids', 'attention_mask', 'token_type_ids', 'mlm_input_ids', 'mlm_labels']
            bs = len(features)
            if bs > 0:
                num_sent = len(features[0]['input_ids'])
            else:
                # NOTE(review): an empty batch returns None rather than an
                # empty dict — presumably callers never hit this path; verify.
                return
            # Flatten (batch, num_sent) into a single list so one pad() call
            # can handle every sentence of every example.
            flat_features = []
            for feature in features:
                for i in range(num_sent):
                    flat_features.append({k: feature[k][i] if k in special_keys else feature[k] for k in feature})
            batch = self.tokenizer.pad(
                flat_features,
                padding=self.padding,
                max_length=self.max_length,
                pad_to_multiple_of=self.pad_to_multiple_of,
                return_tensors="pt",
            )
            if model_args.do_mlm:
                batch["mlm_input_ids"], batch["mlm_labels"] = self.mask_tokens(batch["input_ids"])
            # Restore the (batch, num_sent, seq_len) layout; non-special keys
            # were duplicated across sentences, so keep only the first copy.
            batch = {k: batch[k].view(bs, num_sent, -1) if k in special_keys else batch[k].view(bs, num_sent, -1)[:, 0] for k in batch}
            # Normalize HF label key variants to "labels".
            if "label" in batch:
                batch["labels"] = batch["label"]
                del batch["label"]
            if "label_ids" in batch:
                batch["labels"] = batch["label_ids"]
                del batch["label_ids"]
            return batch

        def mask_tokens(
            self, inputs: torch.Tensor, special_tokens_mask: Optional[torch.Tensor] = None
        ) -> Tuple[torch.Tensor, torch.Tensor]:
            """
            Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
            """
            inputs = inputs.clone()
            labels = inputs.clone()
            # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
            probability_matrix = torch.full(labels.shape, self.mlm_probability)
            if special_tokens_mask is None:
                special_tokens_mask = [
                    self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
                ]
                special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
            else:
                special_tokens_mask = special_tokens_mask.bool()
            # Never mask special tokens ([CLS], [SEP], padding, ...).
            probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
            masked_indices = torch.bernoulli(probability_matrix).bool()
            labels[~masked_indices] = -100  # We only compute loss on masked tokens
            # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
            indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
            inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
            # 10% of the time, we replace masked input tokens with a random word
            # (0.5 of the remaining 20% == 10% overall).
            indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
            random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
            inputs[indices_random] = random_words[indices_random]
            # The rest of the time (10% of the time) we keep the masked input tokens unchanged
            return inputs, labels
data_collator = default_data_collator if data_args.pad_to_max_length else OurDataCollatorWithPadding(tokenizer)
trainer = CLTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
tokenizer=tokenizer,
data_collator=data_collator,
)
trainer.model_args = model_args
# Training
if training_args.do_train:
model_path = (
model_args.model_name_or_path
if (model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path))
else None
)
train_result = trainer.train(model_path=model_path)
trainer.save_model() # Saves the tokenizer too for easy upload
output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
if trainer.is_world_process_zero():
with open(output_train_file, "w") as writer:
logger.info("***** Train results *****")
for key, value in sorted(train_result.metrics.items()):
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
results = trainer.evaluate(eval_senteval_transfer=True)
output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in sorted(results.items()):
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
return results
def _mp_fn(index):
    # Entry point for xla_spawn (TPUs): each spawned process simply runs
    # main(). The process index is required by the xla_spawn API but unused.
    main()
# Standard script entry point.
if __name__ == "__main__":
    main()
| 24,040 | 39.955707 | 144 | py |
SimCSE | SimCSE-main/simcse/tool.py | import logging
from tqdm import tqdm
import numpy as np
from numpy import ndarray
import torch
from torch import Tensor, device
import transformers
from transformers import AutoModel, AutoTokenizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize
from typing import List, Dict, Tuple, Type, Union
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class SimCSE(object):
    """
    A class for embedding sentences, calculating similarities, and retrieving sentences by SimCSE.
    """

    def __init__(self, model_name_or_path: str,
                 device: str = None,
                 num_cells: int = 100,
                 num_cells_in_search: int = 10,
                 pooler = None):
        """Load the tokenizer/model and choose the device and pooling policy.

        Args:
            model_name_or_path: HuggingFace model id or local checkpoint path.
            device: "cuda" or "cpu"; defaults to cuda when available.
            num_cells: number of IVF cells when building a fast faiss index.
            num_cells_in_search: number of cells probed at faiss search time.
            pooler: explicit pooling policy ("cls" or "cls_before_pooler");
                inferred from the model name when None.
        """
        self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
        self.model = AutoModel.from_pretrained(model_name_or_path)
        if device is None:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        self.device = device
        self.index = None
        self.is_faiss_index = False
        self.num_cells = num_cells
        self.num_cells_in_search = num_cells_in_search
        if pooler is not None:
            self.pooler = pooler
        elif "unsup" in model_name_or_path:
            logger.info("Use `cls_before_pooler` for unsupervised models. If you want to use other pooling policy, specify `pooler` argument.")
            self.pooler = "cls_before_pooler"
        else:
            self.pooler = "cls"

    def encode(self, sentence: Union[str, List[str]],
               device: str = None,
               return_numpy: bool = False,
               normalize_to_unit: bool = True,
               keepdim: bool = False,
               batch_size: int = 64,
               max_length: int = 128) -> Union[ndarray, Tensor]:
        """Embed one sentence or a list of sentences.

        Returns a single vector for a str input (unless ``keepdim``) and an
        (N, dim) matrix for a list; numpy array when ``return_numpy`` is set.
        """
        target_device = self.device if device is None else device
        self.model = self.model.to(target_device)

        single_sentence = False
        if isinstance(sentence, str):
            sentence = [sentence]
            single_sentence = True

        embedding_list = []
        with torch.no_grad():
            total_batch = len(sentence) // batch_size + (1 if len(sentence) % batch_size > 0 else 0)
            for batch_id in tqdm(range(total_batch)):
                inputs = self.tokenizer(
                    sentence[batch_id*batch_size:(batch_id+1)*batch_size],
                    padding=True,
                    truncation=True,
                    max_length=max_length,
                    return_tensors="pt"
                )
                inputs = {k: v.to(target_device) for k, v in inputs.items()}
                outputs = self.model(**inputs, return_dict=True)
                if self.pooler == "cls":
                    embeddings = outputs.pooler_output
                elif self.pooler == "cls_before_pooler":
                    embeddings = outputs.last_hidden_state[:, 0]
                else:
                    raise NotImplementedError
                if normalize_to_unit:
                    # Unit-normalize so inner product == cosine similarity.
                    embeddings = embeddings / embeddings.norm(dim=1, keepdim=True)
                embedding_list.append(embeddings.cpu())
        embeddings = torch.cat(embedding_list, 0)

        if single_sentence and not keepdim:
            embeddings = embeddings[0]

        if return_numpy and not isinstance(embeddings, ndarray):
            return embeddings.numpy()
        return embeddings

    def similarity(self, queries: Union[str, List[str]],
                   keys: Union[str, List[str], ndarray],
                   device: str = None) -> Union[float, ndarray]:
        """Return cosine similarities between queries and keys.

        A float for a single query/key pair, otherwise an N*M numpy array.
        ``keys`` may be a precomputed embedding matrix.
        """
        query_vecs = self.encode(queries, device=device, return_numpy=True)  # suppose N queries
        if not isinstance(keys, ndarray):
            key_vecs = self.encode(keys, device=device, return_numpy=True)  # suppose M keys
        else:
            key_vecs = keys

        # check whether N == 1 or M == 1
        single_query, single_key = len(query_vecs.shape) == 1, len(key_vecs.shape) == 1
        if single_query:
            query_vecs = query_vecs.reshape(1, -1)
        if single_key:
            key_vecs = key_vecs.reshape(1, -1)

        # returns an N*M similarity array
        similarities = cosine_similarity(query_vecs, key_vecs)

        # Squeeze singleton dimensions back out for scalar-style inputs.
        if single_query:
            similarities = similarities[0]
            if single_key:
                similarities = float(similarities[0])

        return similarities

    def build_index(self, sentences_or_file_path: Union[str, List[str]],
                    use_faiss: bool = None,
                    faiss_fast: bool = False,
                    device: str = None,
                    batch_size: int = 64):
        """Build a sentence index (faiss when available, else a numpy matrix).

        Args:
            sentences_or_file_path: list of sentences, or path to a file with
                one sentence per line.
            use_faiss: force faiss on/off; None auto-detects.
            faiss_fast: use an approximate IVF index instead of exact search.
        """
        if use_faiss is None or use_faiss:
            try:
                import faiss
                assert hasattr(faiss, "IndexFlatIP")
                use_faiss = True
            # Was a bare `except:` — only an ImportError (faiss missing) or
            # AssertionError (incompatible faiss build) should trigger the
            # brute-force fallback, not e.g. KeyboardInterrupt.
            except (ImportError, AssertionError):
                logger.warning("Fail to import faiss. If you want to use faiss, install faiss through PyPI. Now the program continues with brute force search.")
                use_faiss = False

        # if the input sentence is a string, we assume it's the path of file that stores various sentences
        if isinstance(sentences_or_file_path, str):
            sentences = []
            with open(sentences_or_file_path, "r") as f:
                logger.info("Loading sentences from %s ..." % (sentences_or_file_path))
                for line in tqdm(f):
                    sentences.append(line.rstrip())
            sentences_or_file_path = sentences

        logger.info("Encoding embeddings for sentences...")
        embeddings = self.encode(sentences_or_file_path, device=device, batch_size=batch_size, normalize_to_unit=True, return_numpy=True)

        logger.info("Building index...")
        self.index = {"sentences": sentences_or_file_path}

        if use_faiss:
            # Inner-product on unit vectors == cosine similarity.
            quantizer = faiss.IndexFlatIP(embeddings.shape[1])
            if faiss_fast:
                index = faiss.IndexIVFFlat(quantizer, embeddings.shape[1], min(self.num_cells, len(sentences_or_file_path)), faiss.METRIC_INNER_PRODUCT)
            else:
                index = quantizer

            if (self.device == "cuda" and device != "cpu") or device == "cuda":
                if hasattr(faiss, "StandardGpuResources"):
                    logger.info("Use GPU-version faiss")
                    res = faiss.StandardGpuResources()
                    res.setTempMemory(20 * 1024 * 1024 * 1024)
                    index = faiss.index_cpu_to_gpu(res, 0, index)
                else:
                    logger.info("Use CPU-version faiss")
            else:
                logger.info("Use CPU-version faiss")

            if faiss_fast:
                index.train(embeddings.astype(np.float32))
            index.add(embeddings.astype(np.float32))
            index.nprobe = min(self.num_cells_in_search, len(sentences_or_file_path))
            self.is_faiss_index = True
        else:
            index = embeddings
            self.is_faiss_index = False
        self.index["index"] = index
        logger.info("Finished")

    def add_to_index(self, sentences_or_file_path: Union[str, List[str]],
                     device: str = None,
                     batch_size: int = 64):
        """Encode additional sentences and append them to the existing index."""
        # if the input sentence is a string, we assume it's the path of file that stores various sentences
        if isinstance(sentences_or_file_path, str):
            sentences = []
            with open(sentences_or_file_path, "r") as f:
                logger.info("Loading sentences from %s ..." % (sentences_or_file_path))
                for line in tqdm(f):
                    sentences.append(line.rstrip())
            sentences_or_file_path = sentences

        logger.info("Encoding embeddings for sentences...")
        embeddings = self.encode(sentences_or_file_path, device=device, batch_size=batch_size, normalize_to_unit=True, return_numpy=True)

        if self.is_faiss_index:
            self.index["index"].add(embeddings.astype(np.float32))
        else:
            self.index["index"] = np.concatenate((self.index["index"], embeddings))
        self.index["sentences"] += sentences_or_file_path
        logger.info("Finished")

    def search(self, queries: Union[str, List[str]],
               device: str = None,
               threshold: float = 0.6,
               top_k: int = 5) -> Union[List[Tuple[str, float]], List[List[Tuple[str, float]]]]:
        """Retrieve up to ``top_k`` indexed sentences with score >= threshold.

        Returns a list of (sentence, score) tuples for a single query, or a
        list of such lists for a list of queries.
        """
        if not self.is_faiss_index:
            # Brute-force path: one recursive call per query.
            if isinstance(queries, list):
                combined_results = []
                for query in queries:
                    results = self.search(query, device, threshold, top_k)
                    combined_results.append(results)
                return combined_results

            similarities = self.similarity(queries, self.index["index"]).tolist()
            id_and_score = []
            for i, s in enumerate(similarities):
                if s >= threshold:
                    id_and_score.append((i, s))
            id_and_score = sorted(id_and_score, key=lambda x: x[1], reverse=True)[:top_k]
            results = [(self.index["sentences"][idx], score) for idx, score in id_and_score]
            return results
        else:
            query_vecs = self.encode(queries, device=device, normalize_to_unit=True, keepdim=True, return_numpy=True)

            distance, idx = self.index["index"].search(query_vecs.astype(np.float32), top_k)

            def pack_single_result(dist, idx):
                # faiss returns scores already sorted; just apply the threshold.
                results = [(self.index["sentences"][i], s) for i, s in zip(idx, dist) if s >= threshold]
                return results

            if isinstance(queries, list):
                combined_results = []
                for i in range(len(queries)):
                    results = pack_single_result(distance[i], idx[i])
                    combined_results.append(results)
                return combined_results
            else:
                return pack_single_result(distance[0], idx[0])
# Usage demo / smoke test when running this module directly: computes a
# similarity matrix, then compares brute-force and faiss retrieval.
if __name__=="__main__":
    example_sentences = [
        'An animal is biting a persons finger.',
        'A woman is reading.',
        'A man is lifting weights in a garage.',
        'A man plays the violin.',
        'A man is eating food.',
        'A man plays the piano.',
        'A panda is climbing.',
        'A man plays a guitar.',
        'A woman is slicing a meat.',
        'A woman is taking a picture.'
    ]
    example_queries = [
        'A man is playing music.',
        'A woman is making a photo.'
    ]

    model_name = "princeton-nlp/sup-simcse-bert-base-uncased"
    simcse = SimCSE(model_name)

    # 1) Raw query-vs-sentence cosine similarity matrix.
    print("\n=========Calculate cosine similarities between queries and sentences============\n")
    similarities = simcse.similarity(example_queries, example_sentences)
    print(similarities)

    # 2) Exact retrieval over a numpy embedding matrix.
    print("\n=========Naive brute force search============\n")
    simcse.build_index(example_sentences, use_faiss=False)
    results = simcse.search(example_queries)
    for i, result in enumerate(results):
        print("Retrieval results for query: {}".format(example_queries[i]))
        for sentence, score in result:
            print("    {}  (cosine similarity: {:.4f})".format(sentence, score))
        print("")

    # 3) Same retrieval through the faiss index (falls back to brute force
    # if faiss is unavailable).
    print("\n=========Search with Faiss backend============\n")
    simcse.build_index(example_sentences, use_faiss=True)
    results = simcse.search(example_queries)
    for i, result in enumerate(results):
        print("Retrieval results for query: {}".format(example_queries[i]))
        for sentence, score in result:
            print("    {}  (cosine similarity: {:.4f})".format(sentence, score))
        print("")
| 12,092 | 41.135889 | 160 | py |
SimCSE | SimCSE-main/simcse/trainers.py | import collections
import inspect
import math
import sys
import os
import re
import json
import shutil
import time
import warnings
from pathlib import Path
import importlib.util
from packaging import version
from transformers import Trainer
from transformers.modeling_utils import PreTrainedModel
from transformers.training_args import ParallelMode, TrainingArguments
from transformers.utils import logging
from transformers.trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
TrainOutput,
default_compute_objective,
default_hp_space,
set_seed,
speed_metrics,
)
from transformers.file_utils import (
WEIGHTS_NAME,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_torch_tpu_available,
)
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from transformers.trainer_pt_utils import (
reissue_pt_warnings,
)
from transformers.utils import logging
from transformers.data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
import torch
import torch.nn as nn
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
from transformers.trainer import _model_unwrap
from transformers.optimization import Adafactor, AdamW, get_scheduler
import copy
# Set path to SentEval
PATH_TO_SENTEVAL = './SentEval'
PATH_TO_DATA = './SentEval/data'
# Import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
import numpy as np
from datetime import datetime
from filelock import FileLock
logger = logging.get_logger(__name__)
class CLTrainer(Trainer):
    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        eval_senteval_transfer: bool = False,
    ) -> Dict[str, float]:
        """Evaluate with SentEval STS tasks (and optionally transfer tasks).

        Always reports STS-B / SICK-R dev Spearman correlations and their
        average; when ``eval_senteval_transfer`` or ``args.eval_transfer`` is
        set, also reports dev accuracy on the seven transfer tasks.
        ``eval_dataset``/``ignore_keys`` are accepted for Trainer API
        compatibility but not used by this SentEval-based evaluation.
        """

        # SentEval prepare and batcher
        def prepare(params, samples):
            # No task-specific preparation needed for SimCSE.
            return

        def batcher(params, batch):
            # SentEval hands over pre-tokenized sentences; re-join and encode
            # them with the model's tokenizer, then embed with sent_emb=True.
            sentences = [' '.join(s) for s in batch]
            batch = self.tokenizer.batch_encode_plus(
                sentences,
                return_tensors='pt',
                padding=True,
            )
            for k in batch:
                batch[k] = batch[k].to(self.args.device)
            with torch.no_grad():
                outputs = self.model(**batch, output_hidden_states=True, return_dict=True, sent_emb=True)
                pooler_output = outputs.pooler_output
            return pooler_output.cpu()

        # Set params for SentEval (fastmode): small classifier, few epochs.
        params = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
        params['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
                                'tenacity': 3, 'epoch_size': 2}

        se = senteval.engine.SE(params, batcher, prepare)
        tasks = ['STSBenchmark', 'SICKRelatedness']
        if eval_senteval_transfer or self.args.eval_transfer:
            tasks = ['STSBenchmark', 'SICKRelatedness', 'MR', 'CR', 'SUBJ', 'MPQA', 'SST2', 'TREC', 'MRPC']
        self.model.eval()
        results = se.eval(tasks)

        stsb_spearman = results['STSBenchmark']['dev']['spearman'][0]
        sickr_spearman = results['SICKRelatedness']['dev']['spearman'][0]

        metrics = {"eval_stsb_spearman": stsb_spearman, "eval_sickr_spearman": sickr_spearman, "eval_avg_sts": (stsb_spearman + sickr_spearman) / 2}
        if eval_senteval_transfer or self.args.eval_transfer:
            avg_transfer = 0
            for task in ['MR', 'CR', 'SUBJ', 'MPQA', 'SST2', 'TREC', 'MRPC']:
                avg_transfer += results[task]['devacc']
                metrics['eval_{}'.format(task)] = results[task]['devacc']
            avg_transfer /= 7
            metrics['eval_avg_transfer'] = avg_transfer

        self.log(metrics)
        return metrics
    def _save_checkpoint(self, model, trial, metrics=None):
        """
        Compared to original implementation, we change the saving policy to
        only save the best-validation checkpoints.

        When ``metrics`` is provided (and ``metric_for_best_model`` is set),
        the model is written to ``output_dir`` only if the metric improved;
        otherwise it falls back to the stock checkpoint-folder behavior.
        """
        # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
        # want to save.
        assert _model_unwrap(model) is self.model, "internal model should be a reference to self.model"

        # Determine the new best metric / best model checkpoint
        if metrics is not None and self.args.metric_for_best_model is not None:
            metric_to_check = self.args.metric_for_best_model
            if not metric_to_check.startswith("eval_"):
                metric_to_check = f"eval_{metric_to_check}"
            metric_value = metrics[metric_to_check]

            operator = np.greater if self.args.greater_is_better else np.less
            if (
                self.state.best_metric is None
                or self.state.best_model_checkpoint is None
                or operator(metric_value, self.state.best_metric)
            ):
                # New best: overwrite the single best-model directory in place.
                output_dir = self.args.output_dir
                self.state.best_metric = metric_value
                self.state.best_model_checkpoint = output_dir

                # Only save model when it is the best one
                self.save_model(output_dir)
                if self.deepspeed:
                    self.deepspeed.save_checkpoint(output_dir)

                # Save optimizer and scheduler
                if self.sharded_dpp:
                    # Sharded optimizer states must be gathered before saving.
                    self.optimizer.consolidate_state_dict()

                if is_torch_tpu_available():
                    xm.rendezvous("saving_optimizer_states")
                    xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    with warnings.catch_warnings(record=True) as caught_warnings:
                        xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                        reissue_pt_warnings(caught_warnings)
                elif self.is_world_process_zero() and not self.deepspeed:
                    # deepspeed.save_checkpoint above saves model/optim/sched
                    torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    with warnings.catch_warnings(record=True) as caught_warnings:
                        torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    reissue_pt_warnings(caught_warnings)

                # Save the Trainer state
                if self.is_world_process_zero():
                    self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
        else:
            # Save model checkpoint (stock behavior: one folder per step).
            checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"

            if self.hp_search_backend is not None and trial is not None:
                # Hyperparameter search: nest checkpoints under the run name.
                if self.hp_search_backend == HPSearchBackend.OPTUNA:
                    run_id = trial.number
                else:
                    from ray import tune

                    run_id = tune.get_trial_id()
                run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
                output_dir = os.path.join(self.args.output_dir, run_name, checkpoint_folder)
            else:
                output_dir = os.path.join(self.args.output_dir, checkpoint_folder)

                self.store_flos()

            self.save_model(output_dir)
            if self.deepspeed:
                self.deepspeed.save_checkpoint(output_dir)

            # Save optimizer and scheduler
            if self.sharded_dpp:
                self.optimizer.consolidate_state_dict()

            if is_torch_tpu_available():
                xm.rendezvous("saving_optimizer_states")
                xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                with warnings.catch_warnings(record=True) as caught_warnings:
                    xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    reissue_pt_warnings(caught_warnings)
            elif self.is_world_process_zero() and not self.deepspeed:
                # deepspeed.save_checkpoint above saves model/optim/sched
                torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                with warnings.catch_warnings(record=True) as caught_warnings:
                    torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                reissue_pt_warnings(caught_warnings)

            # Save the Trainer state
            if self.is_world_process_zero():
                self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))

            # Maybe delete some older checkpoints.
            if self.is_world_process_zero():
                self._rotate_checkpoints(use_mtime=True)
def train(self, model_path: Optional[str] = None, trial: Union["optuna.Trial", Dict[str, Any]] = None):
"""
Main training entry point.
Args:
model_path (:obj:`str`, `optional`):
Local path to the model if the model to train has been instantiated from a local path. If present,
training will resume from the optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
The main difference between ours and Huggingface's original implementation is that we
also load model_args when reloading best checkpoints for evaluation.
"""
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
model = self.call_model_init(trial)
if not self.is_model_parallel:
model = model.to(self.args.device)
self.model = model
self.model_wrapped = model
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Keeping track whether we can can len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if self.args.max_steps > 0:
max_steps = self.args.max_steps
num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
self.args.max_steps % num_update_steps_per_epoch > 0
)
else:
max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(self.args.num_train_epochs)
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = self.args.max_steps
num_train_epochs = 1
num_update_steps_per_epoch = max_steps
if self.args.deepspeed:
model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)
self.model = model.module
self.model_wrapped = model # will get further wrapped in DDP
self.deepspeed = model # DeepSpeedEngine object
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
else:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(model_path)
model = self.model_wrapped
# Mixed precision training with apex (torch < 1.6)
if self.use_apex:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if self.sharded_dpp:
model = ShardedDDP(model, self.optimizer)
elif self.args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank],
output_device=self.args.local_rank,
find_unused_parameters=(
not getattr(model.config, "gradient_checkpointing", False)
if isinstance(model, PreTrainedModel)
else True
),
)
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), DDP(Deepspeed(Transformers Model)), etc.
# Train!
if is_torch_tpu_available():
total_train_batch_size = self.args.train_batch_size * xm.xrt_world_size()
else:
total_train_batch_size = (
self.args.train_batch_size
* self.args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1)
)
num_examples = (
self.num_examples(train_dataloader)
if train_dataset_is_sized
else total_train_batch_size * self.args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if model_path and os.path.isfile(os.path.join(model_path, "trainer_state.json")):
self.state = TrainerState.load_from_json(os.path.join(model_path, "trainer_state.json"))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not self.args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not self.args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch."
)
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
self.state.trial_params = hp_params(trial) if trial is not None else None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(self.args.device)
# _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = 0
self._total_flos = self.state.total_flos
model.zero_grad()
self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not self.args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
steps_in_epoch = len(train_dataloader) if train_dataset_is_sized else self.args.max_steps
self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
assert train_dataset_is_sized, "currently we only support sized dataloader!"
inputs = None
last_inputs = None
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if (step + 1) % self.args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)
if ((step + 1) % self.args.gradient_accumulation_steps != 0) and self.args.local_rank != -1:
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss += self.training_step(model, inputs)
else:
tr_loss += self.training_step(model, inputs)
self._total_flos += self.floating_point_ops(inputs)
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= self.args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(self.args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
self.args.max_grad_norm,
)
# Optimizer step
if is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.args.tpu_metrics_debug or self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(self.state.best_model_checkpoint, model_args=self.model_args)
if not self.is_model_parallel:
self.model = self.model.to(self.args.device)
else:
state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
metrics = speed_metrics("train", start_time, self.state.max_steps)
if self._total_flos is not None:
self.store_flos()
metrics["total_flos"] = self.state.total_flos
self.log(metrics)
self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics) | 25,360 | 44.368515 | 149 | py |
SimCSE | SimCSE-main/simcse/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
import transformers
from transformers import RobertaTokenizer
from transformers.models.roberta.modeling_roberta import RobertaPreTrainedModel, RobertaModel, RobertaLMHead
from transformers.models.bert.modeling_bert import BertPreTrainedModel, BertModel, BertLMPredictionHead
from transformers.activations import gelu
from transformers.file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import SequenceClassifierOutput, BaseModelOutputWithPoolingAndCrossAttentions
class MLPLayer(nn.Module):
    """Projection head applied on top of the [CLS] representation.

    A single hidden_size -> hidden_size linear layer followed by tanh,
    mirroring BERT's original pooler.
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, features, **kwargs):
        # linear projection followed by tanh squashing
        return self.activation(self.dense(features))
class Similarity(nn.Module):
    """Temperature-scaled cosine similarity between two batches of vectors."""

    def __init__(self, temp):
        super().__init__()
        self.temp = temp
        self.cos = nn.CosineSimilarity(dim=-1)

    def forward(self, x, y):
        # cosine similarity sharpened by the temperature
        similarity = self.cos(x, y)
        return similarity / self.temp
class Pooler(nn.Module):
    """Parameter-free poolers that turn encoder outputs into sentence embeddings.

    'cls': [CLS] representation (combined with the MLP head elsewhere).
    'cls_before_pooler': [CLS] representation without any extra head.
    'avg': attention-masked mean over the last hidden layer.
    'avg_top2': masked mean over the average of the last two layers.
    'avg_first_last': masked mean over the average of the first and last layers.
    """

    _VALID_TYPES = ("cls", "cls_before_pooler", "avg", "avg_top2", "avg_first_last")

    def __init__(self, pooler_type):
        super().__init__()
        assert pooler_type in self._VALID_TYPES, "unrecognized pooling type %s" % pooler_type
        self.pooler_type = pooler_type

    def forward(self, attention_mask, outputs):
        last_hidden = outputs.last_hidden_state
        pooler_output = outputs.pooler_output  # read kept for interface parity (unused below)
        hidden_states = outputs.hidden_states

        if self.pooler_type in ("cls_before_pooler", "cls"):
            return last_hidden[:, 0]

        if self.pooler_type == "avg":
            token_mask = attention_mask.unsqueeze(-1)
            return (last_hidden * token_mask).sum(1) / attention_mask.sum(-1).unsqueeze(-1)

        # Layer-averaging variants: pick the two layers, then masked-mean over tokens.
        if self.pooler_type == "avg_first_last":
            layer_avg = (hidden_states[1] + hidden_states[-1]) / 2.0
        elif self.pooler_type == "avg_top2":
            layer_avg = (hidden_states[-2] + hidden_states[-1]) / 2.0
        else:
            raise NotImplementedError
        token_mask = attention_mask.unsqueeze(-1)
        return (layer_avg * token_mask).sum(1) / attention_mask.sum(-1).unsqueeze(-1)
def cl_init(cls, config):
    """Attach the contrastive-learning components to *cls*.

    Installs the pooler (and, for "cls" pooling, the extra MLP head) plus
    the temperature-scaled similarity, then (re)initializes the weights.
    """
    pooler_type = cls.model_args.pooler_type
    cls.pooler_type = pooler_type
    cls.pooler = Pooler(pooler_type)
    if pooler_type == "cls":
        cls.mlp = MLPLayer(config)
    cls.sim = Similarity(temp=cls.model_args.temp)
    cls.init_weights()
def cl_forward(cls,
    encoder,
    input_ids=None,
    attention_mask=None,
    token_type_ids=None,
    position_ids=None,
    head_mask=None,
    inputs_embeds=None,
    labels=None,
    output_attentions=None,
    output_hidden_states=None,
    return_dict=None,
    mlm_input_ids=None,
    mlm_labels=None,
):
    """Contrastive-learning forward pass.

    Expects input_ids of shape (bs, num_sent, len), where num_sent is 2
    (positive pair) or 3 (pair plus a hard negative). Computes an InfoNCE
    loss over in-batch negatives (embeddings are all-gathered across
    processes when distributed training is active), with an optional
    auxiliary MLM loss when mlm_input_ids/mlm_labels are provided.
    """
    return_dict = return_dict if return_dict is not None else cls.config.use_return_dict
    ori_input_ids = input_ids
    batch_size = input_ids.size(0)
    # Number of sentences in one instance
    # 2: pair instance; 3: pair instance with a hard negative
    num_sent = input_ids.size(1)

    mlm_outputs = None
    # Flatten input for encoding
    input_ids = input_ids.view((-1, input_ids.size(-1))) # (bs * num_sent, len)
    attention_mask = attention_mask.view((-1, attention_mask.size(-1))) # (bs * num_sent len)
    if token_type_ids is not None:
        token_type_ids = token_type_ids.view((-1, token_type_ids.size(-1))) # (bs * num_sent, len)

    # Get raw embeddings (all hidden states only when a layer-averaging pooler needs them)
    outputs = encoder(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=True if cls.model_args.pooler_type in ['avg_top2', 'avg_first_last'] else False,
        return_dict=True,
    )

    # MLM auxiliary objective: a second encoder pass over the masked inputs
    if mlm_input_ids is not None:
        mlm_input_ids = mlm_input_ids.view((-1, mlm_input_ids.size(-1)))
        mlm_outputs = encoder(
            mlm_input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=True if cls.model_args.pooler_type in ['avg_top2', 'avg_first_last'] else False,
            return_dict=True,
        )

    # Pooling
    pooler_output = cls.pooler(attention_mask, outputs)
    pooler_output = pooler_output.view((batch_size, num_sent, pooler_output.size(-1))) # (bs, num_sent, hidden)

    # If using "cls", we add an extra MLP layer
    # (same as BERT's original implementation) over the representation.
    if cls.pooler_type == "cls":
        pooler_output = cls.mlp(pooler_output)

    # Separate representation: z1 = anchor, z2 = positive
    z1, z2 = pooler_output[:,0], pooler_output[:,1]

    # Hard negative
    if num_sent == 3:
        z3 = pooler_output[:, 2]

    # Gather all embeddings if using distributed training
    if dist.is_initialized() and cls.training:
        # Gather hard negative
        if num_sent >= 3:
            z3_list = [torch.zeros_like(z3) for _ in range(dist.get_world_size())]
            dist.all_gather(tensor_list=z3_list, tensor=z3.contiguous())
            z3_list[dist.get_rank()] = z3
            z3 = torch.cat(z3_list, 0)

        # Dummy vectors for allgather
        z1_list = [torch.zeros_like(z1) for _ in range(dist.get_world_size())]
        z2_list = [torch.zeros_like(z2) for _ in range(dist.get_world_size())]
        # Allgather
        dist.all_gather(tensor_list=z1_list, tensor=z1.contiguous())
        dist.all_gather(tensor_list=z2_list, tensor=z2.contiguous())

        # Since allgather results do not have gradients, we replace the
        # current process's corresponding embeddings with original tensors
        z1_list[dist.get_rank()] = z1
        z2_list[dist.get_rank()] = z2
        # Get full batch embeddings: (bs x N, hidden)
        z1 = torch.cat(z1_list, 0)
        z2 = torch.cat(z2_list, 0)

    # All-pairs similarity matrix between anchors and positives
    cos_sim = cls.sim(z1.unsqueeze(1), z2.unsqueeze(0))
    # Hard negative: append anchor-vs-hard-negative similarities as extra columns
    if num_sent >= 3:
        z1_z3_cos = cls.sim(z1.unsqueeze(1), z3.unsqueeze(0))
        cos_sim = torch.cat([cos_sim, z1_z3_cos], 1)

    # The i-th anchor's positive sits on the diagonal -> labels are 0..bs-1
    labels = torch.arange(cos_sim.size(0)).long().to(cls.device)
    loss_fct = nn.CrossEntropyLoss()

    # Calculate loss with hard negatives
    if num_sent == 3:
        # Note that weights are actually logits of weights
        z3_weight = cls.model_args.hard_negative_weight
        weights = torch.tensor(
            [[0.0] * (cos_sim.size(-1) - z1_z3_cos.size(-1)) + [0.0] * i + [z3_weight] + [0.0] * (z1_z3_cos.size(-1) - i - 1) for i in range(z1_z3_cos.size(-1))]
        ).to(cls.device)
        cos_sim = cos_sim + weights

    loss = loss_fct(cos_sim, labels)

    # Calculate loss for MLM
    if mlm_outputs is not None and mlm_labels is not None:
        mlm_labels = mlm_labels.view(-1, mlm_labels.size(-1))
        prediction_scores = cls.lm_head(mlm_outputs.last_hidden_state)
        masked_lm_loss = loss_fct(prediction_scores.view(-1, cls.config.vocab_size), mlm_labels.view(-1))
        loss = loss + cls.model_args.mlm_weight * masked_lm_loss

    if not return_dict:
        output = (cos_sim,) + outputs[2:]
        return ((loss,) + output) if loss is not None else output
    return SequenceClassifierOutput(
        loss=loss,
        logits=cos_sim,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
    )
def sentemb_forward(
    cls,
    encoder,
    input_ids=None,
    attention_mask=None,
    token_type_ids=None,
    position_ids=None,
    head_mask=None,
    inputs_embeds=None,
    labels=None,
    output_attentions=None,
    output_hidden_states=None,
    return_dict=None,
):
    """Plain sentence-embedding forward pass (encoding / evaluation mode)."""
    if return_dict is None:
        return_dict = cls.config.use_return_dict

    # Layer-averaging poolers need every hidden state from the encoder.
    need_all_hidden = cls.pooler_type in ("avg_top2", "avg_first_last")
    outputs = encoder(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=need_all_hidden,
        return_dict=True,
    )

    pooled = cls.pooler(attention_mask, outputs)
    # The "cls" pooler normally goes through the MLP head; it is skipped at
    # inference when mlp_only_train is set.
    if cls.pooler_type == "cls" and not cls.model_args.mlp_only_train:
        pooled = cls.mlp(pooled)

    if not return_dict:
        return (outputs[0], pooled) + outputs[2:]
    return BaseModelOutputWithPoolingAndCrossAttentions(
        pooler_output=pooled,
        last_hidden_state=outputs.last_hidden_state,
        hidden_states=outputs.hidden_states,
    )
class BertForCL(BertPreTrainedModel):
    """BERT encoder wrapped for SimCSE.

    With sent_emb=True the model simply encodes sentences
    (sentemb_forward); otherwise it runs the contrastive objective
    (cl_forward), optionally with the auxiliary MLM head.
    """

    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def __init__(self, config, *model_args, **model_kargs):
        super().__init__(config)
        self.model_args = model_kargs["model_args"]
        # No built-in pooling layer: SimCSE applies its own pooler/MLP head.
        self.bert = BertModel(config, add_pooling_layer=False)
        if self.model_args.do_mlm:
            self.lm_head = BertLMPredictionHead(config)
        cl_init(self, config)

    def forward(self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        sent_emb=False,
        mlm_input_ids=None,
        mlm_labels=None,
    ):
        # Arguments shared by both forward variants.
        shared = dict(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            labels=labels,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if sent_emb:
            return sentemb_forward(self, self.bert, **shared)
        return cl_forward(self, self.bert,
                          mlm_input_ids=mlm_input_ids,
                          mlm_labels=mlm_labels,
                          **shared)
class RobertaForCL(RobertaPreTrainedModel):
    """RoBERTa encoder wrapped for SimCSE.

    forward() dispatches to sentemb_forward() when sent_emb=True (plain
    sentence encoding) and to cl_forward() otherwise (contrastive training,
    optionally with an auxiliary MLM head).
    """

    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def __init__(self, config, *model_args, **model_kargs):
        super().__init__(config)
        self.model_args = model_kargs["model_args"]
        # No built-in pooling layer: SimCSE applies its own pooler/MLP head.
        self.roberta = RobertaModel(config, add_pooling_layer=False)

        if self.model_args.do_mlm:
            self.lm_head = RobertaLMHead(config)

        cl_init(self, config)

    def forward(self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        sent_emb=False,
        mlm_input_ids=None,
        mlm_labels=None,
    ):
        if sent_emb:
            # Encoding mode: return pooled sentence embeddings.
            return sentemb_forward(self, self.roberta,
                input_ids=input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                labels=labels,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        else:
            # Training mode: contrastive loss (optionally plus MLM).
            return cl_forward(self, self.roberta,
                input_ids=input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                labels=labels,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                mlm_input_ids=mlm_input_ids,
                mlm_labels=mlm_labels,
            )
| 13,807 | 34.405128 | 161 | py |
SimCSE | SimCSE-main/demo/gradiodemo.py | import torch
from scipy.spatial.distance import cosine
from transformers import AutoModel, AutoTokenizer
import gradio as gr
# Import our models. The package will take care of downloading the models automatically
tokenizer = AutoTokenizer.from_pretrained("princeton-nlp/sup-simcse-bert-base-uncased")
model = AutoModel.from_pretrained("princeton-nlp/sup-simcse-bert-base-uncased")
def simcse(text1, text2, text3):
    """Embed three sentences and compare sentence one against the other two."""
    # Tokenize all three inputs as a single padded batch.
    batch = tokenizer([text1, text2, text3], padding=True, truncation=True, return_tensors="pt")

    # Encode without tracking gradients; the pooler output is the embedding.
    with torch.no_grad():
        embeddings = model(**batch, output_hidden_states=True, return_dict=True).pooler_output

    # Cosine similarities are in [-1, 1]. Higher means more similar.
    sim_one_two = 1 - cosine(embeddings[0], embeddings[1])
    sim_one_three = 1 - cosine(embeddings[0], embeddings[2])
    return {"cosine similarity": sim_one_two}, {"cosine similarity": sim_one_three}
# Gradio input widgets: three free-form text boxes.
inputs = [
    gr.inputs.Textbox(lines=5, label="Input Text One"),
    gr.inputs.Textbox(lines=5, label="Input Text Two"),
    gr.inputs.Textbox(lines=5, label="Input Text Three")
]

# Two scores are shown: text one vs. text two, and text one vs. text three.
outputs = [
    gr.outputs.Label(type="confidences",label="Cosine similarity between text one and two"),
    gr.outputs.Label(type="confidences", label="Cosine similarity between text one and three")
]

title = "SimCSE"
description = "demo for Princeton-NLP SimCSE. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2104.08821'>SimCSE: Simple Contrastive Learning of Sentence Embeddings</a> | <a href='https://github.com/princeton-nlp/SimCSE'>Github Repo</a></p>"

# One worked example: two paraphrases plus one unrelated sentence.
examples = [
    ["There's a kid on a skateboard.",
    "A kid is skateboarding.",
    "A kid is inside the house."]
]

# Build and launch the Gradio demo (blocks until the server stops).
gr.Interface(simcse, inputs, outputs, title=title, description=description, article=article, examples=examples).launch()
SimCSE | SimCSE-main/demo/flaskdemo.py | import json
import argparse
import torch
import os
import random
import numpy as np
import requests
import logging
import math
import copy
import string
from tqdm import tqdm
from time import time
from flask import Flask, request, jsonify
from flask_cors import CORS
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from simcse import SimCSE
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def run_simcse_demo(port, args):
    """Start the SimCSE similarity-search demo server.

    Builds a SimCSE index over the example sentences once at startup, then
    serves a small Flask app (static UI plus a JSON search API) through
    Tornado's event loop. This call blocks until the IOLoop is stopped.
    """
    app = Flask(__name__, static_folder='./static')
    app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
    CORS(app)

    sentence_path = os.path.join(args.sentences_dir, args.example_sentences)
    query_path = os.path.join(args.sentences_dir, args.example_query)
    # Embed and index the example sentences once; reused by every request.
    embedder = SimCSE(args.model_name_or_path)
    embedder.build_index(sentence_path)

    @app.route('/')
    def index():
        # Serve the single-page UI.
        return app.send_static_file('index.html')

    @app.route('/api', methods=['GET'])
    def api():
        # Search endpoint: ?query=...&topk=...&threshold=...
        query = request.args['query']
        top_k = int(request.args['topk'])
        threshold = float(request.args['threshold'])
        start = time()
        results = embedder.search(query, top_k=top_k, threshold=threshold)
        ret = []
        out = {}
        for sentence, score in results:
            ret.append({"sentence": sentence, "score": score})
        span = time() - start
        out['ret'] = ret
        out['time'] = "{:.4f}".format(span)  # wall-clock search latency, seconds
        return jsonify(out)

    @app.route('/files/<path:path>')
    def static_files(path):
        return app.send_static_file('files/' + path)

    @app.route('/get_examples', methods=['GET'])
    def get_examples():
        # Example queries shown in the UI, one per line in the query file.
        with open(query_path, 'r') as fp:
            examples = [line.strip() for line in fp.readlines()]
        return jsonify(examples)

    addr = args.ip + ":" + args.port
    logger.info(f'Starting Index server at {addr}')

    # NOTE(review): `port` arrives as a string from argparse; confirm
    # Tornado's listen() accepts it as intended on the deployed version.
    http_server = HTTPServer(WSGIContainer(app))
    http_server.listen(port)
    IOLoop.instance().start()
if __name__=="__main__":
    # Command-line entry point for the Flask/Tornado demo server.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name_or_path', default=None, type=str)
    parser.add_argument('--device', default='cpu', type=str)
    parser.add_argument('--sentences_dir', default=None, type=str)
    parser.add_argument('--example_query', default=None, type=str)
    parser.add_argument('--example_sentences', default=None, type=str)
    # NOTE(review): the port stays a string (it is concatenated into the
    # logged address) and is passed unconverted to Tornado's listen().
    parser.add_argument('--port', default='8888', type=str)
    parser.add_argument('--ip', default='http://127.0.0.1')
    parser.add_argument('--load_light', default=False, action='store_true')
    args = parser.parse_args()

    run_simcse_demo(args.port, args)
SimCSE | SimCSE-main/SentEval/examples/infersent.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
InferSent models. See https://github.com/facebookresearch/InferSent.
"""
from __future__ import absolute_import, division, unicode_literals
import sys
import os
import torch
import logging
# get models.py from InferSent repo
from models import InferSent
# Set PATHs
PATH_SENTEVAL = '../'
PATH_TO_DATA = '../data'
PATH_TO_W2V = 'PATH/TO/glove.840B.300d.txt' # or crawl-300d-2M.vec for V2
MODEL_PATH = 'infersent1.pkl'
V = 1 # version of InferSent
assert os.path.isfile(MODEL_PATH) and os.path.isfile(PATH_TO_W2V), \
'Set MODEL and GloVe PATHs'
# import senteval
sys.path.insert(0, PATH_SENTEVAL)
import senteval
def prepare(params, samples):
    """Build the InferSent vocabulary from the raw evaluation sentences."""
    joined = [' '.join(tokens) for tokens in samples]
    params.infersent.build_vocab(joined, tokenize=False)
def batcher(params, batch):
    """Encode a batch of tokenized sentences with InferSent."""
    joined = [' '.join(tokens) for tokens in batch]
    return params.infersent.encode(joined, bsize=params.batch_size, tokenize=False)
"""
Evaluation of trained model on Transfer Tasks (SentEval)
"""
# define senteval params
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
'tenacity': 3, 'epoch_size': 2}
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
    # Load InferSent model
    params_model = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,
                    'pool_type': 'max', 'dpout_model': 0.0, 'version': V}
    model = InferSent(params_model)
    model.load_state_dict(torch.load(MODEL_PATH))
    model.set_w2v_path(PATH_TO_W2V)
    # SentEval reads the encoder from params_senteval inside prepare/batcher.
    params_senteval['infersent'] = model.cuda()

    se = senteval.engine.SE(params_senteval, batcher, prepare)
    # Full SentEval suite: STS, downstream classification, and probing tasks.
    transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
                      'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                      'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
                      'Length', 'WordContent', 'Depth', 'TopConstituents',
                      'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                      'OddManOut', 'CoordinationInversion']
    results = se.eval(transfer_tasks)
    print(results)
| 2,463 | 31 | 92 | py |
SimCSE | SimCSE-main/SentEval/examples/bow.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division, unicode_literals
import sys
import io
import numpy as np
import logging
# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data'
# PATH_TO_VEC = 'glove/glove.840B.300d.txt'
PATH_TO_VEC = 'fasttext/crawl-300d-2M.vec'
# import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
# Create dictionary
def create_dictionary(sentences, threshold=0):
    """Build (id2word, word2id) from tokenized sentences.

    Words are ranked by descending frequency; tokens rarer than *threshold*
    are dropped (when threshold > 0). The special tokens <s>, </s>, <p> get
    huge pseudo-counts so they always occupy the first ids.
    """
    counts = {}
    for sentence in sentences:
        for token in sentence:
            counts[token] = counts.get(token, 0) + 1

    if threshold > 0:
        counts = {tok: n for tok, n in counts.items() if n >= threshold}

    # Special tokens sort to the front via huge pseudo-counts.
    counts['<s>'] = 1e9 + 4
    counts['</s>'] = 1e9 + 3
    counts['<p>'] = 1e9 + 2

    ranked = sorted(counts.items(), key=lambda item: -item[1])  # descending frequency
    id2word = [tok for tok, _ in ranked]
    word2id = {tok: idx for idx, tok in enumerate(id2word)}
    return id2word, word2id
# Get word vectors from vocabulary (glove, word2vec, fasttext ..)
# Get word vectors from vocabulary (glove, word2vec, fasttext ..)
def get_wordvec(path_to_vec, word2id):
    """Load vectors for the words present in *word2id* from a text embedding file.

    Each line of the file is "word v1 v2 ... vn". Only words that appear in
    *word2id* are kept. Returns a dict mapping word -> np.ndarray (float64).
    """
    word_vec = {}

    with io.open(path_to_vec, 'r', encoding='utf-8') as f:
        # if word2vec or fasttext file : skip first line "next(f)"
        for line in f:
            word, vec = line.split(' ', 1)
            if word in word2id:
                # np.fromstring(text, sep=...) is deprecated; parse explicitly.
                word_vec[word] = np.array(vec.split(), dtype=float)

    logging.info('Found {0} words with word vectors, out of \
        {1} words'.format(len(word_vec), len(word2id)))
    return word_vec
# SentEval prepare and batcher
def prepare(params, samples):
    """Populate params with the vocabulary and word vectors before evaluation."""
    _, params.word2id = create_dictionary(samples)
    params.word_vec = get_wordvec(PATH_TO_VEC, params.word2id)
    params.wvec_dim = 300  # GloVe/fastText vectors used here are 300-d
def batcher(params, batch):
    """Embed each sentence as the mean of its known word vectors.

    Sentences with no in-vocabulary word (including empty ones, which are
    replaced by ['.']) fall back to the zero vector.
    """
    sentence_vecs = []
    for tokens in batch:
        if tokens == []:
            tokens = ['.']  # SentEval convention for empty sentences
        known = [params.word_vec[tok] for tok in tokens if tok in params.word_vec]
        if not known:
            known = [np.zeros(params.wvec_dim)]
        sentence_vecs.append(np.mean(known, 0))
    return np.vstack(sentence_vecs)
# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
'tenacity': 3, 'epoch_size': 2}
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
    se = senteval.engine.SE(params_senteval, batcher, prepare)
    # Full SentEval suite evaluated with the bag-of-word-vectors encoder.
    transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
                      'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                      'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
                      'Length', 'WordContent', 'Depth', 'TopConstituents',
                      'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                      'OddManOut', 'CoordinationInversion']
    results = se.eval(transfer_tasks)
    print(results)
| 3,423 | 29.300885 | 82 | py |
SimCSE | SimCSE-main/SentEval/examples/googleuse.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division
import os
import sys
import logging
import tensorflow as tf
import tensorflow_hub as hub
tf.logging.set_verbosity(0)
# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data'
# import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
# tensorflow session
session = tf.Session()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# SentEval prepare and batcher
def prepare(params, samples):
    """No-op: the Universal Sentence Encoder needs no per-task preparation."""
    return
def batcher(params, batch):
    """Embed a batch of tokenized sentences with the encoder callable in params."""
    sentences = [' '.join(tokens) if tokens != [] else '.' for tokens in batch]
    return params['google_use'](sentences)
def make_embed_fn(module):
    """Return a closure that embeds a list of strings with a TF-Hub module.

    Builds its own TF1 graph and a MonitoredSession that stays alive for the
    lifetime of the returned function.
    """
    with tf.Graph().as_default():
        sentences = tf.placeholder(tf.string)
        embed = hub.Module(module)
        embeddings = embed(sentences)
        session = tf.train.MonitoredSession()
    return lambda x: session.run(embeddings, {sentences: x})
# Start TF session and load Google Universal Sentence Encoder
encoder = make_embed_fn("https://tfhub.dev/google/universal-sentence-encoder-large/2")
# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
'tenacity': 3, 'epoch_size': 2}
params_senteval['google_use'] = encoder
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
    se = senteval.engine.SE(params_senteval, batcher, prepare)
    # Full SentEval suite evaluated with the Universal Sentence Encoder.
    transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
                      'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                      'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
                      'Length', 'WordContent', 'Depth', 'TopConstituents',
                      'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                      'OddManOut', 'CoordinationInversion']
    results = se.eval(transfer_tasks)
    print(results)
| 2,205 | 31.441176 | 86 | py |
SimCSE | SimCSE-main/SentEval/examples/models.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
This file contains the definition of encoders used in https://arxiv.org/pdf/1705.02364.pdf
"""
import numpy as np
import time
import torch
import torch.nn as nn
class InferSent(nn.Module):
    """BiLSTM sentence encoder from Conneau et al., "Supervised Learning of
    Universal Sentence Representations from Natural Language Inference Data".

    config keys: bsize, word_emb_dim, enc_lstm_dim, pool_type, dpout_model,
    and optionally version (1: GloVe vocab, 2: fastText vocab).
    """

    def __init__(self, config):
        super(InferSent, self).__init__()
        self.bsize = config['bsize']
        self.word_emb_dim = config['word_emb_dim']
        self.enc_lstm_dim = config['enc_lstm_dim']
        self.pool_type = config['pool_type']
        self.dpout_model = config['dpout_model']
        self.version = 1 if 'version' not in config else config['version']

        self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim, 1,
                                bidirectional=True, dropout=self.dpout_model)

        assert self.version in [1, 2]
        if self.version == 1:
            self.bos = '<s>'
            self.eos = '</s>'
            self.max_pad = True
            self.moses_tok = False
        elif self.version == 2:
            # V2 models use different delimiters, keep zero padding during
            # max-pooling, and Moses-style tokenization.
            self.bos = '<p>'
            self.eos = '</p>'
            self.max_pad = False
            self.moses_tok = True

    def is_cuda(self):
        """True if the model parameters live on GPU."""
        # either all weights are on cpu or they are on gpu
        return self.enc_lstm.bias_hh_l0.data.is_cuda

    def forward(self, sent_tuple):
        """Encode a padded batch; returns (bsize, 2*enc_lstm_dim) embeddings."""
        # sent_len: [max_len, ..., min_len] (bsize)
        # sent: (seqlen x bsize x worddim)
        sent, sent_len = sent_tuple

        # Sort by length (keep idx): pack_padded_sequence needs decreasing lengths.
        sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
        sent_len_sorted = sent_len_sorted.copy()
        idx_unsort = np.argsort(idx_sort)

        idx_sort = torch.from_numpy(idx_sort).cuda() if self.is_cuda() \
            else torch.from_numpy(idx_sort)
        sent = sent.index_select(1, idx_sort)

        # Handling padding in Recurrent Networks
        sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len_sorted)
        sent_output = self.enc_lstm(sent_packed)[0]  # seqlen x batch x 2*nhid
        sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]

        # Un-sort by length
        idx_unsort = torch.from_numpy(idx_unsort).cuda() if self.is_cuda() \
            else torch.from_numpy(idx_unsort)
        sent_output = sent_output.index_select(1, idx_unsort)

        # Pooling
        if self.pool_type == "mean":
            sent_len = torch.FloatTensor(sent_len.copy()).unsqueeze(1).cuda()
            emb = torch.sum(sent_output, 0).squeeze(0)
            emb = emb / sent_len.expand_as(emb)
        elif self.pool_type == "max":
            if not self.max_pad:
                # Mask padding positions so they never win the max.
                sent_output[sent_output == 0] = -1e9
            emb = torch.max(sent_output, 0)[0]
            if emb.ndimension() == 3:
                emb = emb.squeeze(0)
                assert emb.ndimension() == 2

        return emb

    def set_w2v_path(self, w2v_path):
        """Record the path of the word-vector text file (GloVe/fastText)."""
        self.w2v_path = w2v_path

    def get_word_dict(self, sentences, tokenize=True):
        """Collect the set of words appearing in *sentences* (plus bos/eos)."""
        # create vocab of words
        word_dict = {}
        sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
        for sent in sentences:
            for word in sent:
                if word not in word_dict:
                    word_dict[word] = ''
        word_dict[self.bos] = ''
        word_dict[self.eos] = ''
        return word_dict

    def get_w2v(self, word_dict):
        """Load vectors for every word in *word_dict* found in the w2v file."""
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        # create word_vec with w2v vectors
        word_vec = {}
        with open(self.w2v_path, encoding='utf-8') as f:
            for line in f:
                word, vec = line.split(' ', 1)
                if word in word_dict:
                    # NOTE(review): np.fromstring is deprecated in newer numpy;
                    # np.array(vec.split(), dtype=float) is the modern equivalent.
                    word_vec[word] = np.fromstring(vec, sep=' ')
        print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
        return word_vec

    def get_w2v_k(self, K):
        """Load vectors for the K most frequent words (always including bos/eos)."""
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        # create word_vec with k first w2v vectors
        k = 0
        word_vec = {}
        with open(self.w2v_path, encoding='utf-8') as f:
            for line in f:
                word, vec = line.split(' ', 1)
                if k <= K:
                    word_vec[word] = np.fromstring(vec, sep=' ')
                    k += 1
                if k > K:
                    # Past the cutoff, only pick up the delimiter tokens.
                    if word in [self.bos, self.eos]:
                        word_vec[word] = np.fromstring(vec, sep=' ')
                if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
                    break
        return word_vec

    def build_vocab(self, sentences, tokenize=True):
        """Build self.word_vec from the words of *sentences*."""
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        word_dict = self.get_word_dict(sentences, tokenize)
        self.word_vec = self.get_w2v(word_dict)
        print('Vocab size : %s' % (len(self.word_vec)))

    # build w2v vocab with k most frequent words
    def build_vocab_k_words(self, K):
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        self.word_vec = self.get_w2v_k(K)
        print('Vocab size : %s' % (K))

    def update_vocab(self, sentences, tokenize=True):
        """Add vectors for words of *sentences* not already in the vocab."""
        assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
        assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
        word_dict = self.get_word_dict(sentences, tokenize)

        # keep only new words
        for word in self.word_vec:
            if word in word_dict:
                del word_dict[word]

        # update vocabulary
        if word_dict:
            new_word_vec = self.get_w2v(word_dict)
            self.word_vec.update(new_word_vec)
        else:
            new_word_vec = []
        print('New vocab size : %s (added %s words)'% (len(self.word_vec), len(new_word_vec)))

    def get_batch(self, batch):
        """Embed a length-sorted batch into a (max_len, bsize, word_emb_dim) tensor."""
        # sent in batch in decreasing order of lengths
        # batch: (bsize, max_len, word_dim)
        embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))

        for i in range(len(batch)):
            for j in range(len(batch[i])):
                embed[j, i, :] = self.word_vec[batch[i][j]]

        return torch.FloatTensor(embed)

    def tokenize(self, s):
        """Tokenize one sentence (NLTK; Moses-style post-fix for V2 models)."""
        from nltk.tokenize import word_tokenize
        if self.moses_tok:
            s = ' '.join(word_tokenize(s))
            s = s.replace(" n't ", "n 't ")  # HACK to get ~MOSES tokenization
            return s.split()
        else:
            return word_tokenize(s)

    def prepare_samples(self, sentences, bsize, tokenize, verbose):
        """Tokenize, wrap with bos/eos, drop OOV words, and sort by length.

        Returns (sentences, lengths, idx_sort); np.argsort(idx_sort) undoes
        the length ordering.
        """
        sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
                     [self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
        n_w = np.sum([len(x) for x in sentences])

        # filters words without w2v vectors
        for i in range(len(sentences)):
            s_f = [word for word in sentences[i] if word in self.word_vec]
            if not s_f:
                import warnings
                warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
                               Replacing by "</s>"..' % (sentences[i], i))
                s_f = [self.eos]
            sentences[i] = s_f

        lengths = np.array([len(s) for s in sentences])
        n_wk = np.sum(lengths)
        if verbose:
            print('Nb words kept : %s/%s (%.1f%s)' % (
                        n_wk, n_w, 100.0 * n_wk / n_w, '%'))

        # sort by decreasing length
        lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
        sentences = np.array(sentences)[idx_sort]

        return sentences, lengths, idx_sort

    def encode(self, sentences, bsize=64, tokenize=True, verbose=False):
        """Encode raw sentences into a numpy array (rows in the input order)."""
        tic = time.time()
        sentences, lengths, idx_sort = self.prepare_samples(
                        sentences, bsize, tokenize, verbose)

        embeddings = []
        for stidx in range(0, len(sentences), bsize):
            batch = self.get_batch(sentences[stidx:stidx + bsize])
            if self.is_cuda():
                batch = batch.cuda()
            with torch.no_grad():
                batch = self.forward((batch, lengths[stidx:stidx + bsize])).data.cpu().numpy()
            embeddings.append(batch)
        embeddings = np.vstack(embeddings)

        # unsort back to the caller's order
        idx_unsort = np.argsort(idx_sort)
        embeddings = embeddings[idx_unsort]

        if verbose:
            print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
                    len(embeddings)/(time.time()-tic),
                    'gpu' if self.is_cuda() else 'cpu', bsize))
        return embeddings

    def visualize(self, sent, tokenize=True):
        """Plot how often each word provides the max-pooled features."""
        sent = sent.split() if not tokenize else self.tokenize(sent)
        sent = [[self.bos] + [word for word in sent if word in self.word_vec] + [self.eos]]

        if ' '.join(sent[0]) == '%s %s' % (self.bos, self.eos):
            import warnings
            warnings.warn('No words in "%s" have w2v vectors. Replacing \
                           by "%s %s"..' % (sent, self.bos, self.eos))
        batch = self.get_batch(sent)

        if self.is_cuda():
            batch = batch.cuda()
        output = self.enc_lstm(batch)[0]
        output, idxs = torch.max(output, 0)
        # output, idxs = output.squeeze(), idxs.squeeze()
        idxs = idxs.data.cpu().numpy()
        # count how many feature dimensions each word position won
        argmaxs = [np.sum((idxs == k)) for k in range(len(sent[0]))]

        # visualize model
        import matplotlib.pyplot as plt
        x = range(len(sent[0]))
        y = [100.0 * n / np.sum(argmaxs) for n in argmaxs]
        plt.xticks(x, sent[0], rotation=45)
        plt.bar(x, y)
        plt.ylabel('%')
        plt.title('Visualisation of words importance')
        plt.show()

        return output, idxs
| 9,875 | 36.12782 | 94 | py |
SimCSE | SimCSE-main/SentEval/examples/gensen.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Clone GenSen repo here: https://github.com/Maluuba/gensen.git
And follow instructions for loading the model used in batcher
"""
from __future__ import absolute_import, division, unicode_literals
import sys
import logging
# import GenSen package
from gensen import GenSen, GenSenSingle
# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data'
# import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
# SentEval prepare and batcher
def prepare(params, samples):
    """No-op: GenSen needs no per-task preparation."""
    return
def batcher(params, batch):
    """Encode a batch of tokenized sentences with the GenSen encoder.

    SentEval supplies `batch` as a list of token lists; empty sentences are
    replaced by '.' so the encoder always receives non-empty input.
    Returns a 2D numpy array of sentence embeddings (last hidden states).
    """
    sentences = [' '.join(sent) if sent != [] else '.' for sent in batch]
    # Bug fix: the original passed the undefined name `sentences` (the join
    # result was bound to `batch`) and called the undefined global `gensen`,
    # raising NameError on the first batch. Use the encoder that the script
    # stores in params_senteval['gensen'].
    _, reps_h_t = params['gensen'].get_representation(
        sentences, pool='last', return_numpy=True, tokenize=True
    )
    return reps_h_t
# Load GenSen model (two single models combined into one multi-task encoder).
gensen_1 = GenSenSingle(
    model_folder='../data/models',
    filename_prefix='nli_large_bothskip',
    pretrained_emb='../data/embedding/glove.840B.300d.h5'
)
gensen_2 = GenSenSingle(
    model_folder='../data/models',
    filename_prefix='nli_large_bothskip_parse',
    pretrained_emb='../data/embedding/glove.840B.300d.h5'
)
gensen_encoder = GenSen(gensen_1, gensen_2)
# Bug fix: the original called gensen.get_representation(sentences, ...)
# here, but neither `gensen` nor `sentences` exists at module level, so the
# script crashed with a NameError before SentEval even started. Encoding is
# done per batch inside batcher() instead.

# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
                                 'tenacity': 3, 'epoch_size': 2}
# batcher() reads the encoder back out of params at evaluation time.
params_senteval['gensen'] = gensen_encoder

# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
    # Build the SentEval engine around the batcher/prepare hooks above and
    # run the full suite of downstream + probing tasks.
    se = senteval.engine.SE(params_senteval, batcher, prepare)
    transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
                      'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                      'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
                      'Length', 'WordContent', 'Depth', 'TopConstituents',
                      'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                      'OddManOut', 'CoordinationInversion']
    results = se.eval(transfer_tasks)
    print(results)
| 2,429 | 31.4 | 82 | py |
SimCSE | SimCSE-main/SentEval/examples/skipthought.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division, unicode_literals
"""
Example of file for SkipThought in SentEval
"""
import logging
import sys
# NOTE(review): sys.setdefaultencoding exists only in Python 2, and even
# there it is hidden unless sys is reload()ed first; on Python 3 this line
# raises AttributeError. Together with the py2-style str() call in batcher,
# this script clearly targets Python 2 — confirm before porting.
sys.setdefaultencoding('utf8')

# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data/senteval_data/'
# Must point at a local clone of the SkipThought code before running.
PATH_TO_SKIPTHOUGHT = ''
assert PATH_TO_SKIPTHOUGHT != '', 'Download skipthought and set correct PATH'
# import skipthought and Senteval
sys.path.insert(0, PATH_TO_SKIPTHOUGHT)
import skipthoughts
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
def prepare(params, samples):
    """SentEval preparation hook; SkipThought needs no precomputation."""
    return None
def batcher(params, batch):
    """Encode a batch of tokenized sentences with SkipThought.

    NOTE(review): `str(' '.join(sent), errors="ignore")` is Python-2-only
    (unicode coercion); on Python 3 calling str() on a str with `errors`
    raises TypeError. Consistent with the sys.setdefaultencoding call above,
    this script targets Python 2 — confirm before porting.
    """
    # Empty token lists become '.' so the encoder always gets valid input.
    batch = [str(' '.join(sent), errors="ignore") if sent != [] else '.' for sent in batch]
    embeddings = skipthoughts.encode(params['encoder'], batch,
                                     verbose=False, use_eos=True)
    return embeddings
# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 10, 'batch_size': 512}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': 64,
'tenacity': 5, 'epoch_size': 4}
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
    # Load SkipThought model (heavy: loads the pretrained encoder weights).
    params_senteval['encoder'] = skipthoughts.load_model()

    se = senteval.engine.SE(params_senteval, batcher, prepare)

    # Full suite of downstream + probing tasks.
    transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
                      'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                      'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
                      'Length', 'WordContent', 'Depth', 'TopConstituents',
                      'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                      'OddManOut', 'CoordinationInversion']
    results = se.eval(transfer_tasks)
    print(results)
| 2,048 | 32.048387 | 97 | py |
SimCSE | SimCSE-main/SentEval/senteval/engine.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
Generic sentence evaluation scripts wrapper
'''
from __future__ import absolute_import, division, unicode_literals
from senteval import utils
from senteval.binary import CREval, MREval, MPQAEval, SUBJEval
from senteval.snli import SNLIEval
from senteval.trec import TRECEval
from senteval.sick import SICKEntailmentEval, SICKEval
from senteval.mrpc import MRPCEval
from senteval.sts import STS12Eval, STS13Eval, STS14Eval, STS15Eval, STS16Eval, STSBenchmarkEval, SICKRelatednessEval, STSBenchmarkFinetune
from senteval.sst import SSTEval
from senteval.rank import ImageCaptionRetrievalEval
from senteval.probing import *
class SE(object):
    """SentEval engine.

    Wraps a user-supplied `batcher` (params, sentences -> embeddings) and an
    optional `prepare` callback, and dispatches evaluation requests to the
    per-task evaluator classes.
    """

    def __init__(self, params, batcher, prepare=None):
        # parameters
        params = utils.dotdict(params)
        # Fill in defaults for any settings the caller did not provide.
        params.usepytorch = True if 'usepytorch' not in params else params.usepytorch
        params.seed = 1111 if 'seed' not in params else params.seed

        params.batch_size = 128 if 'batch_size' not in params else params.batch_size
        params.nhid = 0 if 'nhid' not in params else params.nhid
        params.kfold = 5 if 'kfold' not in params else params.kfold

        if 'classifier' not in params or not params['classifier']:
            params.classifier = {'nhid': 0}

        assert 'nhid' in params.classifier, 'Set number of hidden units in classifier config!!'

        self.params = params

        # batcher and prepare
        self.batcher = batcher
        # Default prepare is a no-op with the same (params, samples) signature.
        self.prepare = prepare if prepare else lambda x, y: None

        # Task names accepted by eval(); anything else is rejected there.
        self.list_tasks = ['CR', 'MR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                           'SICKRelatedness', 'SICKEntailment', 'STSBenchmark',
                           'SNLI', 'ImageCaptionRetrieval', 'STS12', 'STS13',
                           'STS14', 'STS15', 'STS16',
                           'Length', 'WordContent', 'Depth', 'TopConstituents',
                           'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                           'OddManOut', 'CoordinationInversion', 'SICKRelatedness-finetune', 'STSBenchmark-finetune', 'STSBenchmark-fix']

    def eval(self, name):
        # evaluate on evaluation [name], either takes string or list of strings
        if (isinstance(name, list)):
            # Recurse per task; collect results keyed by task name.
            self.results = {x: self.eval(x) for x in name}
            return self.results

        tpath = self.params.task_path
        assert name in self.list_tasks, str(name) + ' not in ' + str(self.list_tasks)

        # Original SentEval tasks: build the matching evaluator instance.
        if name == 'CR':
            self.evaluation = CREval(tpath + '/downstream/CR', seed=self.params.seed)
        elif name == 'MR':
            self.evaluation = MREval(tpath + '/downstream/MR', seed=self.params.seed)
        elif name == 'MPQA':
            self.evaluation = MPQAEval(tpath + '/downstream/MPQA', seed=self.params.seed)
        elif name == 'SUBJ':
            self.evaluation = SUBJEval(tpath + '/downstream/SUBJ', seed=self.params.seed)
        elif name == 'SST2':
            self.evaluation = SSTEval(tpath + '/downstream/SST/binary', nclasses=2, seed=self.params.seed)
        elif name == 'SST5':
            self.evaluation = SSTEval(tpath + '/downstream/SST/fine', nclasses=5, seed=self.params.seed)
        elif name == 'TREC':
            self.evaluation = TRECEval(tpath + '/downstream/TREC', seed=self.params.seed)
        elif name == 'MRPC':
            self.evaluation = MRPCEval(tpath + '/downstream/MRPC', seed=self.params.seed)
        elif name == 'SICKRelatedness':
            self.evaluation = SICKRelatednessEval(tpath + '/downstream/SICK', seed=self.params.seed)
        elif name == 'STSBenchmark':
            self.evaluation = STSBenchmarkEval(tpath + '/downstream/STS/STSBenchmark', seed=self.params.seed)
        elif name == 'STSBenchmark-fix':
            self.evaluation = STSBenchmarkEval(tpath + '/downstream/STS/STSBenchmark-fix', seed=self.params.seed)
        elif name == 'STSBenchmark-finetune':
            self.evaluation = STSBenchmarkFinetune(tpath + '/downstream/STS/STSBenchmark', seed=self.params.seed)
        elif name == 'SICKRelatedness-finetune':
            self.evaluation = SICKEval(tpath + '/downstream/SICK', seed=self.params.seed)
        elif name == 'SICKEntailment':
            self.evaluation = SICKEntailmentEval(tpath + '/downstream/SICK', seed=self.params.seed)
        elif name == 'SNLI':
            self.evaluation = SNLIEval(tpath + '/downstream/SNLI', seed=self.params.seed)
        elif name in ['STS12', 'STS13', 'STS14', 'STS15', 'STS16']:
            # STS years share one evaluator class; pick it by name via eval().
            fpath = name + '-en-test'
            self.evaluation = eval(name + 'Eval')(tpath + '/downstream/STS/' + fpath, seed=self.params.seed)
        elif name == 'ImageCaptionRetrieval':
            self.evaluation = ImageCaptionRetrievalEval(tpath + '/downstream/COCO', seed=self.params.seed)

        # Probing Tasks
        elif name == 'Length':
            self.evaluation = LengthEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'WordContent':
            self.evaluation = WordContentEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'Depth':
            self.evaluation = DepthEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'TopConstituents':
            self.evaluation = TopConstituentsEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'BigramShift':
            self.evaluation = BigramShiftEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'Tense':
            self.evaluation = TenseEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'SubjNumber':
            self.evaluation = SubjNumberEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'ObjNumber':
            self.evaluation = ObjNumberEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'OddManOut':
            self.evaluation = OddManOutEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'CoordinationInversion':
            self.evaluation = CoordinationInversionEval(tpath + '/probing', seed=self.params.seed)

        self.params.current_task = name
        # Let the user's prepare() build vocab / lookup tables for this task.
        self.evaluation.do_prepare(self.params, self.prepare)

        self.results = self.evaluation.run(self.params, self.batcher)

        return self.results
| 6,525 | 49.2 | 139 | py |
SimCSE | SimCSE-main/SentEval/senteval/rank.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
Image-Caption Retrieval with COCO dataset
'''
from __future__ import absolute_import, division, unicode_literals
import os
import sys
import logging
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
from senteval.tools.ranking import ImageSentenceRankingPytorch
class ImageCaptionRetrievalEval(object):
    """COCO image-caption retrieval task.

    Embeds every caption (5 per image) with the user's batcher and trains a
    ranking model between caption embeddings and precomputed image features.
    """

    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task: Image Caption Retrieval *****\n\n')

        # Get captions and image features
        self.seed = seed
        train, dev, test = self.loadFile(task_path)
        self.coco_data = {'train': train, 'dev': dev, 'test': test}

    def do_prepare(self, params, prepare):
        # Hand all captions from all splits to the user's prepare() hook.
        samples = self.coco_data['train']['sent'] + \
                  self.coco_data['dev']['sent'] + \
                  self.coco_data['test']['sent']
        prepare(params, samples)

    def loadFile(self, fpath):
        coco = {}

        for split in ['train', 'valid', 'test']:
            list_sent = []
            list_img_feat = []
            # Pickle loading differs between Python 2 and 3 (encoding arg).
            if sys.version_info < (3, 0):
                with open(os.path.join(fpath, split + '.pkl')) as f:
                    cocodata = pickle.load(f)
            else:
                with open(os.path.join(fpath, split + '.pkl'), 'rb') as f:
                    cocodata = pickle.load(f, encoding='latin1')

            # Keep exactly the first 5 captions per image, each paired with
            # that image's feature vector (so lists stay aligned 5-to-1).
            for imgkey in range(len(cocodata['features'])):
                assert len(cocodata['image_to_caption_ids'][imgkey]) >= 5, \
                       cocodata['image_to_caption_ids'][imgkey]
                for captkey in cocodata['image_to_caption_ids'][imgkey][0:5]:
                    sent = cocodata['captions'][captkey]['cleaned_caption']
                    sent += ' .'  # add punctuation to end of sentence in COCO
                    # NOTE(review): encode('utf-8').split() yields bytes
                    # tokens on Python 3 — looks Python-2 oriented; confirm.
                    list_sent.append(sent.encode('utf-8').split())
                    list_img_feat.append(cocodata['features'][imgkey])

            assert len(list_sent) == len(list_img_feat) and \
                len(list_sent) % 5 == 0
            list_img_feat = np.array(list_img_feat).astype('float32')
            coco[split] = {'sent': list_sent, 'imgfeat': list_img_feat}

        return coco['train'], coco['valid'], coco['test']

    def run(self, params, batcher):
        coco_embed = {'train': {'sentfeat': [], 'imgfeat': []},
                      'dev': {'sentfeat': [], 'imgfeat': []},
                      'test': {'sentfeat': [], 'imgfeat': []}}

        for key in self.coco_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort to reduce padding
            # NOTE(review): np.sort over the caption array sorts
            # lexicographically (not strictly by length); idx_unsort below
            # restores the original order so caption embeddings stay aligned
            # with their image features.
            self.coco_data[key]['sent'] = np.array(self.coco_data[key]['sent'])
            self.coco_data[key]['sent'], idx_sort = np.sort(self.coco_data[key]['sent']), np.argsort(self.coco_data[key]['sent'])
            idx_unsort = np.argsort(idx_sort)

            coco_embed[key]['X'] = []
            nsent = len(self.coco_data[key]['sent'])
            for ii in range(0, nsent, params.batch_size):
                batch = self.coco_data[key]['sent'][ii:ii + params.batch_size]
                embeddings = batcher(params, batch)
                coco_embed[key]['sentfeat'].append(embeddings)
            # Undo the length-sort so rows line up with imgfeat again.
            coco_embed[key]['sentfeat'] = np.vstack(coco_embed[key]['sentfeat'])[idx_unsort]
            coco_embed[key]['imgfeat'] = np.array(self.coco_data[key]['imgfeat'])
            logging.info('Computed {0} embeddings'.format(key))

        config = {'seed': self.seed, 'projdim': 1000, 'margin': 0.2}
        clf = ImageSentenceRankingPytorch(train=coco_embed['train'],
                                          valid=coco_embed['dev'],
                                          test=coco_embed['test'],
                                          config=config)

        # Recall@1/5/10 and median rank, in both retrieval directions.
        bestdevscore, r1_i2t, r5_i2t, r10_i2t, medr_i2t, \
            r1_t2i, r5_t2i, r10_t2i, medr_t2i = clf.run()

        logging.debug("\nTest scores | Image to text: \
            {0}, {1}, {2}, {3}".format(r1_i2t, r5_i2t, r10_i2t, medr_i2t))
        logging.debug("Test scores | Text to image: \
            {0}, {1}, {2}, {3}\n".format(r1_t2i, r5_t2i, r10_t2i, medr_t2i))

        return {'devacc': bestdevscore,
                'acc': [(r1_i2t, r5_i2t, r10_i2t, medr_i2t),
                        (r1_t2i, r5_t2i, r10_t2i, medr_t2i)],
                'ndev': len(coco_embed['dev']['sentfeat']),
                'ntest': len(coco_embed['test']['sentfeat'])}
| 4,643 | 41.605505 | 129 | py |
SimCSE | SimCSE-main/SentEval/senteval/snli.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
SNLI - Entailment
'''
from __future__ import absolute_import, division, unicode_literals
import codecs
import os
import io
import copy
import logging
import numpy as np
from senteval.tools.validation import SplitClassifier
class SNLIEval(object):
    """SNLI entailment task: 3-way classification (entailment / neutral /
    contradiction) over concatenated premise-hypothesis features."""

    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : SNLI Entailment*****\n\n')
        self.seed = seed
        train1 = self.loadFile(os.path.join(taskpath, 's1.train'))
        train2 = self.loadFile(os.path.join(taskpath, 's2.train'))
        trainlabels = io.open(os.path.join(taskpath, 'labels.train'),
                              encoding='utf-8').read().splitlines()

        valid1 = self.loadFile(os.path.join(taskpath, 's1.dev'))
        valid2 = self.loadFile(os.path.join(taskpath, 's2.dev'))
        validlabels = io.open(os.path.join(taskpath, 'labels.dev'),
                              encoding='utf-8').read().splitlines()

        test1 = self.loadFile(os.path.join(taskpath, 's1.test'))
        test2 = self.loadFile(os.path.join(taskpath, 's2.test'))
        testlabels = io.open(os.path.join(taskpath, 'labels.test'),
                             encoding='utf-8').read().splitlines()

        # sort data (by s2 first) to reduce padding
        sorted_train = sorted(zip(train2, train1, trainlabels),
                              key=lambda z: (len(z[0]), len(z[1]), z[2]))
        train2, train1, trainlabels = map(list, zip(*sorted_train))

        sorted_valid = sorted(zip(valid2, valid1, validlabels),
                              key=lambda z: (len(z[0]), len(z[1]), z[2]))
        valid2, valid1, validlabels = map(list, zip(*sorted_valid))

        sorted_test = sorted(zip(test2, test1, testlabels),
                             key=lambda z: (len(z[0]), len(z[1]), z[2]))
        test2, test1, testlabels = map(list, zip(*sorted_test))

        self.samples = train1 + train2 + valid1 + valid2 + test1 + test2
        self.data = {'train': (train1, train2, trainlabels),
                     'valid': (valid1, valid2, validlabels),
                     'test': (test1, test2, testlabels)
                     }

    def do_prepare(self, params, prepare):
        return prepare(params, self.samples)

    def loadFile(self, fpath):
        # Sentences are pre-tokenized; one sentence per line.
        with codecs.open(fpath, 'rb', 'latin-1') as f:
            return [line.split() for line in
                    f.read().splitlines()]

    def run(self, params, batcher):
        self.X, self.y = {}, {}
        dico_label = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        for key in self.data:
            if key not in self.X:
                self.X[key] = []
            if key not in self.y:
                self.y[key] = []

            input1, input2, mylabels = self.data[key]
            enc_input = []
            n_labels = len(mylabels)
            for ii in range(0, n_labels, params.batch_size):
                batch1 = input1[ii:ii + params.batch_size]
                batch2 = input2[ii:ii + params.batch_size]

                if len(batch1) == len(batch2) and len(batch1) > 0:
                    enc1 = batcher(params, batch1)
                    enc2 = batcher(params, batch2)
                    # Standard NLI feature: [u; v; u*v; |u-v|].
                    enc_input.append(np.hstack((enc1, enc2, enc1 * enc2,
                                                np.abs(enc1 - enc2))))
                # NOTE(review): this condition reduces to ii % 20000 == 0
                # (batch_size cancels), so progress logs every 20000 batches.
                if (ii*params.batch_size) % (20000*params.batch_size) == 0:
                    logging.info("PROGRESS (encoding): %.2f%%" %
                                 (100 * ii / n_labels))
            self.X[key] = np.vstack(enc_input)
            self.y[key] = [dico_label[y] for y in mylabels]

        config = {'nclasses': 3, 'seed': self.seed,
                  'usepytorch': params.usepytorch,
                  'cudaEfficient': True,
                  'nhid': params.nhid, 'noreg': True}

        config_classifier = copy.deepcopy(params.classifier)
        config_classifier['max_epoch'] = 15
        config_classifier['epoch_size'] = 1
        config['classifier'] = config_classifier

        clf = SplitClassifier(self.X, self.y, config)
        devacc, testacc = clf.run()
        logging.debug('Dev acc : {0} Test acc : {1} for SNLI\n'
                      .format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(self.data['valid'][0]),
                'ntest': len(self.data['test'][0])}
| 4,577 | 39.157895 | 75 | py |
SimCSE | SimCSE-main/SentEval/senteval/utils.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division, unicode_literals
import numpy as np
import re
import inspect
from torch import optim
def create_dictionary(sentences):
    """Build (id2word, word2id) from tokenized sentences, most frequent
    words first; <s>, </s> and <p> are forced to the lowest ids."""
    counts = {}
    for sentence in sentences:
        for token in sentence:
            counts[token] = counts.get(token, 0) + 1
    # Special symbols get huge pseudo-counts so they sort to the front.
    counts['<s>'] = 1e9 + 4
    counts['</s>'] = 1e9 + 3
    counts['<p>'] = 1e9 + 2
    # counts['<UNK>'] = 1e9 + 1
    ranked = sorted(counts.items(), key=lambda item: -item[1])  # inverse sort
    id2word = [word for word, _ in ranked]
    word2id = {word: idx for idx, word in enumerate(id2word)}
    return id2word, word2id
def cosine(u, v):
    """Cosine similarity between two 1-D vectors."""
    norm_product = np.linalg.norm(u) * np.linalg.norm(v)
    return np.dot(u, v) / norm_product
class dotdict(dict):
    """ dot.notation access to dictionary attributes """
    # Missing keys return None (dict.get semantics) instead of raising
    # AttributeError — callers rely on this for optional settings.
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
def get_optimizer(s):
    """
    Parse optimizer parameters.
    Input should be of the form:
        - "sgd,lr=0.01"
        - "adagrad,lr=0.1,lr_decay=0.05"

    Returns (optimizer_class, params_dict). Raises on an unknown method
    name or on parameters the optimizer's constructor does not accept.
    """
    if "," in s:
        method = s[:s.find(',')]
        optim_params = {}
        for x in s[s.find(',') + 1:].split(','):
            split = x.split('=')
            assert len(split) == 2
            # Values must be plain (optionally signed) decimal numbers.
            assert re.match(r"^[+-]?(\d+(\.\d*)?|\.\d+)$", split[1]) is not None
            optim_params[split[0]] = float(split[1])
    else:
        method = s
        optim_params = {}

    if method == 'adadelta':
        optim_fn = optim.Adadelta
    elif method == 'adagrad':
        optim_fn = optim.Adagrad
    elif method == 'adam':
        optim_fn = optim.Adam
    elif method == 'adamax':
        optim_fn = optim.Adamax
    elif method == 'asgd':
        optim_fn = optim.ASGD
    elif method == 'rmsprop':
        optim_fn = optim.RMSprop
    elif method == 'rprop':
        optim_fn = optim.Rprop
    elif method == 'sgd':
        optim_fn = optim.SGD
        # SGD has no usable default learning rate in this setup.
        assert 'lr' in optim_params
    else:
        raise Exception('Unknown optimization method: "%s"' % method)

    # check that we give good parameters to the optimizer
    # Bug fix: inspect.getargspec was removed in Python 3.11;
    # getfullargspec returns the same positional-arg list at index 0.
    expected_args = inspect.getfullargspec(optim_fn.__init__)[0]
    assert expected_args[:2] == ['self', 'params']
    if not all(k in expected_args[2:] for k in optim_params.keys()):
        raise Exception('Unexpected parameters: expected "%s", got "%s"' % (
            str(expected_args[2:]), str(optim_params.keys())))

    return optim_fn, optim_params
| 2,713 | 27.270833 | 79 | py |
SimCSE | SimCSE-main/SentEval/senteval/binary.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
Binary classifier and corresponding datasets : MR, CR, SUBJ, MPQA
'''
from __future__ import absolute_import, division, unicode_literals
import io
import os
import numpy as np
import logging
from senteval.tools.validation import InnerKFoldClassifier
class BinaryClassifierEval(object):
    """Shared driver for binary sentence-classification tasks (MR, CR,
    SUBJ, MPQA): embeds every sample with `batcher`, then trains an
    inner-k-fold classifier on top of the embeddings."""

    def __init__(self, pos, neg, seed=1111):
        # Positive samples first, labelled 1; negatives follow, labelled 0.
        self.seed = seed
        self.samples = pos + neg
        self.labels = [1] * len(pos) + [0] * len(neg)
        self.n_samples = len(self.samples)

    def do_prepare(self, params, prepare):
        # prepare is given the whole text; whatever it computes is stored
        # on "params" (params.word2id etc.) for later use by "batcher".
        return prepare(params, self.samples)

    def loadFile(self, fpath):
        """Read one tokenized sentence per line; returns token lists."""
        with io.open(fpath, 'r', encoding='latin-1') as f:
            lines = f.read().splitlines()
        return [line.split() for line in lines]

    def run(self, params, batcher):
        # Sort by (length, label) so batches need minimal padding.
        paired = sorted(zip(self.samples, self.labels),
                        key=lambda z: (len(z[0]), z[1]))
        sorted_samples = [sample for sample, _ in paired]
        sorted_labels = [label for _, label in paired]

        logging.info('Generating sentence embeddings')
        chunks = []
        for start in range(0, self.n_samples, params.batch_size):
            chunk = batcher(params, sorted_samples[start:start + params.batch_size])
            chunks.append(chunk)
        enc_input = np.vstack(chunks)
        logging.info('Generated sentence embeddings')

        config = {'nclasses': 2, 'seed': self.seed,
                  'usepytorch': params.usepytorch,
                  'classifier': params.classifier,
                  'nhid': params.nhid, 'kfold': params.kfold}
        clf = InnerKFoldClassifier(enc_input, np.array(sorted_labels), config)
        devacc, testacc = clf.run()
        logging.debug('Dev acc : {0} Test acc : {1}\n'.format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc, 'ndev': self.n_samples,
                'ntest': self.n_samples}
class CREval(BinaryClassifierEval):
    """Customer Reviews (CR): positive vs. negative product reviews."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : CR *****\n\n')
        pos = self.loadFile(os.path.join(task_path, 'custrev.pos'))
        neg = self.loadFile(os.path.join(task_path, 'custrev.neg'))
        super(self.__class__, self).__init__(pos, neg, seed)
class MREval(BinaryClassifierEval):
    """Movie Reviews (MR): positive vs. negative sentiment polarity."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : MR *****\n\n')
        pos = self.loadFile(os.path.join(task_path, 'rt-polarity.pos'))
        neg = self.loadFile(os.path.join(task_path, 'rt-polarity.neg'))
        super(self.__class__, self).__init__(pos, neg, seed)
class SUBJEval(BinaryClassifierEval):
    """Subjectivity (SUBJ): objective vs. subjective sentences.

    Note: objective sentences are passed as the "pos" argument, so they
    receive label 1 and subjective sentences label 0.
    """
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : SUBJ *****\n\n')
        obj = self.loadFile(os.path.join(task_path, 'subj.objective'))
        subj = self.loadFile(os.path.join(task_path, 'subj.subjective'))
        super(self.__class__, self).__init__(obj, subj, seed)
class MPQAEval(BinaryClassifierEval):
    """MPQA opinion-polarity task: positive vs. negative phrases."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : MPQA *****\n\n')
        pos = self.loadFile(os.path.join(task_path, 'mpqa.pos'))
        neg = self.loadFile(os.path.join(task_path, 'mpqa.neg'))
        super(self.__class__, self).__init__(pos, neg, seed)
| 3,712 | 38.924731 | 79 | py |
SimCSE | SimCSE-main/SentEval/senteval/mrpc.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
MRPC : Microsoft Research Paraphrase (detection) Corpus
'''
from __future__ import absolute_import, division, unicode_literals
import os
import logging
import numpy as np
import io
from senteval.tools.validation import KFoldClassifier
from sklearn.metrics import f1_score
class MRPCEval(object):
    """Microsoft Research Paraphrase Corpus: binary paraphrase detection
    over sentence-pair features [|u-v|; u*v]."""

    def __init__(self, task_path, seed=1111):
        logging.info('***** Transfer task : MRPC *****\n\n')
        self.seed = seed
        train = self.loadFile(os.path.join(task_path,
                              'msr_paraphrase_train.txt'))
        test = self.loadFile(os.path.join(task_path,
                             'msr_paraphrase_test.txt'))
        self.mrpc_data = {'train': train, 'test': test}

    def do_prepare(self, params, prepare):
        # TODO : Should we separate samples in "train, test"?
        samples = self.mrpc_data['train']['X_A'] + \
                  self.mrpc_data['train']['X_B'] + \
                  self.mrpc_data['test']['X_A'] + self.mrpc_data['test']['X_B']
        return prepare(params, samples)

    def loadFile(self, fpath):
        # Tab-separated file: label in column 0, sentences in columns 3-4;
        # the first row is a header and is dropped below.
        mrpc_data = {'X_A': [], 'X_B': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                text = line.strip().split('\t')
                mrpc_data['X_A'].append(text[3].split())
                mrpc_data['X_B'].append(text[4].split())
                mrpc_data['y'].append(text[0])

        mrpc_data['X_A'] = mrpc_data['X_A'][1:]
        mrpc_data['X_B'] = mrpc_data['X_B'][1:]
        mrpc_data['y'] = [int(s) for s in mrpc_data['y'][1:]]
        return mrpc_data

    def run(self, params, batcher):
        mrpc_embed = {'train': {}, 'test': {}}

        for key in self.mrpc_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort to reduce padding
            text_data = {}
            sorted_corpus = sorted(zip(self.mrpc_data[key]['X_A'],
                                       self.mrpc_data[key]['X_B'],
                                       self.mrpc_data[key]['y']),
                                   key=lambda z: (len(z[0]), len(z[1]), z[2]))

            text_data['A'] = [x for (x, y, z) in sorted_corpus]
            text_data['B'] = [y for (x, y, z) in sorted_corpus]
            text_data['y'] = [z for (x, y, z) in sorted_corpus]

            # Embed both sides of every pair, batch by batch.
            for txt_type in ['A', 'B']:
                mrpc_embed[key][txt_type] = []
                for ii in range(0, len(text_data['y']), params.batch_size):
                    batch = text_data[txt_type][ii:ii + params.batch_size]
                    embeddings = batcher(params, batch)
                    mrpc_embed[key][txt_type].append(embeddings)
                mrpc_embed[key][txt_type] = np.vstack(mrpc_embed[key][txt_type])
            mrpc_embed[key]['y'] = np.array(text_data['y'])
            logging.info('Computed {0} embeddings'.format(key))

        # Train
        trainA = mrpc_embed['train']['A']
        trainB = mrpc_embed['train']['B']
        # Pair feature: concatenation of |u-v| and elementwise u*v.
        trainF = np.c_[np.abs(trainA - trainB), trainA * trainB]
        trainY = mrpc_embed['train']['y']

        # Test
        testA = mrpc_embed['test']['A']
        testB = mrpc_embed['test']['B']
        testF = np.c_[np.abs(testA - testB), testA * testB]
        testY = mrpc_embed['test']['y']

        config = {'nclasses': 2, 'seed': self.seed,
                  'usepytorch': params.usepytorch,
                  'classifier': params.classifier,
                  'nhid': params.nhid, 'kfold': params.kfold}
        clf = KFoldClassifier(train={'X': trainF, 'y': trainY},
                              test={'X': testF, 'y': testY}, config=config)

        devacc, testacc, yhat = clf.run()
        testf1 = round(100*f1_score(testY, yhat), 2)
        logging.debug('Dev acc : {0} Test acc {1}; Test F1 {2} for MRPC.\n'
                      .format(devacc, testacc, testf1))
        return {'devacc': devacc, 'acc': testacc, 'f1': testf1,
                'ndev': len(trainA), 'ntest': len(testA)}
| 4,202 | 39.028571 | 80 | py |
SimCSE | SimCSE-main/SentEval/senteval/probing.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
probing tasks
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import copy
import logging
import numpy as np
from senteval.tools.validation import SplitClassifier
class PROBINGEval(object):
    """Generic driver for the probing tasks: loads a single TSV file with
    train/dev/test rows, embeds sentences, and trains a split classifier."""

    def __init__(self, task, task_path, seed=1111):
        self.seed = seed
        self.task = task
        logging.debug('***** (Probing) Transfer task : %s classification *****', self.task.upper())
        self.task_data = {'train': {'X': [], 'y': []},
                          'dev': {'X': [], 'y': []},
                          'test': {'X': [], 'y': []}}
        self.loadFile(task_path)
        logging.info('Loaded %s train - %s dev - %s test for %s' %
                     (len(self.task_data['train']['y']), len(self.task_data['dev']['y']),
                      len(self.task_data['test']['y']), self.task))

    def do_prepare(self, params, prepare):
        samples = self.task_data['train']['X'] + self.task_data['dev']['X'] + \
                  self.task_data['test']['X']
        return prepare(params, samples)

    def loadFile(self, fpath):
        # Each row: split-tag \t label \t ... \t sentence. The tag in
        # column 0 ('tr'/'va'/'te') selects the split.
        self.tok2split = {'tr': 'train', 'va': 'dev', 'te': 'test'}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.rstrip().split('\t')
                self.task_data[self.tok2split[line[0]]]['X'].append(line[-1].split())
                self.task_data[self.tok2split[line[0]]]['y'].append(line[1])

        # Map string labels to contiguous integer ids (sorted for
        # determinism) and rewrite the labels in place.
        labels = sorted(np.unique(self.task_data['train']['y']))
        self.tok2label = dict(zip(labels, range(len(labels))))
        self.nclasses = len(self.tok2label)

        for split in self.task_data:
            for i, y in enumerate(self.task_data[split]['y']):
                self.task_data[split]['y'][i] = self.tok2label[y]

    def run(self, params, batcher):
        task_embed = {'train': {}, 'dev': {}, 'test': {}}
        bsize = params.batch_size
        logging.info('Computing embeddings for train/dev/test')
        for key in self.task_data:
            # Sort to reduce padding
            sorted_data = sorted(zip(self.task_data[key]['X'],
                                     self.task_data[key]['y']),
                                 key=lambda z: (len(z[0]), z[1]))
            self.task_data[key]['X'], self.task_data[key]['y'] = map(list, zip(*sorted_data))

            task_embed[key]['X'] = []
            for ii in range(0, len(self.task_data[key]['y']), bsize):
                batch = self.task_data[key]['X'][ii:ii + bsize]
                embeddings = batcher(params, batch)
                task_embed[key]['X'].append(embeddings)
            task_embed[key]['X'] = np.vstack(task_embed[key]['X'])
            task_embed[key]['y'] = np.array(self.task_data[key]['y'])
        logging.info('Computed embeddings')

        config_classifier = {'nclasses': self.nclasses, 'seed': self.seed,
                             'usepytorch': params.usepytorch,
                             'classifier': params.classifier}

        # WordContent is conventionally probed with a linear classifier, so
        # any hidden layer is stripped from a (deep-copied) config.
        if self.task == "WordContent" and params.classifier['nhid'] > 0:
            config_classifier = copy.deepcopy(config_classifier)
            config_classifier['classifier']['nhid'] = 0
            print(params.classifier['nhid'])

        clf = SplitClassifier(X={'train': task_embed['train']['X'],
                                 'valid': task_embed['dev']['X'],
                                 'test': task_embed['test']['X']},
                              y={'train': task_embed['train']['y'],
                                 'valid': task_embed['dev']['y'],
                                 'test': task_embed['test']['y']},
                              config=config_classifier)

        devacc, testacc = clf.run()
        logging.debug('\nDev acc : %.1f Test acc : %.1f for %s classification\n' % (devacc, testacc, self.task.upper()))

        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(task_embed['dev']['X']),
                'ntest': len(task_embed['test']['X'])}
"""
Surface Information
"""
class LengthEval(PROBINGEval):
    """Probing: predict the (binned) length of a sentence."""
    def __init__(self, task_path, seed=1111):
        task_path = os.path.join(task_path, 'sentence_length.txt')
        # labels: bins
        PROBINGEval.__init__(self, 'Length', task_path, seed)
class WordContentEval(PROBINGEval):
    """Probing: identify which of 200 target words a sentence contains."""
    def __init__(self, task_path, seed=1111):
        task_path = os.path.join(task_path, 'word_content.txt')
        # labels: 200 target words
        PROBINGEval.__init__(self, 'WordContent', task_path, seed)
"""
Latent Structural Information
"""
class DepthEval(PROBINGEval):
    """Probing: predict the (binned) depth of the sentence's parse tree."""
    def __init__(self, task_path, seed=1111):
        task_path = os.path.join(task_path, 'tree_depth.txt')
        # labels: bins
        PROBINGEval.__init__(self, 'Depth', task_path, seed)
class TopConstituentsEval(PROBINGEval):
    """Probing: classify the top-constituent sequence of the parse."""
    def __init__(self, task_path, seed=1111):
        task_path = os.path.join(task_path, 'top_constituents.txt')
        # labels: 'PP_NP_VP_.' .. (20 classes)
        PROBINGEval.__init__(self, 'TopConstituents', task_path, seed)
class BigramShiftEval(PROBINGEval):
    """Probing: detect whether two adjacent words have been swapped."""
    def __init__(self, task_path, seed=1111):
        task_path = os.path.join(task_path, 'bigram_shift.txt')
        # labels: 0 or 1
        PROBINGEval.__init__(self, 'BigramShift', task_path, seed)
# TODO: Voice?
"""
Latent Semantic Information
"""
class TenseEval(PROBINGEval):
    """Probing: classify the main verb's tense (present vs. past)."""
    def __init__(self, task_path, seed=1111):
        task_path = os.path.join(task_path, 'past_present.txt')
        # labels: 'PRES', 'PAST'
        PROBINGEval.__init__(self, 'Tense', task_path, seed)
class SubjNumberEval(PROBINGEval):
    """Probing: classify the number (singular/plural) of the subject."""
    def __init__(self, task_path, seed=1111):
        task_path = os.path.join(task_path, 'subj_number.txt')
        # labels: 'NN', 'NNS'
        PROBINGEval.__init__(self, 'SubjNumber', task_path, seed)
class ObjNumberEval(PROBINGEval):
    """Probing: classify the number (singular/plural) of the object."""
    def __init__(self, task_path, seed=1111):
        task_path = os.path.join(task_path, 'obj_number.txt')
        # labels: 'NN', 'NNS'
        PROBINGEval.__init__(self, 'ObjNumber', task_path, seed)
class OddManOutEval(PROBINGEval):
    """Probing: detect whether a random word replaced the original one."""
    def __init__(self, task_path, seed=1111):
        task_path = os.path.join(task_path, 'odd_man_out.txt')
        # labels: 'O', 'C'
        PROBINGEval.__init__(self, 'OddManOut', task_path, seed)
class CoordinationInversionEval(PROBINGEval):
    """Probing: detect whether two coordinated clauses were inverted."""
    def __init__(self, task_path, seed=1111):
        task_path = os.path.join(task_path, 'coordination_inversion.txt')
        # labels: 'O', 'I'
        PROBINGEval.__init__(self, 'CoordinationInversion', task_path, seed)
| 6,786 | 38.459302 | 120 | py |
SimCSE | SimCSE-main/SentEval/senteval/sick.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
SICK Relatedness and Entailment
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import logging
import numpy as np
from sklearn.metrics import mean_squared_error
from scipy.stats import pearsonr, spearmanr
from senteval.tools.relatedness import RelatednessPytorch
from senteval.tools.validation import SplitClassifier
class SICKEval(object):
    def __init__(self, task_path, seed=1111):
        """Load the SICK train/trial/test splits for the relatedness task."""
        logging.debug('***** Transfer task : SICK-Relatedness*****\n\n')
        self.seed = seed
        train = self.loadFile(os.path.join(task_path, 'SICK_train.txt'))
        dev = self.loadFile(os.path.join(task_path, 'SICK_trial.txt'))
        test = self.loadFile(os.path.join(task_path, 'SICK_test_annotated.txt'))
        self.sick_data = {'train': train, 'dev': dev, 'test': test}
def do_prepare(self, params, prepare):
samples = self.sick_data['train']['X_A'] + \
self.sick_data['train']['X_B'] + \
self.sick_data['dev']['X_A'] + \
self.sick_data['dev']['X_B'] + \
self.sick_data['test']['X_A'] + self.sick_data['test']['X_B']
return prepare(params, samples)
def loadFile(self, fpath):
skipFirstLine = True
sick_data = {'X_A': [], 'X_B': [], 'y': []}
with io.open(fpath, 'r', encoding='utf-8') as f:
for line in f:
if skipFirstLine:
skipFirstLine = False
else:
text = line.strip().split('\t')
sick_data['X_A'].append(text[1].split())
sick_data['X_B'].append(text[2].split())
sick_data['y'].append(text[3])
sick_data['y'] = [float(s) for s in sick_data['y']]
return sick_data
def run(self, params, batcher):
sick_embed = {'train': {}, 'dev': {}, 'test': {}}
bsize = params.batch_size
for key in self.sick_data:
logging.info('Computing embedding for {0}'.format(key))
# Sort to reduce padding
sorted_corpus = sorted(zip(self.sick_data[key]['X_A'],
self.sick_data[key]['X_B'],
self.sick_data[key]['y']),
key=lambda z: (len(z[0]), len(z[1]), z[2]))
self.sick_data[key]['X_A'] = [x for (x, y, z) in sorted_corpus]
self.sick_data[key]['X_B'] = [y for (x, y, z) in sorted_corpus]
self.sick_data[key]['y'] = [z for (x, y, z) in sorted_corpus]
for txt_type in ['X_A', 'X_B']:
sick_embed[key][txt_type] = []
for ii in range(0, len(self.sick_data[key]['y']), bsize):
batch = self.sick_data[key][txt_type][ii:ii + bsize]
embeddings = batcher(params, batch)
sick_embed[key][txt_type].append(embeddings)
sick_embed[key][txt_type] = np.vstack(sick_embed[key][txt_type])
sick_embed[key]['y'] = np.array(self.sick_data[key]['y'])
logging.info('Computed {0} embeddings'.format(key))
# Train
trainA = sick_embed['train']['X_A']
trainB = sick_embed['train']['X_B']
trainF = np.c_[np.abs(trainA - trainB), trainA * trainB]
trainY = self.encode_labels(self.sick_data['train']['y'])
# Dev
devA = sick_embed['dev']['X_A']
devB = sick_embed['dev']['X_B']
devF = np.c_[np.abs(devA - devB), devA * devB]
devY = self.encode_labels(self.sick_data['dev']['y'])
# Test
testA = sick_embed['test']['X_A']
testB = sick_embed['test']['X_B']
testF = np.c_[np.abs(testA - testB), testA * testB]
testY = self.encode_labels(self.sick_data['test']['y'])
config = {'seed': self.seed, 'nclasses': 5}
clf = RelatednessPytorch(train={'X': trainF, 'y': trainY},
valid={'X': devF, 'y': devY},
test={'X': testF, 'y': testY},
devscores=self.sick_data['dev']['y'],
config=config)
devspr, yhat = clf.run()
pr = pearsonr(yhat, self.sick_data['test']['y'])[0]
sr = spearmanr(yhat, self.sick_data['test']['y'])[0]
pr = 0 if pr != pr else pr
sr = 0 if sr != sr else sr
se = mean_squared_error(yhat, self.sick_data['test']['y'])
logging.debug('Dev : Spearman {0}'.format(devspr))
logging.debug('Test : Pearson {0} Spearman {1} MSE {2} \
for SICK Relatedness\n'.format(pr, sr, se))
return {'devspearman': devspr, 'pearson': pr, 'spearman': sr, 'mse': se,
'yhat': yhat, 'ndev': len(devA), 'ntest': len(testA)}
def encode_labels(self, labels, nclass=5):
"""
Label encoding from Tree LSTM paper (Tai, Socher, Manning)
"""
Y = np.zeros((len(labels), nclass)).astype('float32')
for j, y in enumerate(labels):
for i in range(nclass):
if i+1 == np.floor(y) + 1:
Y[j, i] = y - np.floor(y)
if i+1 == np.floor(y):
Y[j, i] = np.floor(y) - y + 1
return Y
class SICKEntailmentEval(SICKEval):
    """SICK-Entailment transfer task: 3-way classification
    (CONTRADICTION / NEUTRAL / ENTAILMENT) over the same sentence pairs,
    reusing SICKEval's file layout but reading the label column instead of
    the relatedness score."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : SICK-Entailment*****\n\n')
        self.seed = seed
        train = self.loadFile(os.path.join(task_path, 'SICK_train.txt'))
        dev = self.loadFile(os.path.join(task_path, 'SICK_trial.txt'))
        test = self.loadFile(os.path.join(task_path, 'SICK_test_annotated.txt'))
        self.sick_data = {'train': train, 'dev': dev, 'test': test}

    def loadFile(self, fpath):
        """Parse one SICK file; 'y' holds integer class ids taken from the
        entailment label in column 4."""
        label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2}
        skipFirstLine = True
        sick_data = {'X_A': [], 'X_B': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                if skipFirstLine:
                    # First row is the header.
                    skipFirstLine = False
                else:
                    text = line.strip().split('\t')
                    sick_data['X_A'].append(text[1].split())
                    sick_data['X_B'].append(text[2].split())
                    sick_data['y'].append(text[4])
        sick_data['y'] = [label2id[s] for s in sick_data['y']]
        return sick_data

    def run(self, params, batcher):
        """Embed all pairs with `batcher`, then train/evaluate a 3-way
        SplitClassifier on |u-v| ++ u*v pair features."""
        sick_embed = {'train': {}, 'dev': {}, 'test': {}}
        bsize = params.batch_size

        for key in self.sick_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort to reduce padding
            # NOTE: the sorted order is written back into self.sick_data.
            sorted_corpus = sorted(zip(self.sick_data[key]['X_A'],
                                       self.sick_data[key]['X_B'],
                                       self.sick_data[key]['y']),
                                   key=lambda z: (len(z[0]), len(z[1]), z[2]))

            self.sick_data[key]['X_A'] = [x for (x, y, z) in sorted_corpus]
            self.sick_data[key]['X_B'] = [y for (x, y, z) in sorted_corpus]
            self.sick_data[key]['y'] = [z for (x, y, z) in sorted_corpus]

            for txt_type in ['X_A', 'X_B']:
                sick_embed[key][txt_type] = []
                for ii in range(0, len(self.sick_data[key]['y']), bsize):
                    batch = self.sick_data[key][txt_type][ii:ii + bsize]
                    embeddings = batcher(params, batch)
                    sick_embed[key][txt_type].append(embeddings)
                sick_embed[key][txt_type] = np.vstack(sick_embed[key][txt_type])
            logging.info('Computed {0} embeddings'.format(key))

        # Train
        trainA = sick_embed['train']['X_A']
        trainB = sick_embed['train']['X_B']
        trainF = np.c_[np.abs(trainA - trainB), trainA * trainB]
        trainY = np.array(self.sick_data['train']['y'])

        # Dev
        devA = sick_embed['dev']['X_A']
        devB = sick_embed['dev']['X_B']
        devF = np.c_[np.abs(devA - devB), devA * devB]
        devY = np.array(self.sick_data['dev']['y'])

        # Test
        testA = sick_embed['test']['X_A']
        testB = sick_embed['test']['X_B']
        testF = np.c_[np.abs(testA - testB), testA * testB]
        testY = np.array(self.sick_data['test']['y'])

        config = {'nclasses': 3, 'seed': self.seed,
                  'usepytorch': params.usepytorch,
                  'classifier': params.classifier,
                  'nhid': params.nhid}
        clf = SplitClassifier(X={'train': trainF, 'valid': devF, 'test': testF},
                              y={'train': trainY, 'valid': devY, 'test': testY},
                              config=config)

        devacc, testacc = clf.run()
        logging.debug('\nDev acc : {0} Test acc : {1} for \
SICK entailment\n'.format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(devA), 'ntest': len(testA)}
| 9,243 | 41.599078 | 80 | py |
SimCSE | SimCSE-main/SentEval/senteval/trec.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
TREC question-type classification
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import logging
import numpy as np
from senteval.tools.validation import KFoldClassifier
class TRECEval(object):
    """TREC question-type classification: 6 coarse classes
    (ABBR/DESC/ENTY/HUM/LOC/NUM)."""

    def __init__(self, task_path, seed=1111):
        logging.info('***** Transfer task : TREC *****\n\n')
        self.seed = seed
        self.train = self.loadFile(os.path.join(task_path, 'train_5500.label'))
        self.test = self.loadFile(os.path.join(task_path, 'TREC_10.label'))

    def do_prepare(self, params, prepare):
        # Expose every sentence (train + test) to the `prepare` hook.
        return prepare(params, self.train['X'] + self.test['X'])

    def loadFile(self, fpath):
        """Parse a 'LABEL:fine question ...' file into
        {'X': token lists, 'y': class ids}."""
        label_map = {'ABBR': 0, 'DESC': 1, 'ENTY': 2,
                     'HUM': 3, 'LOC': 4, 'NUM': 5}
        samples, labels = [], []
        with io.open(fpath, 'r', encoding='latin-1') as fin:
            for raw in fin:
                coarse, rest = raw.strip().split(':', 1)
                # Drop the fine-grained label token, keep the question words.
                tokens = rest.split(' ', 1)[1].split()
                assert coarse in label_map, coarse
                samples.append(tokens)
                labels.append(label_map[coarse])
        return {'X': samples, 'y': labels}

    def run(self, params, batcher):
        """Embed both splits with `batcher`, then train/evaluate a k-fold
        classifier on the frozen embeddings."""
        def sort_split(split):
            # Sort by (length, label) so batches need minimal padding.
            pairs = sorted(zip(split['X'], split['y']),
                           key=lambda z: (len(z[0]), z[1]))
            return [x for x, _ in pairs], [y for _, y in pairs]

        train_samples, train_labels = sort_split(self.train)
        test_samples, test_labels = sort_split(self.test)

        def embed(samples, labels):
            chunks = []
            for start in range(0, len(labels), params.batch_size):
                chunks.append(batcher(params,
                                      samples[start:start + params.batch_size]))
            return np.vstack(chunks)

        train_embeddings = embed(train_samples, train_labels)
        logging.info('Computed train embeddings')
        test_embeddings = embed(test_samples, test_labels)
        logging.info('Computed test embeddings')

        config_classifier = {'nclasses': 6, 'seed': self.seed,
                             'usepytorch': params.usepytorch,
                             'classifier': params.classifier,
                             'kfold': params.kfold}
        clf = KFoldClassifier({'X': train_embeddings,
                               'y': np.array(train_labels)},
                              {'X': test_embeddings,
                               'y': np.array(test_labels)},
                              config_classifier)
        devacc, testacc, _ = clf.run()
        logging.debug('\nDev acc : {0} Test acc : {1} \
for TREC\n'.format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(self.train['X']), 'ntest': len(self.test['X'])}
| 3,565 | 38.622222 | 79 | py |
SimCSE | SimCSE-main/SentEval/senteval/sst.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
SST - binary classification
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import logging
import numpy as np
from senteval.tools.validation import SplitClassifier
class SSTEval(object):
    """SST sentiment classification, either binary (2 classes) or
    fine-grained (5 classes)."""

    def __init__(self, task_path, nclasses=2, seed=1111):
        self.seed = seed
        # Only binary and fine-grained variants exist.
        assert nclasses in [2, 5]
        self.nclasses = nclasses
        self.task_name = 'Binary' if self.nclasses == 2 else 'Fine-Grained'
        logging.debug('***** Transfer task : SST %s classification *****\n\n', self.task_name)
        splits = {}
        for split in ('train', 'dev', 'test'):
            splits[split] = self.loadFile(
                os.path.join(task_path, 'sentiment-' + split))
        self.sst_data = splits

    def do_prepare(self, params, prepare):
        # Expose every sentence from all three splits to the `prepare` hook.
        all_sentences = (self.sst_data['train']['X'] +
                         self.sst_data['dev']['X'] +
                         self.sst_data['test']['X'])
        return prepare(params, all_sentences)

    def loadFile(self, fpath):
        """Parse one split file. Binary files are 'sentence<TAB>label';
        fine-grained files are 'label sentence'."""
        sst_data = {'X': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as fin:
            for raw in fin:
                if self.nclasses == 5:
                    parts = raw.strip().split(' ', 1)
                    sst_data['y'].append(int(parts[0]))
                    sst_data['X'].append(parts[1].split())
                elif self.nclasses == 2:
                    parts = raw.strip().split('\t')
                    sst_data['y'].append(int(parts[1]))
                    sst_data['X'].append(parts[0].split())
        assert max(sst_data['y']) == self.nclasses - 1
        return sst_data

    def run(self, params, batcher):
        """Encode every split with `batcher` and fit a train/dev/test
        classifier on the frozen embeddings."""
        sst_embed = {'train': {}, 'dev': {}, 'test': {}}
        bsize = params.batch_size

        for key in self.sst_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort by (length, label) to reduce padding; the sorted order is
            # written back into self.sst_data.
            pairs = sorted(zip(self.sst_data[key]['X'],
                               self.sst_data[key]['y']),
                           key=lambda z: (len(z[0]), z[1]))
            self.sst_data[key]['X'], self.sst_data[key]['y'] = map(list, zip(*pairs))

            chunks = []
            for start in range(0, len(self.sst_data[key]['y']), bsize):
                batch = self.sst_data[key]['X'][start:start + bsize]
                chunks.append(batcher(params, batch))
            sst_embed[key]['X'] = np.vstack(chunks)
            sst_embed[key]['y'] = np.array(self.sst_data[key]['y'])
            logging.info('Computed {0} embeddings'.format(key))

        config_classifier = {'nclasses': self.nclasses, 'seed': self.seed,
                             'usepytorch': params.usepytorch,
                             'classifier': params.classifier}

        clf = SplitClassifier(X={'train': sst_embed['train']['X'],
                                 'valid': sst_embed['dev']['X'],
                                 'test': sst_embed['test']['X']},
                              y={'train': sst_embed['train']['y'],
                                 'valid': sst_embed['dev']['y'],
                                 'test': sst_embed['test']['y']},
                              config=config_classifier)

        devacc, testacc = clf.run()
        logging.debug('\nDev acc : {0} Test acc : {1} for \
SST {2} classification\n'.format(devacc, testacc, self.task_name))
        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(sst_embed['dev']['X']),
                'ntest': len(sst_embed['test']['X'])}
| 3,946 | 39.690722 | 94 | py |
SimCSE | SimCSE-main/SentEval/senteval/tools/relatedness.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Semantic Relatedness (supervised) with Pytorch
"""
from __future__ import absolute_import, division, unicode_literals
import copy
import numpy as np
import torch
from torch import nn
import torch.optim as optim
from scipy.stats import pearsonr, spearmanr
class RelatednessPytorch(object):
    """Supervised semantic-relatedness regressor (SICK-Relatedness, STS14).

    Trains a single linear layer + softmax over 5 score classes with an MSE
    loss against a target score distribution; model selection is done on the
    dev Spearman correlation. Requires CUDA.
    """
    # Can be used for SICK-Relatedness, and STS14
    def __init__(self, train, valid, test, devscores, config):
        # train/valid/test: dicts with 'X' (features) and 'y' (score
        # distributions); devscores: raw dev gold scores for correlation.
        # fix seed
        np.random.seed(config['seed'])
        torch.manual_seed(config['seed'])
        assert torch.cuda.is_available(), 'torch.cuda required for Relatedness'
        torch.cuda.manual_seed(config['seed'])

        self.train = train
        self.valid = valid
        self.test = test
        self.devscores = devscores

        self.inputdim = train['X'].shape[1]
        self.nclasses = config['nclasses']
        self.seed = config['seed']
        self.l2reg = 0.
        self.batch_size = 64
        self.maxepoch = 1000
        self.early_stop = True

        self.model = nn.Sequential(
            nn.Linear(self.inputdim, self.nclasses),
            nn.Softmax(dim=-1),
        )
        self.loss_fn = nn.MSELoss()

        if torch.cuda.is_available():
            self.model = self.model.cuda()
            self.loss_fn = self.loss_fn.cuda()

        self.loss_fn.size_average = False
        self.optimizer = optim.Adam(self.model.parameters(),
                                    weight_decay=self.l2reg)

    def prepare_data(self, trainX, trainy, devX, devy, testX, testy):
        """Move all six numpy arrays onto the GPU as float tensors."""
        trainX = torch.from_numpy(trainX).float().cuda()
        trainy = torch.from_numpy(trainy).float().cuda()
        devX = torch.from_numpy(devX).float().cuda()
        devy = torch.from_numpy(devy).float().cuda()
        testX = torch.from_numpy(testX).float().cuda()
        # BUG FIX: the converted tensor used to be bound to `testY` while the
        # raw numpy `testy` was returned; convert and return consistently.
        testy = torch.from_numpy(testy).float().cuda()

        return trainX, trainy, devX, devy, testX, testy

    def run(self):
        """Train with early stopping on dev Spearman; return
        (best dev Spearman, test predictions as expected scores)."""
        self.nepoch = 0
        bestpr = -1
        early_stop_count = 0
        r = np.arange(1, 6)  # the 5 possible integer scores
        stop_train = False

        # Preparing data
        trainX, trainy, devX, devy, testX, testy = self.prepare_data(
            self.train['X'], self.train['y'],
            self.valid['X'], self.valid['y'],
            self.test['X'], self.test['y'])

        # Training
        while not stop_train and self.nepoch <= self.maxepoch:
            self.trainepoch(trainX, trainy, nepoches=50)
            # Expected score under the predicted distribution.
            yhat = np.dot(self.predict_proba(devX), r)
            # early stop on dev Spearman correlation
            pr = spearmanr(yhat, self.devscores)[0]
            pr = 0 if pr != pr else pr  # if NaN bc std=0
            if pr > bestpr:
                bestpr = pr
                bestmodel = copy.deepcopy(self.model)
            elif self.early_stop:
                if early_stop_count >= 3:
                    stop_train = True
                early_stop_count += 1
        self.model = bestmodel

        yhat = np.dot(self.predict_proba(testX), r)

        return bestpr, yhat

    def trainepoch(self, X, y, nepoches=1):
        """Run `nepoches` passes of minibatch SGD over (X, y)."""
        self.model.train()
        for _ in range(self.nepoch, self.nepoch + nepoches):
            permutation = np.random.permutation(len(X))
            all_costs = []
            for i in range(0, len(X), self.batch_size):
                # forward
                idx = torch.from_numpy(permutation[i:i + self.batch_size]).long().cuda()
                Xbatch = X[idx]
                ybatch = y[idx]
                output = self.model(Xbatch)
                # loss
                loss = self.loss_fn(output, ybatch)
                all_costs.append(loss.item())
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
        self.nepoch += nepoches

    def predict_proba(self, devX):
        """Return the predicted score distributions as one numpy array."""
        self.model.eval()
        probas = []
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                if len(probas) == 0:
                    probas = self.model(Xbatch).data.cpu().numpy()
                else:
                    probas = np.concatenate((probas, self.model(Xbatch).data.cpu().numpy()), axis=0)
        return probas
| 4,552 | 32.725926 | 100 | py |
SimCSE | SimCSE-main/SentEval/senteval/tools/validation.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Validation and classification
(train) : inner-kfold classifier
(train, test) : kfold classifier
(train, dev, test) : split classifier
"""
from __future__ import absolute_import, division, unicode_literals
import logging
import numpy as np
from senteval.tools.classifier import MLP
import sklearn
assert(sklearn.__version__ >= "0.18.0"), \
"need to update sklearn to version >= 0.18.0"
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
def get_classif_name(classifier_config, usepytorch):
    """Return a short human-readable identifier for the classifier setup."""
    if not usepytorch:
        return 'sklearn-LogReg'
    nhid = classifier_config['nhid']
    optim = classifier_config.get('optim', 'adam')
    bs = classifier_config.get('batch_size', 64)
    return 'pytorch-MLP-nhid%s-%s-bs%s' % (nhid, optim, bs)
# Pytorch version
class InnerKFoldClassifier(object):
    """
    (train) split classifier : InnerKfold.

    Nested cross-validation: an outer k-fold yields train/test splits; an
    inner k-fold on each outer-train split selects the regularisation
    strength, after which the classifier is refit on the full outer-train
    split and scored on the held-out outer fold.
    """
    def __init__(self, X, y, config):
        self.X = X
        self.y = y
        self.featdim = X.shape[1]
        self.nclasses = config['nclasses']
        self.seed = config['seed']
        self.devresults = []   # inner-CV best scores, one per outer fold
        self.testresults = []  # outer held-out scores, one per outer fold
        self.usepytorch = config['usepytorch']
        self.classifier_config = config['classifier']
        self.modelname = get_classif_name(self.classifier_config, self.usepytorch)

        self.k = 5 if 'kfold' not in config else config['kfold']

    def run(self):
        """Return (devaccuracy, testaccuracy), both averaged over the outer
        folds, in percent."""
        logging.info('Training {0} with (inner) {1}-fold cross-validation'
                     .format(self.modelname, self.k))

        # Regularisation grids: l2reg for the MLP, C for LogisticRegression.
        regs = [10**t for t in range(-5, -1)] if self.usepytorch else \
               [2**t for t in range(-2, 4, 1)]
        skf = StratifiedKFold(n_splits=self.k, shuffle=True, random_state=1111)
        innerskf = StratifiedKFold(n_splits=self.k, shuffle=True,
                                   random_state=1111)
        count = 0
        for train_idx, test_idx in skf.split(self.X, self.y):
            count += 1
            X_train, X_test = self.X[train_idx], self.X[test_idx]
            y_train, y_test = self.y[train_idx], self.y[test_idx]
            scores = []
            for reg in regs:
                regscores = []
                for inner_train_idx, inner_test_idx in innerskf.split(X_train, y_train):
                    X_in_train, X_in_test = X_train[inner_train_idx], X_train[inner_test_idx]
                    y_in_train, y_in_test = y_train[inner_train_idx], y_train[inner_test_idx]
                    if self.usepytorch:
                        clf = MLP(self.classifier_config, inputdim=self.featdim,
                                  nclasses=self.nclasses, l2reg=reg,
                                  seed=self.seed)
                        clf.fit(X_in_train, y_in_train,
                                validation_data=(X_in_test, y_in_test))
                    else:
                        clf = LogisticRegression(C=reg, random_state=self.seed)
                        clf.fit(X_in_train, y_in_train)
                    regscores.append(clf.score(X_in_test, y_in_test))
                scores.append(round(100*np.mean(regscores), 2))
            # Pick the reg with the best mean inner-CV accuracy.
            optreg = regs[np.argmax(scores)]
            logging.info('Best param found at split {0}: l2reg = {1} \
with score {2}'.format(count, optreg, np.max(scores)))
            self.devresults.append(np.max(scores))

            # Refit on the whole outer-train split with the selected reg.
            if self.usepytorch:
                clf = MLP(self.classifier_config, inputdim=self.featdim,
                          nclasses=self.nclasses, l2reg=optreg,
                          seed=self.seed)

                clf.fit(X_train, y_train, validation_split=0.05)
            else:
                clf = LogisticRegression(C=optreg, random_state=self.seed)
                clf.fit(X_train, y_train)

            self.testresults.append(round(100*clf.score(X_test, y_test), 2))

        devaccuracy = round(np.mean(self.devresults), 2)
        testaccuracy = round(np.mean(self.testresults), 2)
        return devaccuracy, testaccuracy
class KFoldClassifier(object):
    """
    (train, test) split classifier : cross-validation on train.

    Selects the regularisation strength by k-fold CV on the training set,
    refits on the full training set, and evaluates once on the test set.
    """
    def __init__(self, train, test, config):
        # train/test: dicts with 'X' (features) and 'y' (labels).
        self.train = train
        self.test = test
        self.featdim = self.train['X'].shape[1]
        self.nclasses = config['nclasses']
        self.seed = config['seed']
        self.usepytorch = config['usepytorch']
        self.classifier_config = config['classifier']
        self.modelname = get_classif_name(self.classifier_config, self.usepytorch)

        self.k = 5 if 'kfold' not in config else config['kfold']

    def run(self):
        """Return (devaccuracy, testaccuracy, yhat): best CV accuracy, test
        accuracy (percent), and test-set predictions."""
        # cross-validation
        logging.info('Training {0} with {1}-fold cross-validation'
                     .format(self.modelname, self.k))
        # Regularisation grids: l2reg for the MLP, C for LogisticRegression.
        regs = [10**t for t in range(-5, -1)] if self.usepytorch else \
               [2**t for t in range(-1, 6, 1)]
        skf = StratifiedKFold(n_splits=self.k, shuffle=True,
                              random_state=self.seed)
        scores = []

        for reg in regs:
            scanscores = []
            for train_idx, test_idx in skf.split(self.train['X'],
                                                 self.train['y']):
                # Split data
                X_train, y_train = self.train['X'][train_idx], self.train['y'][train_idx]

                X_test, y_test = self.train['X'][test_idx], self.train['y'][test_idx]

                # Train classifier
                if self.usepytorch:
                    clf = MLP(self.classifier_config, inputdim=self.featdim,
                              nclasses=self.nclasses, l2reg=reg,
                              seed=self.seed)
                    clf.fit(X_train, y_train, validation_data=(X_test, y_test))
                else:
                    clf = LogisticRegression(C=reg, random_state=self.seed)
                    clf.fit(X_train, y_train)
                score = clf.score(X_test, y_test)
                scanscores.append(score)
            # Append mean score
            scores.append(round(100*np.mean(scanscores), 2))

        # evaluation
        logging.info([('reg:' + str(regs[idx]), scores[idx])
                      for idx in range(len(scores))])
        optreg = regs[np.argmax(scores)]
        devaccuracy = np.max(scores)
        logging.info('Cross-validation : best param found is reg = {0} \
with score {1}'.format(optreg, devaccuracy))

        logging.info('Evaluating...')
        # Refit on the full training set with the selected reg.
        if self.usepytorch:
            clf = MLP(self.classifier_config, inputdim=self.featdim,
                      nclasses=self.nclasses, l2reg=optreg,
                      seed=self.seed)
            clf.fit(self.train['X'], self.train['y'], validation_split=0.05)
        else:
            clf = LogisticRegression(C=optreg, random_state=self.seed)
            clf.fit(self.train['X'], self.train['y'])
        yhat = clf.predict(self.test['X'])

        testaccuracy = clf.score(self.test['X'], self.test['y'])
        testaccuracy = round(100*testaccuracy, 2)

        return devaccuracy, testaccuracy, yhat
class SplitClassifier(object):
    """
    (train, valid, test) split classifier.

    Selects the regularisation strength on the validation split, retrains
    on the training split with the best value, and reports validation and
    test accuracy (percent).
    """
    def __init__(self, X, y, config):
        # X, y: dicts with 'train' / 'valid' / 'test' arrays.
        self.X = X
        self.y = y
        self.nclasses = config['nclasses']
        self.featdim = self.X['train'].shape[1]
        self.seed = config['seed']
        self.usepytorch = config['usepytorch']
        self.classifier_config = config['classifier']
        self.cudaEfficient = False if 'cudaEfficient' not in config else \
            config['cudaEfficient']
        self.modelname = get_classif_name(self.classifier_config, self.usepytorch)
        self.noreg = False if 'noreg' not in config else config['noreg']
        self.config = config

    def run(self):
        """Return (devaccuracy, testaccuracy), both in percent."""
        logging.info('Training {0} with standard validation..'
                     .format(self.modelname))
        # Regularisation grids: l2reg for the MLP, C for LogisticRegression.
        regs = [10**t for t in range(-5, -1)] if self.usepytorch else \
               [2**t for t in range(-2, 4, 1)]
        if self.noreg:
            # Effectively disable regularisation (tiny l2reg / huge C).
            regs = [1e-9 if self.usepytorch else 1e9]
        scores = []
        for reg in regs:
            if self.usepytorch:
                clf = MLP(self.classifier_config, inputdim=self.featdim,
                          nclasses=self.nclasses, l2reg=reg,
                          seed=self.seed, cudaEfficient=self.cudaEfficient)

                # TODO: Find a hack for reducing nb epoches in SNLI
                clf.fit(self.X['train'], self.y['train'],
                        validation_data=(self.X['valid'], self.y['valid']))
            else:
                clf = LogisticRegression(C=reg, random_state=self.seed)
                clf.fit(self.X['train'], self.y['train'])
            scores.append(round(100*clf.score(self.X['valid'],
                                self.y['valid']), 2))
        logging.info([('reg:'+str(regs[idx]), scores[idx])
                      for idx in range(len(scores))])
        optreg = regs[np.argmax(scores)]
        devaccuracy = np.max(scores)
        logging.info('Validation : best param found is reg = {0} with score \
{1}'.format(optreg, devaccuracy))
        logging.info('Evaluating...')
        # Retrain with the selected regularisation strength. (A stray
        # LogisticRegression construction that was unconditionally replaced
        # by the branches below has been removed here.)
        if self.usepytorch:
            clf = MLP(self.classifier_config, inputdim=self.featdim,
                      nclasses=self.nclasses, l2reg=optreg,
                      seed=self.seed, cudaEfficient=self.cudaEfficient)

            # TODO: Find a hack for reducing nb epoches in SNLI
            clf.fit(self.X['train'], self.y['train'],
                    validation_data=(self.X['valid'], self.y['valid']))
        else:
            clf = LogisticRegression(C=optreg, random_state=self.seed)
            clf.fit(self.X['train'], self.y['train'])

        testaccuracy = clf.score(self.X['test'], self.y['test'])
        testaccuracy = round(100*testaccuracy, 2)
        return devaccuracy, testaccuracy
| 10,358 | 40.939271 | 93 | py |
SimCSE | SimCSE-main/SentEval/senteval/tools/classifier.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Pytorch Classifier class in the style of scikit-learn
Classifiers include Logistic Regression and MLP
"""
from __future__ import absolute_import, division, unicode_literals
import numpy as np
import copy
from senteval import utils
import torch
from torch import nn
import torch.nn.functional as F
class PyTorchClassifier(object):
    """Base class for small PyTorch classifiers in the scikit-learn style
    (fit / score / predict / predict_proba).

    Subclasses must build `self.model`, `self.loss_fn`, `self.optimizer`
    and set `self.max_epoch`, `self.epoch_size`, `self.tenacity` (see MLP).
    With `cudaEfficient=True`, data is kept on CPU and moved to the GPU one
    minibatch at a time.
    """
    def __init__(self, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111,
                 cudaEfficient=False):
        # fix seed
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

        self.inputdim = inputdim
        self.nclasses = nclasses
        self.l2reg = l2reg
        self.batch_size = batch_size
        self.cudaEfficient = cudaEfficient

    def prepare_split(self, X, y, validation_data=None, validation_split=None):
        """Split (X, y) into train/dev tensors. Either an explicit
        `validation_data=(devX, devy)` pair or a `validation_split`
        fraction must be given."""
        # Preparing validation data
        assert validation_split or validation_data
        if validation_data is not None:
            trainX, trainy = X, y
            devX, devy = validation_data
        else:
            permutation = np.random.permutation(len(X))
            trainidx = permutation[int(validation_split * len(X)):]
            devidx = permutation[0:int(validation_split * len(X))]
            trainX, trainy = X[trainidx], y[trainidx]
            devX, devy = X[devidx], y[devidx]

        # Keep data on CPU when cudaEfficient; minibatches are moved later.
        device = torch.device('cpu') if self.cudaEfficient else torch.device('cuda')
        trainX = torch.from_numpy(trainX).to(device, dtype=torch.float32)
        trainy = torch.from_numpy(trainy).to(device, dtype=torch.int64)
        devX = torch.from_numpy(devX).to(device, dtype=torch.float32)
        devy = torch.from_numpy(devy).to(device, dtype=torch.int64)
        return trainX, trainy, devX, devy

    def fit(self, X, y, validation_data=None, validation_split=None,
            early_stop=True):
        """Train with early stopping on dev accuracy; return the best dev
        accuracy. Training stops after `self.tenacity` non-improving
        evaluations or `self.max_epoch` epochs."""
        self.nepoch = 0
        bestaccuracy = -1
        stop_train = False
        early_stop_count = 0

        # Preparing validation data
        trainX, trainy, devX, devy = self.prepare_split(X, y, validation_data,
                                                        validation_split)

        # Training
        while not stop_train and self.nepoch <= self.max_epoch:
            self.trainepoch(trainX, trainy, epoch_size=self.epoch_size)
            accuracy = self.score(devX, devy)
            if accuracy > bestaccuracy:
                bestaccuracy = accuracy
                bestmodel = copy.deepcopy(self.model)
            elif early_stop:
                if early_stop_count >= self.tenacity:
                    stop_train = True
                early_stop_count += 1
        self.model = bestmodel
        return bestaccuracy

    def trainepoch(self, X, y, epoch_size=1):
        """Run `epoch_size` passes of shuffled minibatch SGD over (X, y)."""
        self.model.train()
        for _ in range(self.nepoch, self.nepoch + epoch_size):
            permutation = np.random.permutation(len(X))
            all_costs = []
            for i in range(0, len(X), self.batch_size):
                # forward
                idx = torch.from_numpy(permutation[i:i + self.batch_size]).long().to(X.device)

                Xbatch = X[idx]
                ybatch = y[idx]

                if self.cudaEfficient:
                    Xbatch = Xbatch.cuda()
                    ybatch = ybatch.cuda()
                output = self.model(Xbatch)
                # loss
                loss = self.loss_fn(output, ybatch)
                all_costs.append(loss.data.item())
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
        self.nepoch += epoch_size

    def score(self, devX, devy):
        """Return classification accuracy of the model on (devX, devy)."""
        self.model.eval()
        correct = 0
        if not isinstance(devX, torch.cuda.FloatTensor) or self.cudaEfficient:
            devX = torch.FloatTensor(devX).cuda()
            devy = torch.LongTensor(devy).cuda()
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                ybatch = devy[i:i + self.batch_size]
                if self.cudaEfficient:
                    Xbatch = Xbatch.cuda()
                    ybatch = ybatch.cuda()
                output = self.model(Xbatch)
                pred = output.data.max(1)[1]
                correct += pred.long().eq(ybatch.data.long()).sum().item()
            accuracy = 1.0 * correct / len(devX)
        return accuracy

    def predict(self, devX):
        """Return predicted class indices for devX."""
        self.model.eval()
        if not isinstance(devX, torch.cuda.FloatTensor):
            devX = torch.FloatTensor(devX).cuda()
        yhat = np.array([])
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                output = self.model(Xbatch)
                yhat = np.append(yhat,
                                 output.data.max(1)[1].cpu().numpy())
        yhat = np.vstack(yhat)
        return yhat

    def predict_proba(self, devX):
        """Return class probabilities for devX as a single numpy array.

        BUG FIX: the previous implementation called F.softmax on a numpy
        array (TypeError), tested the truthiness of an ndarray, and passed
        the second array to np.concatenate as the axis argument.
        """
        self.model.eval()
        probas = []
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                # Softmax over the class dimension, then move to numpy.
                vals = F.softmax(self.model(Xbatch), dim=-1).data.cpu().numpy()
                probas.append(vals)
        return np.concatenate(probas, axis=0)
"""
MLP with Pytorch (nhid=0 --> Logistic Regression)
"""
class MLP(PyTorchClassifier):
    """MLP classifier (nhid=0 --> Logistic Regression). Requires CUDA."""
    def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64,
                 seed=1111, cudaEfficient=False):
        # BUG FIX: use super(MLP, self) instead of super(self.__class__, self),
        # which recurses infinitely if MLP is ever subclassed.
        super(MLP, self).__init__(inputdim, nclasses, l2reg,
                                  batch_size, seed, cudaEfficient)
        """
        PARAMETERS:
        -nhid:       number of hidden units (0: Logistic Regression)
        -optim:      optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..)
        -tenacity:   how many times dev acc does not increase before stopping
        -epoch_size: each epoch corresponds to epoch_size pass on the train set
        -max_epoch:  max number of epoches
        -dropout:    dropout for MLP
        """

        self.nhid = 0 if "nhid" not in params else params["nhid"]
        self.optim = "adam" if "optim" not in params else params["optim"]
        self.tenacity = 5 if "tenacity" not in params else params["tenacity"]
        self.epoch_size = 4 if "epoch_size" not in params else params["epoch_size"]
        self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"]
        self.dropout = 0. if "dropout" not in params else params["dropout"]
        self.batch_size = 64 if "batch_size" not in params else params["batch_size"]

        # BUG FIX: use self.nhid (which defaults to 0) rather than
        # params["nhid"], which raised KeyError when "nhid" was absent even
        # though a default was computed above.
        if self.nhid == 0:
            # Logistic regression: a single linear layer.
            self.model = nn.Sequential(
                nn.Linear(self.inputdim, self.nclasses),
                ).cuda()
        else:
            # One hidden layer with dropout and sigmoid activation.
            self.model = nn.Sequential(
                nn.Linear(self.inputdim, self.nhid),
                nn.Dropout(p=self.dropout),
                nn.Sigmoid(),
                nn.Linear(self.nhid, self.nclasses),
                ).cuda()

        self.loss_fn = nn.CrossEntropyLoss().cuda()
        self.loss_fn.size_average = False

        optim_fn, optim_params = utils.get_optimizer(self.optim)
        self.optimizer = optim_fn(self.model.parameters(), **optim_params)
        self.optimizer.param_groups[0]['weight_decay'] = self.l2reg
| 7,737 | 37.118227 | 94 | py |
SimCSE | SimCSE-main/SentEval/senteval/tools/ranking.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Image Annotation/Search for COCO with Pytorch
"""
from __future__ import absolute_import, division, unicode_literals
import logging
import copy
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
import torch.optim as optim
class COCOProjNet(nn.Module):
    """Projects image and sentence features into a shared space and scores
    anchor pairs against contrastive pairs with dot products of
    L2-normalised projections."""

    def __init__(self, config):
        super(COCOProjNet, self).__init__()
        self.imgdim = config['imgdim']
        self.sentdim = config['sentdim']
        self.projdim = config['projdim']
        self.imgproj = nn.Sequential(
                        nn.Linear(self.imgdim, self.projdim),
                        )
        self.sentproj = nn.Sequential(
                        nn.Linear(self.sentdim, self.projdim),
                        )

    def _unit(self, x):
        # Divide each row by its Euclidean norm.
        norm = torch.sqrt(torch.pow(x, 2).sum(1, keepdim=True))
        return x / norm.expand_as(x)

    def forward(self, img, sent, imgc, sentc):
        # img : (bsize, imgdim)          sent : (bsize, sentdim)
        # imgc: (bsize, ncontrast, imgdim)  sentc: (bsize, ncontrast, sentdim)
        # Repeat each anchor once per contrastive example, then flatten.
        img = img.unsqueeze(1).expand_as(imgc).contiguous()
        img = img.view(-1, self.imgdim)
        imgc = imgc.view(-1, self.imgdim)
        sent = sent.unsqueeze(1).expand_as(sentc).contiguous()
        sent = sent.view(-1, self.sentdim)
        sentc = sentc.view(-1, self.sentdim)

        imgproj = self._unit(self.imgproj(img))
        imgcproj = self._unit(self.imgproj(imgc))
        sentproj = self._unit(self.sentproj(sent))
        sentcproj = self._unit(self.sentproj(sentc))
        # (bsize*ncontrast, projdim)

        anchor1 = (imgproj * sentproj).sum(1)
        anchor2 = (sentproj * imgproj).sum(1)
        img_sentc = (imgproj * sentcproj).sum(1)
        sent_imgc = (sentproj * imgcproj).sum(1)
        # (bsize*ncontrast)
        return anchor1, anchor2, img_sentc, sent_imgc

    def proj_sentence(self, sent):
        """Return L2-normalised sentence projections, (bsize, projdim)."""
        return self._unit(self.sentproj(sent))

    def proj_image(self, img):
        """Return L2-normalised image projections, (bsize, projdim)."""
        return self._unit(self.imgproj(img))
class PairwiseRankingLoss(nn.Module):
    """Margin-based pairwise ranking (hinge) loss for cross-modal retrieval.

    For each sample the positive-pair score must beat every contrastive
    score by at least ``margin``; only positive violations contribute.
    """

    def __init__(self, margin):
        super(PairwiseRankingLoss, self).__init__()
        self.margin = margin

    def forward(self, anchor1, anchor2, img_sentc, sent_imgc):
        """Sum of hinge violations in both retrieval directions."""
        # image anchored against contrastive sentences
        sent_violation = (self.margin - anchor1 + img_sentc).clamp(min=0.0)
        # sentence anchored against contrastive images
        img_violation = (self.margin - anchor2 + sent_imgc).clamp(min=0.0)
        return sent_violation.sum() + img_violation.sum()
class ImageSentenceRankingPytorch(object):
    """Image/Sentence ranking (cross-modal retrieval) on COCO with PyTorch.

    ``train``/``valid``/``test`` are dicts with parallel lists under the keys
    'imgfeat' and 'sentfeat'.  Training uses a contrastive pairwise ranking
    loss over a shared projection space (COCOProjNet + PairwiseRankingLoss).

    NOTE(review): the model, loss and dev/test tensors are moved to CUDA
    unconditionally, so a GPU is required.
    NOTE(review): evaluation assumes each image appears with 5 captions and
    that dev/test each contain 5 folds of 5000 pairs (25k items) -- confirm
    against the dataset actually fed in.
    """
    def __init__(self, train, valid, test, config):
        # fix seed for reproducibility across numpy and torch (CPU + GPU)
        self.seed = config['seed']
        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
        torch.cuda.manual_seed(self.seed)
        self.train = train
        self.valid = valid
        self.test = test
        # feature dimensionalities are inferred from the first training sample
        self.imgdim = len(train['imgfeat'][0])
        self.sentdim = len(train['sentfeat'][0])
        self.projdim = config['projdim']
        self.margin = config['margin']
        # fixed training hyper-parameters
        self.batch_size = 128
        self.ncontrast = 30  # contrastive (negative) samples per positive
        self.maxepoch = 20
        self.early_stop = True
        config_model = {'imgdim': self.imgdim,'sentdim': self.sentdim,
                        'projdim': self.projdim}
        self.model = COCOProjNet(config_model).cuda()
        self.loss_fn = PairwiseRankingLoss(margin=self.margin).cuda()
        self.optimizer = optim.Adam(self.model.parameters())
    def prepare_data(self, trainTxt, trainImg, devTxt, devImg,
                     testTxt, testImg):
        """Convert raw feature lists to FloatTensors.

        Dev/test tensors are moved to the GPU up-front; train tensors stay on
        the CPU (mini-batches are moved in trainepoch).
        """
        trainTxt = torch.FloatTensor(trainTxt)
        trainImg = torch.FloatTensor(trainImg)
        devTxt = torch.FloatTensor(devTxt).cuda()
        devImg = torch.FloatTensor(devImg).cuda()
        testTxt = torch.FloatTensor(testTxt).cuda()
        testImg = torch.FloatTensor(testImg).cuda()
        return trainTxt, trainImg, devTxt, devImg, testTxt, testImg
    def run(self):
        """Train with early stopping on the dev recall-sum, then evaluate the
        best model on the 5 test folds.

        Returns (bestdevscore, i2t r1/r5/r10/medr, t2i r1/r5/r10/medr),
        each metric averaged over the 5 folds.
        """
        self.nepoch = 0
        bestdevscore = -1
        early_stop_count = 0
        stop_train = False
        # Preparing data
        logging.info('prepare data')
        trainTxt, trainImg, devTxt, devImg, testTxt, testImg = \
            self.prepare_data(self.train['sentfeat'], self.train['imgfeat'],
                              self.valid['sentfeat'], self.valid['imgfeat'],
                              self.test['sentfeat'], self.test['imgfeat'])
        # Training
        while not stop_train and self.nepoch <= self.maxepoch:
            logging.info('start epoch')
            self.trainepoch(trainTxt, trainImg, devTxt, devImg, nepoches=1)
            logging.info('Epoch {0} finished'.format(self.nepoch))
            # 'dev' holds the best score of *previous* epochs at this point
            results = {'i2t': {'r1': 0, 'r5': 0, 'r10': 0, 'medr': 0},
                       't2i': {'r1': 0, 'r5': 0, 'r10': 0, 'medr': 0},
                       'dev': bestdevscore}
            score = 0
            # evaluate on 5 dev folds of 5000 pairs each, averaging metrics
            for i in range(5):
                devTxt_i = devTxt[i*5000:(i+1)*5000]
                devImg_i = devImg[i*5000:(i+1)*5000]
                # Compute dev ranks img2txt
                r1_i2t, r5_i2t, r10_i2t, medr_i2t = self.i2t(devImg_i,
                                                             devTxt_i)
                results['i2t']['r1'] += r1_i2t / 5
                results['i2t']['r5'] += r5_i2t / 5
                results['i2t']['r10'] += r10_i2t / 5
                results['i2t']['medr'] += medr_i2t / 5
                logging.info("Image to text: {0}, {1}, {2}, {3}"
                             .format(r1_i2t, r5_i2t, r10_i2t, medr_i2t))
                # Compute dev ranks txt2img
                r1_t2i, r5_t2i, r10_t2i, medr_t2i = self.t2i(devImg_i,
                                                             devTxt_i)
                results['t2i']['r1'] += r1_t2i / 5
                results['t2i']['r5'] += r5_t2i / 5
                results['t2i']['r10'] += r10_t2i / 5
                results['t2i']['medr'] += medr_t2i / 5
                logging.info("Text to Image: {0}, {1}, {2}, {3}"
                             .format(r1_t2i, r5_t2i, r10_t2i, medr_t2i))
                # dev selection score: sum of all six recalls, fold-averaged
                score += (r1_i2t + r5_i2t + r10_i2t +
                          r1_t2i + r5_t2i + r10_t2i) / 5
            logging.info("Dev mean Text to Image: {0}, {1}, {2}, {3}".format(
                results['t2i']['r1'], results['t2i']['r5'],
                results['t2i']['r10'], results['t2i']['medr']))
            logging.info("Dev mean Image to text: {0}, {1}, {2}, {3}".format(
                results['i2t']['r1'], results['i2t']['r5'],
                results['i2t']['r10'], results['i2t']['medr']))
            # early stop on Pearson
            # (comment kept from original; the criterion is actually the
            # recall-sum score above, not a Pearson correlation)
            if score > bestdevscore:
                bestdevscore = score
                bestmodel = copy.deepcopy(self.model)
            elif self.early_stop:
                # stop after 4 consecutive epochs without improvement
                if early_stop_count >= 3:
                    stop_train = True
                early_stop_count += 1
        # bestmodel is always bound here: recalls are >= 0 > initial -1,
        # so the first epoch always improves.
        self.model = bestmodel
        # Compute test for the 5 splits
        results = {'i2t': {'r1': 0, 'r5': 0, 'r10': 0, 'medr': 0},
                   't2i': {'r1': 0, 'r5': 0, 'r10': 0, 'medr': 0},
                   'dev': bestdevscore}
        for i in range(5):
            testTxt_i = testTxt[i*5000:(i+1)*5000]
            testImg_i = testImg[i*5000:(i+1)*5000]
            # Compute test ranks img2txt
            r1_i2t, r5_i2t, r10_i2t, medr_i2t = self.i2t(testImg_i, testTxt_i)
            results['i2t']['r1'] += r1_i2t / 5
            results['i2t']['r5'] += r5_i2t / 5
            results['i2t']['r10'] += r10_i2t / 5
            results['i2t']['medr'] += medr_i2t / 5
            # Compute test ranks txt2img
            r1_t2i, r5_t2i, r10_t2i, medr_t2i = self.t2i(testImg_i, testTxt_i)
            results['t2i']['r1'] += r1_t2i / 5
            results['t2i']['r5'] += r5_t2i / 5
            results['t2i']['r10'] += r10_t2i / 5
            results['t2i']['medr'] += medr_t2i / 5
        return bestdevscore, results['i2t']['r1'], results['i2t']['r5'], \
               results['i2t']['r10'], results['i2t']['medr'], \
               results['t2i']['r1'], results['t2i']['r5'], \
               results['t2i']['r10'], results['t2i']['medr']
    def trainepoch(self, trainTxt, trainImg, devTxt, devImg, nepoches=1):
        """Run ``nepoches`` passes over the training data.

        Every 500 batches the current dev retrieval metrics are logged.
        Contrastive samples are drawn (with replacement) from indices
        outside the current mini-batch.
        """
        self.model.train()
        for _ in range(self.nepoch, self.nepoch + nepoches):
            permutation = list(np.random.permutation(len(trainTxt)))
            all_costs = []  # NOTE(review): collected but never read afterwards
            for i in range(0, len(trainTxt), self.batch_size):
                # forward
                if i % (self.batch_size*500) == 0 and i > 0:
                    # periodic dev evaluation for progress monitoring
                    logging.info('samples : {0}'.format(i))
                    r1_i2t, r5_i2t, r10_i2t, medr_i2t = self.i2t(devImg,
                                                                 devTxt)
                    logging.info("Image to text: {0}, {1}, {2}, {3}".format(
                        r1_i2t, r5_i2t, r10_i2t, medr_i2t))
                    # Compute test ranks txt2img
                    r1_t2i, r5_t2i, r10_t2i, medr_t2i = self.t2i(devImg,
                                                                 devTxt)
                    logging.info("Text to Image: {0}, {1}, {2}, {3}".format(
                        r1_t2i, r5_t2i, r10_t2i, medr_t2i))
                idx = torch.LongTensor(permutation[i:i + self.batch_size])
                imgbatch = Variable(trainImg.index_select(0, idx)).cuda()
                sentbatch = Variable(trainTxt.index_select(0, idx)).cuda()
                # sample ncontrast negatives per positive, excluding the
                # indices of the current batch
                idximgc = np.random.choice(permutation[:i] +
                                           permutation[i + self.batch_size:],
                                           self.ncontrast*idx.size(0))
                idxsentc = np.random.choice(permutation[:i] +
                                            permutation[i + self.batch_size:],
                                            self.ncontrast*idx.size(0))
                idximgc = torch.LongTensor(idximgc)
                idxsentc = torch.LongTensor(idxsentc)
                # Get indexes for contrastive images and sentences
                imgcbatch = Variable(trainImg.index_select(0, idximgc)).view(
                    -1, self.ncontrast, self.imgdim).cuda()
                sentcbatch = Variable(trainTxt.index_select(0, idxsentc)).view(
                    -1, self.ncontrast, self.sentdim).cuda()
                anchor1, anchor2, img_sentc, sent_imgc = self.model(
                    imgbatch, sentbatch, imgcbatch, sentcbatch)
                # loss
                loss = self.loss_fn(anchor1, anchor2, img_sentc, sent_imgc)
                all_costs.append(loss.data.item())
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
        self.nepoch += nepoches
    def t2i(self, images, captions):
        """
        Text-to-image retrieval metrics.

        Images: (5N, imgdim) matrix of images
        Captions: (5N, sentdim) matrix of captions
        Each image is assumed to be repeated 5 times (once per caption);
        only every 5th image embedding is kept as a retrieval candidate.
        Returns (R@1, R@5, R@10, median rank) over caption queries.
        """
        with torch.no_grad():
            # Project images and captions
            img_embed, sent_embed = [], []
            for i in range(0, len(images), self.batch_size):
                img_embed.append(self.model.proj_image(
                    Variable(images[i:i + self.batch_size])))
                sent_embed.append(self.model.proj_sentence(
                    Variable(captions[i:i + self.batch_size])))
            img_embed = torch.cat(img_embed, 0).data
            sent_embed = torch.cat(sent_embed, 0).data
            npts = int(img_embed.size(0) / 5)
            # keep one copy of each image (they are repeated 5x)
            idxs = torch.cuda.LongTensor(range(0, len(img_embed), 5))
            ims = img_embed.index_select(0, idxs)
            ranks = np.zeros(5 * npts)
            for index in range(npts):
                # Get query captions
                queries = sent_embed[5*index: 5*index + 5]
                # Compute scores
                scores = torch.mm(queries, ims.transpose(0, 1)).cpu().numpy()
                inds = np.zeros(scores.shape)
                for i in range(len(inds)):
                    # candidate images sorted by descending similarity;
                    # rank = position of the ground-truth image
                    inds[i] = np.argsort(scores[i])[::-1]
                    ranks[5 * index + i] = np.where(inds[i] == index)[0][0]
            # Compute metrics
            r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
            r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
            r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
            medr = np.floor(np.median(ranks)) + 1
            return (r1, r5, r10, medr)
    def i2t(self, images, captions):
        """
        Image-to-text retrieval metrics.

        Images: (5N, imgdim) matrix of images
        Captions: (5N, sentdim) matrix of captions
        The rank of an image query is the best rank among its 5
        ground-truth captions.
        Returns (R@1, R@5, R@10, median rank) over image queries.
        """
        with torch.no_grad():
            # Project images and captions
            img_embed, sent_embed = [], []
            for i in range(0, len(images), self.batch_size):
                img_embed.append(self.model.proj_image(
                    Variable(images[i:i + self.batch_size])))
                sent_embed.append(self.model.proj_sentence(
                    Variable(captions[i:i + self.batch_size])))
            img_embed = torch.cat(img_embed, 0).data
            sent_embed = torch.cat(sent_embed, 0).data
            npts = int(img_embed.size(0) / 5)
            index_list = []
            ranks = np.zeros(npts)
            for index in range(npts):
                # Get query image (images are repeated 5x; take one copy)
                query_img = img_embed[5 * index]
                # Compute scores
                scores = torch.mm(query_img.view(1, -1),
                                  sent_embed.transpose(0, 1)).view(-1)
                scores = scores.cpu().numpy()
                inds = np.argsort(scores)[::-1]
                index_list.append(inds[0])
                # Score: best (lowest) rank among the 5 matching captions
                rank = 1e20
                for i in range(5*index, 5*index + 5, 1):
                    tmp = np.where(inds == i)[0][0]
                    if tmp < rank:
                        rank = tmp
                ranks[index] = rank
            # Compute metrics
            r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
            r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
            r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
            medr = np.floor(np.median(ranks)) + 1
            return (r1, r5, r10, medr)
| 15,275 | 41.433333 | 109 | py |
pytorch_conv4D | pytorch_conv4D-master/conv4d.py | import numpy as np
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Conv4d_broadcast(nn.Module):
    """Emulate an Nd (3D/4D) circular convolution with a bank of (Nd-1)-D
    convolutions.

    One (Nd-1)-D convolution is created per slice of the kernel along the
    first spatial ("time") axis; each slice's response is rolled along that
    axis and summed, which realizes a circular convolution over it.

    Supported configuration: stride=1, dilation=1, groups=1, Nd in {3, 4},
    and either circular padding or no padding at all.
    """

    def __init__(self, in_channels,
                 out_channels,
                 kernel_size,
                 padding,
                 stride=1,
                 padding_mode='circular',
                 dilation=1,
                 groups=1,
                 bias=True,
                 Nd=4,
                 bias_initializer=None,
                 kernel_initializer=None,
                 channels_last=False):
        super(Conv4d_broadcast, self).__init__()
        assert padding_mode == 'circular' or padding == 0 and padding_mode == 'zeros', \
            'Implemented only for circular or no padding'
        assert stride == 1, "not implemented"
        assert dilation == 1, "not implemented"
        assert groups == 1, "not implemented"
        assert Nd <= 4 and Nd > 2, "not implemented"
        # Broadcast scalar kernel/padding specs to one entry per dimension.
        if not isinstance(kernel_size, (tuple, list)):
            kernel_size = (kernel_size,) * Nd
        if not isinstance(padding, (tuple, list)):
            padding = (padding,) * Nd
        # The inner convolutions operate on all but the first spatial axis.
        self.conv_f = nn.Conv3d if Nd == 4 else nn.Conv2d
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.padding = padding
        self.padding_mode = padding_mode
        self.use_bias = bias
        if bias:
            self.bias = nn.Parameter(torch.randn(out_channels))
        else:
            self.register_parameter('bias', None)
        if bias_initializer is not None:
            bias_initializer(self.bias)
        # One (Nd-1)-D convolution per kernel slice along the first axis.
        memory_fmt = (torch.channels_last, torch.channels_last_3d)[Nd - 3] if channels_last else None
        self.conv_layers = torch.nn.ModuleList()
        for _ in range(self.kernel_size[0]):
            layer = self.conv_f(
                in_channels=in_channels,
                out_channels=self.out_channels,
                bias=False,
                kernel_size=self.kernel_size[1:],
            )
            if kernel_initializer is not None:
                kernel_initializer(layer.weight)
            if memory_fmt is not None:
                layer.to(memory_format=memory_fmt)
            self.conv_layers.append(layer)

    def do_padding(self, input):
        """Circularly pad the trailing spatial dims (the first spatial dim is
        handled by the roll-and-sum in forward, not by padding)."""
        bsz, chans = input.shape[0], input.shape[1]
        in_size = tuple(input.shape[2:])
        padded_size = [in_size[d] + self.padding[d] for d in range(len(in_size))]
        pad_pairs = [((self.padding[d + 1] + 1) // 2, self.padding[d + 1] // 2)
                     for d in range(len(in_size) - 1)]
        # F.pad expects amounts for the last dimension first.
        pad_spec = tuple(np.array(pad_pairs).reshape(-1)[::-1])
        padded = F.pad(  # pad the Ls dims only
            input.reshape(bsz, -1, *in_size[1:]),
            pad_spec,
            'circular',
            0
        )
        return padded.reshape(bsz, chans, -1, *padded_size[1:])

    def forward(self, input):
        """input: (bsize, in_channels, Lt, Ls...) -> (bsize, out_channels, Lt', Ls'...)."""
        if self.padding_mode == 'circular':
            input = self.do_padding(input)
        bsz, chans = input.shape[0], input.shape[1]
        in_size = tuple(input.shape[2:])
        k = self.kernel_size
        out_size = (in_size[0], ) + tuple(
            in_size[d + 1] - k[d + 1] + 1 for d in range(len(in_size) - 1))
        result = torch.zeros((bsz, self.out_channels) + out_size, device=input.device)
        for shift_idx, layer in enumerate(self.conv_layers):
            # Fold the first spatial axis into the batch so the (Nd-1)-D
            # convolution sweeps every slice at once.
            merged = torch.transpose(input, 1, 2).reshape(-1, chans, *in_size[1:])
            out = layer(merged)
            out = out.reshape(bsz, in_size[0], *out.shape[1:])
            out = torch.transpose(out, 1, 2)
            # Each kernel slice contributes at an offset along the first axis.
            result = result + torch.roll(out, -1 * shift_idx, 2)
        if self.use_bias:
            result_shape = result.shape
            result = result.view(bsz, result_shape[1], -1)
            result += self.bias.reshape(1, self.out_channels, 1)
            result = result.view(result_shape)
        # Re-center the circular result, then crop the first axis to its
        # proper output length.
        shift = math.ceil(self.padding[0] / 2)
        result = torch.roll(result, shift, 2)
        dim_size = in_size[0] + self.padding[0] - k[0] + 1
        return result[:, :, :dim_size, ]
class Conv4d_groups(nn.Module):
    """Circular 4D convolution emulated by a single grouped convolution.

    Instead of one convolution per 4th-dimension kernel slice (as in
    Conv4d_broadcast), the input is replicated kernel_size[0] times along
    the channel axis and pushed through one grouped (Nd-1)-D convolution;
    each group corresponds to one kernel slice, and the per-group outputs
    are rolled along the 4th dimension and summed.
    """
    def __init__(self, in_channels: int,
                 out_channels: int,
                 kernel_size,
                 padding,
                 stride=1,
                 padding_mode='circular',
                 dilation: int = 1,
                 groups: int = 1,
                 bias: bool = True,
                 Nd: int = 4,
                 bias_initializer=None,
                 kernel_initializer=None,
                 channels_last=False):
        super(Conv4d_groups, self).__init__()
        assert padding_mode == 'circular' or padding == 0, 'Implemented only for circular or no padding'
        assert stride == 1, "not implemented"
        assert dilation == 1, "not implemented"
        assert groups == 1, "not implemented"
        assert Nd <= 4, "not implemented"
        # NOTE(review): this assert runs before kernel_size/padding are
        # normalised to tuples, so passing tuple arguments raises a
        # TypeError here instead of a clean assertion failure.
        assert padding == kernel_size - 1, "works only in circular mode"
        if not isinstance(kernel_size, tuple):
            kernel_size = tuple(kernel_size for _ in range(Nd))
        if not isinstance(padding, tuple):
            padding = tuple(padding for _ in range(Nd))
        self.conv_f = (nn.Conv1d, nn.Conv2d, nn.Conv3d)[Nd - 2]
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.padding = padding
        self.padding_mode = padding_mode
        self.use_bias = bias
        # bias is applied manually in forward (the inner conv has bias=False)
        self.bias = nn.Parameter(torch.randn(out_channels)) if bias else self.register_parameter('bias', None)
        if bias_initializer is not None:
            bias_initializer(self.bias)
        # One grouped convolution: group g holds the weights of 4th-dim
        # kernel slice g, acting on its own replica of the input channels.
        self.conv = self.conv_f(in_channels=in_channels*self.kernel_size[0],
                                out_channels=self.out_channels*self.kernel_size[0],
                                bias=False,
                                stride=stride,
                                kernel_size=self.kernel_size[1:],
                                padding_mode=self.padding_mode,
                                groups=self.kernel_size[0])
        if channels_last:
            channels_last = [torch.channels_last, torch.channels_last_3d][Nd-3]
            self.conv.to(memory_format=channels_last)
        if kernel_initializer is not None:
            kernel_initializer(self.conv.weight)
    def do_padding(self, input):
        """Circularly pad the trailing spatial (Ls) dims; the 4th dim is
        handled by roll-and-sum in forward."""
        (b, c_i) = tuple(input.shape[0:2])
        size_i = tuple(input.shape[2:])
        size_p = [size_i[i] + self.padding[i] for i in range(len(size_i))]
        # NOTE(review): unlike Conv4d_broadcast.do_padding the pad spec is
        # NOT reversed before handing it to F.pad (which expects
        # last-dim-first order). The two implementations only agree because
        # odd kernels give symmetric per-dim pads -- confirm before
        # enabling even kernel sizes.
        input = F.pad(  # Ls padding
            input.reshape(b, -1, *size_i[1:]),
            tuple(np.array(
                # [(0, self.padding[i+1]) for i in range(len(size_i[1:]))]
                [((self.padding[i+1]+1)//2, self.padding[i+1]//2) for i in range(len(size_i[1:]))]
            ).reshape(-1)),
            'circular',
            0
        ).reshape(b, c_i, -1, *size_p[1:])
        return input
    def forward(self, input):
        """input: (bsize, in_channels, Lt, Ls...) -> (bsize, out_channels, Lt', Ls'...)."""
        if self.padding_mode == 'circular':
            input = self.do_padding(input)
        (b, c_i) = tuple(input.shape[0:2])
        size_i = tuple(input.shape[2:])
        size_k = self.kernel_size
        padding = list(self.padding)
        size_o = (size_i[0], ) + tuple([size_i[x+1] - size_k[x+1] + 1 for x in range(len(size_i[1:]))])
        # size_o = tuple([size_i[x] + padding[x] - size_k[x] + 1 for x in range(len(size_i))])
        cinput = torch.transpose(input, 1, 2)  # (bs, channels, Lt, ...) -> (bs, Lt, channels, ...)
        cinput = cinput.reshape(b * size_i[0], c_i, *size_i[1:])  # (bs, Lt, ...) -> (bs * Lt, ...)
        # replicate channels once per 4th-dim kernel slice so each group
        # sees its own copy:
        # (bs * Lt, c_i, ...) -> (bs * Lt, 1, c_i, ...) -> (bs * Lt, k[0], c_i, ...) -> (bs * Lt, k[0] * c_i, ...)
        cinput = cinput[:,np.newaxis,:] \
            .expand(cinput.shape[0], self.kernel_size[0], *cinput.shape[1:]) \
            .reshape(cinput.shape[0], self.kernel_size[0] * cinput.shape[1], *cinput.shape[2:])
        out = self.conv(cinput)  # out.shape = (bs * Lt, k[0] * c_o, ...)
        # (bs * Lt, c_o * k[0], ...) -> (bs, Lt, k[0], c_o, ...)
        out = out.reshape(b, size_i[0], self.kernel_size[0], self.out_channels, *size_o[1:])
        out = out.transpose(1, 3)  # (bs, Lt, k[0], c_o, ...) -> (bs, c_o, k[0], Lt...)
        out = out.split(1, dim=2)  # (bs, c_o, k[0], Lt...) -> list( (bs, c_o, 1, Lt...) )
        # roll slice g by -g along the 4th dim and sum: circular convolution
        out = torch.stack([torch.roll(out[i].squeeze(2), -1 * i, 2) for i in range(len(out))], dim=0)
        result = torch.sum(out, dim=0)
        if self.use_bias:
            resultShape = result.shape
            result = result.view(b,resultShape[1],-1)
            result += self.bias.reshape(1, self.out_channels, 1)
            result = result.view(resultShape)
        # re-center the circular result and crop the 4th dim to its output length
        shift = math.ceil(padding[0] / 2)
        result = torch.roll(result, shift, 2)
        result = result[:, :, :size_o[0], ]
        return result
| 9,176 | 40.337838 | 116 | py |
pytorch_conv4D | pytorch_conv4D-master/test_conv4d.py | import pytest
import timeit
import numpy as np
import scipy.stats as sns
from functools import partial
import torch
import torch.nn as nn
from .conv4d import Conv4d_broadcast, Conv4d_groups
# Device selection: prefer Intel XPU when intel_extension_for_pytorch is
# installed, otherwise fall back to CUDA/CPU. sync_cmd is the statement
# exec'd by timeit to synchronize the device around timed regions.
try:
    import intel_extension_for_pytorch as ipex
    device = torch.device("xpu" if torch.xpu.is_available() else "cpu")
    sync_cmd = 'torch.xpu.synchronize()'
except Exception:  # was a bare `except:`; keep the fallback but stop swallowing SystemExit/KeyboardInterrupt
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    sync_cmd = 'torch.cuda.synchronize()'
# Tests compare against a float64 reference, so make double the default dtype.
torch.set_default_dtype(torch.float64)
def init(inChans, outChans, L, Nd, bs, ks, isBias, Conv4dClass, channels_last=True):
    """Build a random input batch, a reference nn.Conv3d, and a Conv4dClass
    instance whose weights/bias are copied from the reference.

    The Conv4d weights are initialised by slicing the Conv3d kernel along
    its first spatial axis, so both modules compute the same (circular)
    convolution and their outputs can be compared directly.

    Returns (x, convPT, conv): input tensor, reference conv, conv under test.
    """
    def init_broadcast(weights):
        # Conv4d_broadcast owns one sub-conv per kernel slice; hand out the
        # slices one at a time via a generator, in construction order.
        def wgen(weights):
            for i in range(weights.shape[2]):
                yield weights[:, :, i, ]
        g = wgen(weights)
        def init(x):
            tmp = next(g)
            x.data = tmp
            return x
        return init
    def init_groups(x, weights):
        # Conv4d_groups has a single grouped conv: stack all kernel slices
        # along the output-channel (group) axis.
        Lt = weights.shape[2]
        tmp = [weights[:, :, i, ...] for i in range(Lt)]
        tmp = torch.cat(tmp, dim=0)
        x.data = tmp
        return x
    def init_bias(x, bias):
        x.data = bias
        return x
    # even kernels hit a padding bug in nn.ConvNd (see test parametrization)
    assert ks % 2 == 1, 'Since PT 1.5 works only with odd kernel size'
    padding_mode = 'circular'
    mf = [torch.channels_last, torch.channels_last_3d][Nd-2] if channels_last else torch.contiguous_format
    x = torch.randn(bs, inChans, *((L,)*Nd)).to(device)
    x = x.to(memory_format=mf)
    # reference: plain Conv3d with symmetric circular padding
    convPT = nn.Conv3d(
        inChans, outChans, ks, stride=1, padding=(ks-1)//2,
        bias=isBias, padding_mode=padding_mode
    ).to(device).to(memory_format=mf)
    # parameters()[0] is the kernel, parameters()[1] the bias (if any)
    conv = Conv4dClass(
        inChans,
        outChans,
        Nd=Nd,
        kernel_size=ks,
        padding=ks-1,
        bias=isBias,
        padding_mode=padding_mode,
        kernel_initializer=
        partial(
            init_groups,
            weights=tuple(convPT.parameters())[0]
        ) if Conv4dClass.__name__ == 'Conv4d_groups'
        else init_broadcast(tuple(convPT.parameters())[0]),
        bias_initializer=
        lambda x:
        init_bias(x, tuple(convPT.parameters())[1]) if isBias else None,
        channels_last=channels_last
    ).to(device)
    return x, convPT, conv
@pytest.mark.parametrize('inChans', [1, 2])
@pytest.mark.parametrize('outChans', [1, 2, 8])
@pytest.mark.parametrize('L', [8, 16])
@pytest.mark.parametrize('Nd', [3])
@pytest.mark.parametrize('bs', [256])
# ks = 2 is not working due to bug in pytorch.nn.conv2d with padding=1
@pytest.mark.parametrize('ks', [3, 5, 7])
@pytest.mark.parametrize('isBias', [True, False])
@pytest.mark.parametrize('Conv4dClass', [Conv4d_groups, Conv4d_broadcast])
@pytest.mark.parametrize('channels_last', [True, False])
def test_convNd(inChans, outChans, L, Nd, bs, ks, isBias, Conv4dClass, channels_last):
    """Check that ConvNd emulation matches the reference nn.Conv3d output."""
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Bug fix: channels_last was parametrized but never forwarded to init(),
    # so every memory-format variant silently ran with init()'s default (True).
    _data, _convPt, _convNd = init(inChans, outChans, L, Nd, bs, ks, isBias,
                                   Conv4dClass, channels_last)
    outPT = _convPt(_data)
    out = _convNd(_data)
    diff = torch.abs((out-outPT)).max()
    print(f"convNd max error: {diff:.2g}")
    assert diff < 1e-5, f'err: {diff}'
def compare_time(inChans, outChans, L, Nd, bs, ks, isBias, Conv4dClass, channels_last):
    """Benchmark forward and forward+backward wall time of the reference
    nn.Conv3d against the Conv4d implementation built by init().

    Each timed statement is wrapped in device synchronisation (sync_cmd) so
    asynchronous kernels are fully accounted for. The first repeat is
    treated as warm-up and dropped; mean +/- sem of the rest is printed.
    """
    import torch
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True
    number = 1
    _data, _convPT, _convNd = init(inChans, outChans, L, Nd, bs, ks, isBias,
                                   Conv4dClass, channels_last)
    # Explicit exec environment for timeit; torch must be present because
    # sync_cmd references it, and timed statements may bind `out` here.
    env = {'_data': _data, '_convPT': _convPT, '_convNd': _convNd,
           'torch': torch}

    def _timed(stmt):
        # drop the warm-up repeat before aggregating
        samples = np.array(timeit.repeat(stmt, globals=env, number=number))
        return f'{samples[1:].mean():3g} pm {sns.sem(samples[1:]):3g}'

    print("ConvPT Forward time: ",
          _timed(f"{sync_cmd}; out = _convPT(_data);{sync_cmd};"))
    print("ConvNd Forward time: ",
          _timed(f"{sync_cmd}; out = _convNd(_data);{sync_cmd};"))
    print("ConvPt Forward+Backward time: ",
          _timed(f"{sync_cmd};out=_convPT(_data);out.sum().backward();{sync_cmd};"))
    print("ConvNd Forward+Backward time: ",
          _timed(f"{sync_cmd};_convNd(_data).sum().backward();{sync_cmd};"))
def run_4d_benchmark(inChans, outChans, L, bs, ks, isBias, Conv4dClass, channels_last):
    """Time forward and forward+backward of a true 4D (Nd=4) convolution.

    Unlike compare_time there is no nn.Conv3d reference here (PyTorch has
    no native Conv4d); only the Conv4dClass instance is benchmarked.
    The first timeit repeat is dropped as warm-up.
    """
    import torch
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True
    number = 1
    Nd = 4
    # NOTE(review): mf is computed but unused -- the .to(memory_format=mf)
    # on the input is commented out below; channels_last is still forwarded
    # to the module constructor.
    mf = torch.channels_last_3d if channels_last else torch.contiguous_format
    _data = torch.randn(bs, inChans, *((L,)*Nd)).to(device) #.to(memory_format=mf)
    _convNd = Conv4dClass(
        inChans, outChans, Nd=Nd, kernel_size=ks, padding=ks-1, bias=isBias,
        padding_mode='circular', channels_last=channels_last).to(device)
    # globals=locals() exposes _convNd/_data/torch to the exec'd statement
    times = np.array(
        timeit.repeat(
            f"{sync_cmd}; out = _convNd(_data);{sync_cmd};",
            globals=locals(), number=number)
    )
    print("Forward time: ", f'{times[1:].mean():3g} pm {sns.sem(times[1:]):3g}')
    times = np.array(
        timeit.repeat(
            f"{sync_cmd}; out = _convNd(_data).sum().backward(); {sync_cmd}",
            globals=locals(), number=number)
    )
    print("Forward+Backward time: ", f'{times[1:].mean():3g} pm {sns.sem(times[1:]):3g}')
if __name__ == "__main__":
    # Standalone benchmark driver: run a 3D correctness check plus 3D/4D
    # timing for both Conv4d implementations and both memory formats.
    for conv_type in [Conv4d_broadcast, Conv4d_groups]:
        for channels_last in [True, False]:
            print("========================================================")
            print(conv_type, '| channels_last =', channels_last)
            print("========================================================")
            print("--> Bechmark 3D")
            # correctness against nn.Conv3d, then timing comparison
            test_convNd(inChans=8, outChans=32, L=16, Nd=3, bs=64, ks=3,
                        isBias=True, Conv4dClass=conv_type,
                        channels_last=channels_last)
            compare_time(inChans=64, outChans=64, L=16, Nd=3, bs=64, ks=3,
                         isBias=True, Conv4dClass=conv_type,
                         channels_last=channels_last)
            print("--> Benchmark 4D")
            print("----> inChannels = 18, outChannels = 32")
            run_4d_benchmark(inChans=18, outChans=32, L=8, bs=64,
                             ks=3, isBias=True, Conv4dClass=conv_type, channels_last=channels_last)
            print("----> inChannels = 32, outChannels = 32")
            run_4d_benchmark(inChans=32, outChans=32, L=8, bs=64,
                             ks=3, isBias=True, Conv4dClass=conv_type, channels_last=channels_last)
            print("----> inChannels = 32, outChannels = 48")
            run_4d_benchmark(inChans=32, outChans=48, L=8, bs=64,
                             ks=3, isBias=True, Conv4dClass=conv_type, channels_last=channels_last)
| 7,179 | 35.262626 | 106 | py |
phocnet | phocnet-master/install.py | import os
import shutil
import logging
import argparse
from subprocess import call
import sys
def main(cudnn_dir, no_caffe, opencv_dir, install_dir, install_caffe_dir):
    """Build the bundled Caffe submodule (unless disabled) and install the
    phocnet package into <install_dir>/lib/pythonX.Y/site-packages.

    Raises ValueError if the CMake configure or make step fails.
    """
    # init logger
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('install.py')
    # init submodules
    call(['git', 'submodule', 'init'])
    call(['git', 'submodule', 'update'])
    # compile caffe
    # cmake
    if not no_caffe:
        logger.info('Running CMake to configure Caffe submodule...')
        if install_caffe_dir is None:
            install_caffe_dir = os.path.join(install_dir, 'caffe')
        else:
            install_caffe_dir = os.path.join(install_caffe_dir, 'caffe')
        os.chdir('caffe')
        # always configure from a clean build tree
        if os.path.exists('build'):
            shutil.rmtree('build')
        os.makedirs('build')
        os.chdir('build')
        call_list = ['cmake', '..', '-DCMAKE_INSTALL_PREFIX=%s' % install_caffe_dir]
        if cudnn_dir is not None:
            call_list.append('-DCUDNN_DIR=%s' % cudnn_dir)
        if opencv_dir is not None:
            call_list.append('-DOpenCV_DIR=%s' % opencv_dir)
        if call(call_list) != 0:
            raise ValueError('Error during CMake run')
        # make
        logger.info('Compiling Caffe submodule...')
        if call(['make', 'install']) != 0:
            raise ValueError('Error during make')
        os.chdir('../..')
    # copy to desired location
    # sys.version_info is more robust than parsing sys.version, whose string
    # also embeds build/compiler information after the version number.
    py_dir = 'python%d.%d' % (sys.version_info.major, sys.version_info.minor)
    install_path = os.path.join(install_dir, 'lib', py_dir, 'site-packages')
    if not os.path.exists(install_path):
        os.makedirs(install_path)
    target = os.path.join(install_path, 'phocnet')
    # remove a previous copy so re-running the installer does not crash
    # (shutil.copytree refuses to write into an existing directory)
    if os.path.exists(target):
        shutil.rmtree(target)
    shutil.copytree('src/phocnet', target)
    logger.info('Finished installation.')
if __name__ == '__main__':
    # CLI wrapper: parse install locations/options and delegate to main().
    parser = argparse.ArgumentParser(description='Script for easy install of the PHOCNet library (dependencies must be present).')
    parser.add_argument('--cudnn-dir', type=str, help='Path to the CUDNN root dir.')
    parser.add_argument('--opencv-dir', type=str, help='Path to the OpenCV share dir.')
    parser.add_argument('--install-dir', type=str, required=True, help='Path to install the PHOCNet library into.')
    parser.add_argument('--install-caffe-dir', type=str, help='Path to install the custom Caffe library into. If unspecified, the install_ir path is chosen.')
    # NOTE(review): the help text below ends abruptly at "don''" (an implicit
    # string concatenation) -- the sentence appears truncated in the original.
    parser.add_argument('--no-caffe', action='store_true',
                        help='If this flag is provided, the PHOCNet library is installed without the custom Caffe (e.g. if you installed a different Caffe version and don''')
    # argparse converts dashes to underscores, matching main()'s parameters
    args = vars(parser.parse_args())
    main(**args)
| 2,659 | 41.222222 | 174 | py |
phocnet | phocnet-master/tools/predict_phocs.py | #!/usr/bin/env python
'''
Script for predicting PHOCs for a number of images residing in a folder on disk.
'''
import argparse
import logging
import os
import caffe
import numpy as np
import cv2
from phocnet.evaluation.cnn import net_output_for_word_image_list
def main(img_dir, output_dir, pretrained_phocnet, deploy_proto, min_image_width_height, gpu_id):
    """Predict PHOC vectors for every image in img_dir with a pretrained
    PHOCNet and save them as a single Numpy array (predicted_phocs.npy).

    Images are processed in ASCII order of their filenames, so row i of the
    saved array corresponds to the i-th filename. Runs on CPU unless gpu_id
    is given.
    """
    logging_format = '[%(asctime)-19s, %(name)s, %(levelname)s] %(message)s'
    logging.basicConfig(level=logging.INFO,
                        format=logging_format)
    logger = logging.getLogger('Predict PHOCs')

    if gpu_id is None:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(gpu_id)

    logger.info('Loading PHOCNet...')
    phocnet = caffe.Net(deploy_proto, caffe.TEST, weights=pretrained_phocnet)

    # find all images in the supplied dir (single listdir; os.listdir never
    # returns '.'/'..' but the filter is kept for safety)
    filenames = [f for f in sorted(os.listdir(img_dir)) if f not in ['.', '..']]
    logger.info('Found %d word images to process', len(filenames))
    # Bug fix: cv2.CV_LOAD_IMAGE_GRAYSCALE is the removed OpenCV 1.x-style
    # constant; cv2.IMREAD_GRAYSCALE (same value, 0) is available since
    # OpenCV 2.4 and is the supported spelling in OpenCV 3+.
    word_img_list = [cv2.imread(os.path.join(img_dir, filename), cv2.IMREAD_GRAYSCALE)
                     for filename in filenames]
    # push images through the PHOCNet
    logger.info('Predicting PHOCs...')
    predicted_phocs = net_output_for_word_image_list(phocnet=phocnet, word_img_list=word_img_list,
                                                     min_img_width_height=min_image_width_height)
    # save everything
    logger.info('Saving...')
    np.save(os.path.join(output_dir, 'predicted_phocs.npy'), predicted_phocs)
    logger.info('Finished')
if __name__ == '__main__':
    # CLI wrapper: parse paths/options and delegate to main().
    parser = argparse.ArgumentParser(description='Predict PHOCs from a pretrained PHOCNet. The PHOCs are saved as Numpy Array to disk.')
    parser.add_argument('--min_image_width_height', '-miwh', action='store', type=int, default=26,
                        help='The minimum image width or height to be passed through the PHOCNet. Default: 26')
    parser.add_argument('--output_dir', '-od', action='store', type=str, default='.',
                        help='The directory where to store the PHOC Numpy Array. Default: .')
    parser.add_argument('--img_dir', '-id', action='store', type=str, required=True,
                        help='All images in this folder are processed in ASCII order of their '+
                             'respective names. A PHOC is predicted for each.')
    parser.add_argument('--pretrained_phocnet', '-pp', action='store', type=str, required=True,
                        help='Path to a pretrained PHOCNet binaryproto file.')
    parser.add_argument('--deploy_proto', '-dp', action='store', type=str, required=True,
                        help='Path to PHOCNet deploy prototxt file.')
    parser.add_argument('--gpu_id', '-gpu', action='store', type=int,
                        help='The ID of the GPU to use. If not specified, training is run in CPU mode.')
    # argument names match main()'s keyword parameters
    args = vars(parser.parse_args())
    main(**args)
| 2,673 | 42.836066 | 133 | py |
phocnet | phocnet-master/tools/save_deploy_proto.py | #!/usr/bin/env python
import argparse
import os
from phocnet.caffe.model_proto_generator import ModelProtoGenerator
if __name__ == '__main__':
    # CLI: generate a deploy (inference-only) PHOCNet prototxt -- no LMDB
    # data layers -- and write it to <output_dir>/deploy_phocnet.prototxt.
    parser = argparse.ArgumentParser(description='Save a PHOCNet deploy proto file to disk.')
    parser.add_argument('--output_dir', '-od', action='store', type=str, default='.',
                        help='The directory where to save the deploy proto. Default: .')
    parser.add_argument('--phoc_size', '-ps', action='store', type=int, default=604,
                        help='The dimensionality of the PHOC. Default: 604')
    args = vars(parser.parse_args())
    proto = ModelProtoGenerator(use_cudnn_engine=False).get_phocnet(word_image_lmdb_path=None, phoc_lmdb_path=None,
                                                                    phoc_size=args['phoc_size'], generate_deploy=True)
    with open(os.path.join(args['output_dir'], 'deploy_phocnet.prototxt'), 'w') as deploy_file:
        deploy_file.write('#Deploy PHOCNet\n')
        # Fix: the original final line had non-Python residue fused onto it
        # (a dataset-join artifact), which broke parsing of this file.
        deploy_file.write(str(proto))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.