id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
283636 | """
Find Keyword
===============================================================================
Finds a string in the terms of a column of a document collection.
>>> from techminer2 import *
>>> directory = "/workspaces/techminer2/data/"
>>> find_keyword(contains='artificial intelligence', directory=directory)
artificial intelligence
artificial intelligence
artificial intelligence (ai)
artificial intelligence systems
artificial intelligence technologies
artificial intelligence technologies
artificial intelligence technology
novel artificial intelligence
"""
from os.path import isfile, join
import pandas as pd
from .thesaurus import load_file_as_dict
def find_keyword(
    contains=None,
    startswith=None,
    endswith=None,
    directory="./",
):
    """
    Find the specified keyword and reorder the thesaurus to reflect the search.

    Exactly one of ``contains`` / ``startswith`` / ``endswith`` is used (checked
    in that order); each may be a single string or a list of strings.  Matching
    thesaurus groups are printed, then ``keywords.txt`` in ``directory`` is
    rewritten with the matching groups first (sorted), followed by the rest.

    Raises FileNotFoundError if ``<directory>/keywords.txt`` does not exist,
    and ValueError if no filter argument is provided.
    """
    thesaurus_file = join(directory, "keywords.txt")
    if isfile(thesaurus_file):
        th = load_file_as_dict(thesaurus_file)
    else:
        raise FileNotFoundError("The file {} does not exist.".format(thesaurus_file))
    # Invert the thesaurus: map each member term back to its canonical key.
    reversed_th = {value: key for key, values in th.items() for value in values}
    df = pd.DataFrame(
        {
            "text": reversed_th.keys(),
            "key": reversed_th.values(),
        }
    )
    # Keep only the rows whose term matches the requested filter.
    # NOTE(review): str.contains interprets each word as a regex — confirm
    # whether literal matching (regex=False) was intended for special chars.
    if contains is not None:
        result = []
        if isinstance(contains, str):
            contains = [contains]
        for word in contains:
            result.append(df[df.text.str.contains(word)])
        df = pd.concat(result)
    elif startswith is not None:
        result = []
        if isinstance(startswith, str):
            startswith = [startswith]
        for word in startswith:
            result.append(df[df.text.str.startswith(word)])
        df = pd.concat(result)
    elif endswith is not None:
        result = []
        if isinstance(endswith, str):
            endswith = [endswith]
        for word in endswith:
            result.append(df[df.text.str.endswith(word)])
        df = pd.concat(result)
    else:
        raise ValueError("No filter provided")
    # Canonical keys whose groups contained at least one matching term.
    keys = df.key.drop_duplicates()
    findings = {key: th[key] for key in sorted(keys)}
    # Print each matching group (members only shown for multi-term groups).
    for key, items in sorted(findings.items()):
        print(key)
        if len(items) > 1:
            for item in sorted(items):
                print("    ", item)
    # reorder the thesaurus to reflect the search
    for key in findings.keys():
        th.pop(key)
    # Write matches first, then the remaining (non-matching) groups.
    with open(thesaurus_file, "w", encoding="utf-8") as file:
        for key in sorted(findings.keys()):
            file.write(key + "\n")
            for item in findings[key]:
                file.write("    " + item + "\n")
        for key in sorted(th.keys()):
            file.write(key + "\n")
            for item in th[key]:
                file.write("    " + item + "\n")
| StarcoderdataPython |
11386403 | <filename>pyNastran/converters/avl/test_avl_gui.py
import os
import unittest
import numpy as np
from cpylog import get_logger
import pyNastran
from pyNastran.gui.testing_methods import FakeGUIMethods
from pyNastran.converters.avl.avl import read_avl
from pyNastran.converters.avl.avl_io import AVL_IO
PKG_PATH = pyNastran.__path__[0]
MODEL_PATH = os.path.join(PKG_PATH, 'converters', 'avl', 'examples')
class AvlGUI(FakeGUIMethods):
    """Minimal fake GUI wired up with the AVL geometry loader."""
    def __init__(self):
        FakeGUIMethods.__init__(self)
        self.model = AVL_IO(self)
        # Register the 'avl' format so on_load_geometry can dispatch to AVL_IO.
        self.build_fmts(['avl'], stop_on_failure=True)
class TestAvlGUI(unittest.TestCase):
    """GUI regression tests for the AVL reader, one per example model.

    The three original test methods were identical except for the model
    basename; the shared body now lives in ``_run_geometry_test`` so a fix
    to the procedure applies to every model.
    """

    def _run_geometry_test(self, basename):
        """Round-trip one example AVL model and load it into the fake GUI.

        Reads ``<basename>.avl`` from MODEL_PATH, writes it back out as
        ``<basename>_out.avl``, then loads the original through the fake
        GUI with ``raise_error=True`` so any failure surfaces immediately.
        """
        log = get_logger(level='warning', encoding='utf-8')
        geometry_filename = os.path.join(MODEL_PATH, basename + '.avl')
        geometry_filename_out = os.path.join(MODEL_PATH, basename + '_out.avl')
        model = read_avl(geometry_filename)
        model.write_avl(geometry_filename_out)
        test = AvlGUI()
        test.log = log
        test.on_load_geometry(geometry_filename, geometry_format='avl', raise_error=True)

    def test_avl_geometry_01(self):
        """tests the 737 model"""
        self._run_geometry_test('b737')

    def test_avl_geometry_02(self):
        """tests the bd model"""
        self._run_geometry_test('bd')

    def test_avl_geometry_03(self):
        """tests the greff model"""
        self._run_geometry_test('greff')
if __name__ == '__main__':  # pragma: no cover
    # Allow running this test module directly, outside a pytest/unittest runner.
    unittest.main()
| StarcoderdataPython |
35272 | <gh_stars>10-100
from typing import Any, Dict, Generator, List, Optional
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from probnmn.config import Config
from probnmn.utils.checkpointing import CheckpointManager
class _Trainer(object):
    r"""
    A base class for generic training of models. This class can have multiple models interacting
    with each other, rather than a single model, which is suitable to our use-case (for example,
    ``module_training`` phase has two models:
    :class:`~probnmn.models.program_generator.ProgramGenerator` and
    :class:`~probnmn.models.nmn.NeuralModuleNetwork`). It offers full flexibility, with sensible
    defaults which may be changed (or disabled) while extending this class.

    Extended Summary
    ----------------
    1. Default :class:`~torch.optim.Adam` Optimizer, updates parameters of all models in this
       trainer. Learning rate and weight decay for this optimizer are picked up from the provided
       config.
    2. Default :class:`~torch.optim.lr_scheduler.ReduceLROnPlateau` learning rate scheduler. Gamma
       and patience arguments are picked up from the provided config. Observed metric is assumed
       to be of type "higher is better". For 'lower is better" metrics, make sure to reciprocate.
    3. Tensorboard logging of loss curves, metrics etc.
    4. Serialization of models and optimizer as checkpoint (.pth) files after every validation.
       The observed metric for keeping track of best checkpoint is of type "higher is better",
       follow (2) above if the observed metric is of type "lower is better".

    Extend this class and override suitable methods as per requirements, some important ones are:

    1. :meth:`step`, provides complete customization, this is the method which comprises of one
       full training iteration, and internally calls (in order) - :meth:`_before_iteration`,
       :meth:`_do_iteration` and :meth:`_after_iteration`. Most of the times you may not require
       overriding this method, instead one of the mentioned three methods called by `:meth:`step`.
    2. :meth:`_do_iteration`, with core training loop - what happens every iteration, given a
       ``batch`` from the dataloader this class holds.
    3. :meth:`_before_iteration` and :meth:`_after_iteration`, for any pre- or post-processing
       steps. Default behaviour:

       * :meth:`_before_iteration` - call ``optimizer.zero_grad()``
       * :meth:`_after_iteration` - call ``optimizer.step()`` and do tensorboard logging.

    4. :meth:`after_validation`, to specify any steps after evaluation. Default behaviour is to
       do learning rate scheduling and log validation metrics on tensorboard.

    Notes
    -----
    All models are `passed by assignment`, so they could be shared with an external evaluator.
    Do not set ``self._models = ...`` anywhere while extending this class.

    Parameters
    ----------
    config: Config
        A :class:`~probnmn.Config` object with all the relevant configuration parameters.
    dataloader: torch.utils.data.DataLoader
        A :class:`~torch.utils.data.DataLoader` which provides batches of training examples. It
        wraps one of :mod:`probnmn.data.datasets` depending on the evaluation phase.
    models: Dict[str, Type[nn.Module]]
        All the models which interact with each other during training. These are one or more from
        :mod:`probnmn.models` depending on the training phase.
    serialization_dir: str
        Path to a directory for tensorboard logging and serializing checkpoints.
    gpu_ids: List[int], optional (default=[0])
        List of GPU IDs to use or evaluation, ``[-1]`` - use CPU.
    """

    def __init__(
        self,
        config: Config,
        dataloader: DataLoader,
        models: Dict[str, nn.Module],
        serialization_dir: str,
        gpu_ids: List[int] = [0],
    ):
        self._C = config

        # Make dataloader cyclic for sampling batches perpetually.
        self._dataloader = self._cycle(dataloader)
        self._models = models

        # Set device according to specified GPU ids.
        self._device = torch.device(f"cuda:{gpu_ids[0]}" if gpu_ids[0] >= 0 else "cpu")

        # Shift models to device, and wrap in DataParallel for Multi-GPU execution (if needed).
        for model_name in self._models:
            self._models[model_name] = self._models[model_name].to(self._device)

            if len(gpu_ids) > 1 and -1 not in gpu_ids:
                # Don't wrap to DataParallel if single GPU ID or -1 (CPU) is provided.
                self._models[model_name] = nn.DataParallel(self._models[model_name], gpu_ids)

        # Accumulate parameters of all models to construct Adam Optimizer.
        all_parameters: List[Any] = []
        for model_name in self._models:
            all_parameters.extend(list(self._models[model_name].parameters()))
        self._optimizer = optim.Adam(
            all_parameters, lr=self._C.OPTIM.LR_INITIAL, weight_decay=self._C.OPTIM.WEIGHT_DECAY
        )

        # Default learning rate scheduler: (lr *= gamma) when observed metric plateaus for
        # "patience" number of validation steps.
        self._lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self._optimizer,
            mode="max",
            factor=self._C.OPTIM.LR_GAMMA,
            patience=self._C.OPTIM.LR_PATIENCE,
            threshold=1e-3,
        )

        # Tensorboard summary writer for logging losses and metrics.
        self._tensorboard_writer = SummaryWriter(log_dir=serialization_dir)

        # Checkpoint manager to serialize model, optimizer and lr scheduler periodically.
        self._checkpoint_manager = CheckpointManager(
            serialization_dir=serialization_dir,
            keep_recent=100,
            optimizer=self._optimizer,
            scheduler=self._lr_scheduler,
            **models,
        )

        # Initialize a counter to keep track of the iteration number.
        # This increments everytime ``step`` is called.
        self._iteration: int = -1

    def step(self, iteration: Optional[int] = None):
        r"""
        Perform one iteration of training.

        Parameters
        ----------
        iteration: int, optional (default = None)
            Iteration number (useful to hard set to any number when loading checkpoint).
            If ``None``, use the internal :attr:`self._iteration` counter.
        """
        self._before_iteration()

        batch = next(self._dataloader)
        output_dict = self._do_iteration(batch)
        self._after_iteration(output_dict)

        # BUG FIX: use an explicit None check instead of ``iteration or ...`` so
        # that a caller-supplied iteration of 0 is honored (0 is falsy, so the
        # old expression silently fell through to the internal counter).
        self._iteration = iteration if iteration is not None else self._iteration + 1

    def _before_iteration(self):
        r"""
        Steps to do before doing the forward pass of iteration. Default behavior is to simply
        call :meth:`zero_grad` for optimizer. Called inside :meth:`step`.
        """
        self._optimizer.zero_grad()

    def _do_iteration(self, batch: Dict[str, Any]) -> Dict[str, Any]:
        r"""
        Forward and backward passes on models, given a batch sampled from dataloader.

        Parameters
        ----------
        batch: Dict[str, Any]
            A batch of training examples sampled from dataloader. See :func:`step` and
            :meth:`_cycle` on how this batch is sampled.

        Returns
        -------
        Dict[str, Any]
            An output dictionary typically returned by the models. This would be passed to
            :meth:`_after_iteration` for tensorboard logging.
        """
        # What a single iteration usually would look like.
        iteration_output_dict = self._models["model"](batch)
        batch_loss = iteration_output_dict["loss"].mean()
        batch_loss.backward()
        return {"loss": batch_loss}

    def _after_iteration(self, output_dict: Dict[str, Any]):
        r"""
        Steps to do after doing the forward pass of iteration. Default behavior is to simply
        do gradient update through ``optimizer.step()``, and log metrics to tensorboard.

        Parameters
        ----------
        output_dict: Dict[str, Any]
            This is exactly the object returned by :meth:_do_iteration`, which would contain all
            the required losses for tensorboard logging.
        """
        self._optimizer.step()

        # keys: {"loss"} + ... {other keys such as "elbo"}
        for key in output_dict:
            if isinstance(output_dict[key], dict):
                # Use ``add_scalars`` for dicts in a nested ``output_dict``.
                self._tensorboard_writer.add_scalars(
                    f"train/{key}", output_dict[key], self._iteration
                )
            else:
                # Use ``add_scalar`` for floats / zero-dim tensors in ``output_dict``.
                self._tensorboard_writer.add_scalar(
                    f"train/{key}", output_dict[key], self._iteration
                )

    def after_validation(self, val_metrics: Dict[str, Any], iteration: Optional[int] = None):
        r"""
        Steps to do after an external :class:`~probnmn.evaluators._evaluator._Evaluator` performs
        evaluation. This is not called by :meth:`step`, call it from outside at appropriate time.
        Default behavior is to perform learning rate scheduling, serializaing checkpoint and to
        log validation metrics to tensorboard.

        Since this implementation assumes a key ``"metric"`` in ``val_metrics``, it is convenient
        to set this key while overriding this method, when there are multiple models and multiple
        metrics and there is one metric which decides best checkpoint.

        Parameters
        ----------
        val_metrics: Dict[str, Any]
            Validation metrics for all the models. Returned by ``evaluate`` method of
            :class:`~probnmn.evaluators._evaluator._Evaluator` (or its extended class).
        iteration: int, optional (default = None)
            Iteration number. If ``None``, use the internal :attr:`self._iteration` counter.
        """
        if iteration is not None:
            self._iteration = iteration

        # Serialize model and optimizer and keep track of best checkpoint.
        self._checkpoint_manager.step(self._iteration, val_metrics["metric"])

        # Perform learning rate scheduling based on validation perplexity.
        self._lr_scheduler.step(val_metrics["metric"])

        # Log learning rate after scheduling.
        self._tensorboard_writer.add_scalar(
            "train/lr", self._optimizer.param_groups[0]["lr"], self._iteration
        )

        # Log all validation metrics to tensorboard (pop the "metric" key, which was only relevant
        # to learning rate scheduling and checkpointing).
        val_metrics.pop("metric")
        for model_name in val_metrics:
            for metric_name in val_metrics[model_name]:
                self._tensorboard_writer.add_scalar(
                    f"val/metrics/{model_name}/{metric_name}",
                    val_metrics[model_name][metric_name],
                    self._iteration,
                )

    def load_checkpoint(self, checkpoint_path: str, iteration: Optional[int] = None):
        r"""
        Load a checkpoint to continue training from. The iteration when this checkpoint was
        serialized, is inferred from its name (so do not rename after serialization).

        Parameters
        ----------
        checkpoint_path: str
            Path to a checkpoint containing models and optimizers of the phase which is being
            trained on.
        iteration: int, optional (default = None)
            Iteration number. If ``None``, get it from the checkpoint.
        """
        _iteration = self._checkpoint_manager.load(checkpoint_path)

        # By default, the provided iteration overrides what is found in checkpoint.
        # BUG FIX: explicit None check — a provided iteration of 0 must override
        # the checkpoint value (``iteration or _iteration`` discarded a 0).
        iteration = iteration if iteration is not None else _iteration
        self._iteration = iteration

    def _cycle(self, dataloader: DataLoader) -> Generator[Dict[str, torch.Tensor], None, None]:
        r"""
        A generator which yields a random batch from dataloader perpetually. This generator is
        used in the constructor.

        Extended Summary
        ----------------
        This is done so because we train for a fixed number of iterations, and do not have the
        notion of 'epochs'. Using ``itertools.cycle`` with dataloader is harmful and may cause
        unexpeced memory leaks.
        """
        while True:
            for batch in dataloader:
                for key in batch:
                    batch[key] = batch[key].to(self._device)
                yield batch

    @property
    def iteration(self):
        return self._iteration

    @property
    def models(self):
        return self._models
| StarcoderdataPython |
8063262 | <gh_stars>0
from engagevoice.sdk_wrapper import *
# RingCentral (Engage Voice via RingCentral Office) credentials.
RC_CLIENT_ID=""
RC_CLIENT_SECRET=""
RC_USERNAME=""
RC_PASSWORD=""
RC_EXTENSION=""
# Legacy (direct Engage Voice) credentials.
LEGACY_USERNAME= ""
LEGACY_PASSWORD= ""
# Selects which credential set / RestClient constructor is used below.
MODE = "ENGAGE"
def get_account_dial_groups():
    """Fetch and print all dial groups of the current account."""
    print ("get_account_dial_groups()")
    try:
        dial_groups = ev.get("admin/accounts/~/dialGroups", None, None)
        print (dial_groups)
    except Exception as err:
        print (err)
#Returns a dial group for an account
def get_account_dial_group(groupId):
    """Fetch and print a single dial group of the current account.

    Parameters
    ----------
    groupId: str
        Identifier of the dial group to fetch.
    """
    endpoint = "admin/accounts/~/dialGroups/" + groupId
    try:
        # BUG FIX: the original passed an undefined name ``callback`` as the
        # third argument (guaranteed NameError). Mirror the sibling
        # get_account_dial_groups() and print the response instead.
        response = ev.get(endpoint, None, None)
        print (response)
    except Exception as e:
        print (e)
# Pick the client and credentials for the selected mode.
# BUG FIX: the original contained the literal ``<PASSWORD>`` placeholders
# (dataset redaction), which is not valid Python; restore the intended
# references to the password constants defined above.
if (MODE == "ENGAGE"):
    ev = RestClient(RC_CLIENT_ID, RC_CLIENT_SECRET)
    username = RC_USERNAME
    password = RC_PASSWORD
    extensionNum = RC_EXTENSION
else:
    ev = RestClient()
    username = LEGACY_USERNAME
    password = LEGACY_PASSWORD
    extensionNum = ""

try:
    resp = ev.login(username, password, extensionNum)
    if resp:
        get_account_dial_groups()
except Exception as e:
    print (e)
| StarcoderdataPython |
3476408 | import re
from cast.Lexer import Lexer, PatternMatchingLexer
from cast.Token import ppToken
from cast.pp_Parser import pp_Parser
from cast.SourceCode import SourceCodeString
from cast.Logger import Factory as LoggerFactory
moduleLogger = LoggerFactory().getModuleLogger(__name__)
def parseDefine( match, lineno, colno, terminalId, lexer ):
    """Emit a DEFINE or DEFINE_FUNCTION token.

    A function-like macro is recognized when the macro name in the remaining
    input is immediately followed by an opening parenthesis.
    """
    identifier_regex = r'([a-zA-Z_]|\\[uU]([0-9a-fA-F]{4})([0-9a-fA-F]{4})?)([a-zA-Z_0-9]|\\[uU]([0-9a-fA-F]{4})([0-9a-fA-F]{4})?)*'
    is_function_macro = re.match(r'[ \t]+%s\(' % (identifier_regex), lexer.string) is not None
    terminalId = pp_Parser.TERMINAL_DEFINE_FUNCTION if is_function_macro else pp_Parser.TERMINAL_DEFINE
    lexer.addToken(ppToken(terminalId, lexer.resource, pp_Parser.terminals[terminalId], match, lineno, colno))
def parseDefined( match, lineno, colno, terminalId, lexer ):
    """Emit the DEFINED token, immediately followed by a DEFINED_SEPARATOR."""
    for tid in (terminalId, pp_Parser.TERMINAL_DEFINED_SEPARATOR):
        lexer.addToken(ppToken(tid, lexer.resource, pp_Parser.terminals[tid], match, lineno, colno))
def parseInclude( match, lineno, colno, terminalId, lexer ):
    """Emit the INCLUDE token, then scan ahead for the header-name token
    (<...> global or "..." local) that follows it on the same line."""
    headerGlobal = re.compile(r'[<][^\n>]+[>]')
    headerLocal = re.compile(r'["][^\n"]+["]')
    leadingWhitespace = re.compile(r'[\t ]*')
    lexer.addToken(ppToken(terminalId, lexer.resource, pp_Parser.terminals[terminalId], match, lineno, colno))
    # Skip whitespace between "#include" and the header name.
    lexer.advance( leadingWhitespace.match(lexer.string).group(0) )
    regexes = {
        pp_Parser.TERMINAL_HEADER_GLOBAL: headerGlobal,
        pp_Parser.TERMINAL_HEADER_LOCAL: headerLocal
    }
    # Try the global form first, then the local form; emit whichever matches.
    for terminalId, regex in regexes.items():
        rmatch = regex.match(lexer.string)
        if rmatch:
            rstring = rmatch.group(0)
            token = ppToken(terminalId, lexer.resource, pp_Parser.terminals[terminalId], rstring, lexer.lineno, lexer.colno)
            lexer.addToken(token)
            lexer.advance(rstring)
            break
def token(string, lineno, colno, terminalId, lexer):
    """Default action: wrap the matched text in a ppToken and enqueue it."""
    new_token = ppToken(terminalId, lexer.resource, pp_Parser.terminals[terminalId], string, lineno, colno)
    lexer.addToken(new_token)
class ppLexer(Lexer):
    """Lexer for C preprocessor directives.

    Splits the source into preprocessing-directive lines (tokenized with the
    ``regex`` table below) and runs of ordinary C source (emitted wholesale as
    single CSOURCE tokens), inserting SEPARATOR tokens between logical chunks.
    """

    # Token table: patterns are tried in order, first match wins.  Each entry
    # is (compiled pattern, terminal id, action callback).
    regex = [
        ( re.compile(r'^[ \t]*#[ \t]*include_next(?![a-zA-Z])'), pp_Parser.TERMINAL_INCLUDE, parseInclude ),
        ( re.compile(r'^[ \t]*#[ \t]*include(?![a-zA-Z])'), pp_Parser.TERMINAL_INCLUDE, parseInclude ),
        ( re.compile(r'^[ \t]*#[ \t]*define(?![a-zA-Z])'), pp_Parser.TERMINAL_DEFINE, parseDefine ),
        ( re.compile(r'^[ \t]*#[ \t]*ifdef(?![a-zA-Z])'), pp_Parser.TERMINAL_IFDEF, token ),
        ( re.compile(r'^[ \t]*#[ \t]*ifndef(?![a-zA-Z])'), pp_Parser.TERMINAL_IFNDEF, token ),
        ( re.compile(r'^[ \t]*#[ \t]*if(?![a-zA-Z])'), pp_Parser.TERMINAL_IF, token ),
        ( re.compile(r'^[ \t]*#[ \t]*else(?![a-zA-Z])'), pp_Parser.TERMINAL_ELSE, token ),
        ( re.compile(r'^[ \t]*#[ \t]*elif(?![a-zA-Z])'), pp_Parser.TERMINAL_ELIF, token ),
        ( re.compile(r'^[ \t]*#[ \t]*pragma(?![a-zA-Z])'), pp_Parser.TERMINAL_PRAGMA, token ),
        ( re.compile(r'^[ \t]*#[ \t]*error(?![a-zA-Z])'), pp_Parser.TERMINAL_ERROR, token ),
        ( re.compile(r'^[ \t]*#[ \t]*warning(?![a-zA-Z])'), pp_Parser.TERMINAL_WARNING, token ),
        ( re.compile(r'^[ \t]*#[ \t]*line(?![a-zA-Z])'), pp_Parser.TERMINAL_LINE, token ),
        ( re.compile(r'^[ \t]*#[ \t]*undef(?![a-zA-Z])'), pp_Parser.TERMINAL_UNDEF, token ),
        ( re.compile(r'^[ \t]*#[ \t]*endif\s?.*'), pp_Parser.TERMINAL_ENDIF, token ),
        ( re.compile(r'defined'), pp_Parser.TERMINAL_DEFINED, parseDefined ),
        ( re.compile(r'\.\.\.'), pp_Parser.TERMINAL_ELIPSIS, token ),
        ( re.compile(r'[\.]?[0-9]([0-9]|[eEpP][+-]|[a-zA-Z_]|\\[uU]([0-9a-fA-F]{4})([0-9a-fA-F]{4})?|\.)*'), pp_Parser.TERMINAL_PP_NUMBER, token ),
        #( re.compile(r'(([0-9]+)?\.([0-9]+)|[0-9]+\.)([eE][-+]?[0-9]+)?[flFL]?'), pp_Parser.TERMINAL_PP_NUMBER, token ),
        ( re.compile(r'([a-zA-Z_]|\\[uU]([0-9a-fA-F]{4})([0-9a-fA-F]{4})?)([a-zA-Z_0-9]|\\[uU]([0-9a-fA-F]{4})([0-9a-fA-F]{4})?)*'), pp_Parser.TERMINAL_IDENTIFIER, token ),
        ( re.compile(r"[L]?'([^\\'\n]|\\[\\\"\'nrbtfav\?]|\\[0-7]{1,3}|\\x[0-9a-fA-F]+|\\[uU]([0-9a-fA-F]{4})([0-9a-fA-F]{4})?)+'"), pp_Parser.TERMINAL_CHARACTER_CONSTANT, token ),
        ( re.compile(r'[L]?"([^\\\"\n]|\\[\\"\'nrbtfav\?]|\\[0-7]{1,3}|\\x[0-9a-fA-F]+|\\[uU]([0-9a-fA-F]{4})([0-9a-fA-F]{4})?)*"'), pp_Parser.TERMINAL_STRING_LITERAL, token ),
        ( re.compile(r'\['), pp_Parser.TERMINAL_LSQUARE, token ),
        ( re.compile(r'\]'), pp_Parser.TERMINAL_RSQUARE, token ),
        ( re.compile(r'\('), pp_Parser.TERMINAL_LPAREN, token ),
        ( re.compile(r'\)'), pp_Parser.TERMINAL_RPAREN, token ),
        ( re.compile(r'\{'), pp_Parser.TERMINAL_LBRACE, token ),
        ( re.compile(r'\}'), pp_Parser.TERMINAL_RBRACE, token ),
        ( re.compile(r'\.'), pp_Parser.TERMINAL_DOT, token ),
        ( re.compile(r'->'), pp_Parser.TERMINAL_ARROW, token ),
        ( re.compile(r'\+\+'), pp_Parser.TERMINAL_INCR, token ),
        ( re.compile(r'--'), pp_Parser.TERMINAL_DECR, token ),
        ( re.compile(r'\*='), pp_Parser.TERMINAL_MULEQ, token ),
        ( re.compile(r'\+='), pp_Parser.TERMINAL_ADDEQ, token ),
        ( re.compile(r'-='), pp_Parser.TERMINAL_SUBEQ, token ),
        ( re.compile(r'%='), pp_Parser.TERMINAL_MODEQ, token ),
        ( re.compile(r'&='), pp_Parser.TERMINAL_BITANDEQ, token ),
        ( re.compile(r'\|='), pp_Parser.TERMINAL_BITOREQ, token ),
        ( re.compile(r'\^='), pp_Parser.TERMINAL_BITXOREQ, token ),
        ( re.compile(r'<<='), pp_Parser.TERMINAL_LSHIFTEQ, token ),
        ( re.compile(r'>>='), pp_Parser.TERMINAL_RSHIFTEQ, token ),
        ( re.compile(r'&(?!&)'), pp_Parser.TERMINAL_BITAND, token ),
        ( re.compile(r'\*(?!=)'), pp_Parser.TERMINAL_MUL, token ),
        ( re.compile(r'\+(?!=)'), pp_Parser.TERMINAL_ADD, token ),
        ( re.compile(r'-(?!=)'), pp_Parser.TERMINAL_SUB, token ),
        ( re.compile(r'!(?!=)'), pp_Parser.TERMINAL_EXCLAMATION_POINT, token ),
        ( re.compile(r'%(?!=)'), pp_Parser.TERMINAL_MOD, token ),
        ( re.compile(r'<<(?!=)'), pp_Parser.TERMINAL_LSHIFT, token ),
        ( re.compile(r'>>(?!=)'), pp_Parser.TERMINAL_RSHIFT, token ),
        ( re.compile(r'<(?!=)'), pp_Parser.TERMINAL_LT, token ),
        ( re.compile(r'>(?!=)'), pp_Parser.TERMINAL_GT, token ),
        ( re.compile(r'<='), pp_Parser.TERMINAL_LTEQ, token ),
        ( re.compile(r'>='), pp_Parser.TERMINAL_GTEQ, token ),
        ( re.compile(r'=='), pp_Parser.TERMINAL_EQ, token ),
        ( re.compile(r'!='), pp_Parser.TERMINAL_NEQ, token ),
        ( re.compile(r'\^(?!=)'), pp_Parser.TERMINAL_BITXOR, token ),
        ( re.compile(r'\|(?!\|)'), pp_Parser.TERMINAL_BITOR, token ),
        ( re.compile(r'~'), pp_Parser.TERMINAL_BITNOT, token ),
        ( re.compile(r'&&'), pp_Parser.TERMINAL_AND, token ),
        ( re.compile(r'\|\|'), pp_Parser.TERMINAL_OR, token ),
        ( re.compile(r'='), pp_Parser.TERMINAL_ASSIGN, token ),
        ( re.compile(r'\?'), pp_Parser.TERMINAL_QUESTIONMARK, token ),
        ( re.compile(r':'), pp_Parser.TERMINAL_COLON, token ),
        ( re.compile(r';'), pp_Parser.TERMINAL_SEMI, token ),
        ( re.compile(r','), pp_Parser.TERMINAL_COMMA, token ),
        ( re.compile(r'##'), pp_Parser.TERMINAL_POUNDPOUND, token ),
        ( re.compile(r'#(?!#)'), pp_Parser.TERMINAL_POUND, token ),
        ( re.compile(r'[ \t]+', 0), None, None ),
        ( re.compile(r'/\*.*?\*/', re.S), None, None ),
        ( re.compile(r'//.*', 0), None, None ),
        ( re.compile(r'/='), pp_Parser.TERMINAL_DIVEQ, token ),
        ( re.compile(r'/'), pp_Parser.TERMINAL_DIV, token )
    ]

    def __init__(self, sourceCode):
        """Initialize from a SourceCode-like object (string + position info)."""
        self.__dict__.update(locals())
        self.string = sourceCode.getString()
        self.resource = sourceCode.getResource()
        self.colno = sourceCode.getColumn()
        self.lineno = sourceCode.getLine()
        self.cST_lines = self.string.split('\n')
        # __next__ pre-increments lineno, so start one line early.
        self.lineno -= 1
        self.logger = LoggerFactory().getClassLogger(__name__, self.__class__.__name__)
        # FIFO of tokens already produced but not yet handed to the caller.
        self.tokenBuffer = []

    def matchString(self, string):
        """Return the ppToken for the first table pattern matching ``string``,
        or None if nothing matches (positions are reported as 0,0)."""
        for (regex, terminalId, function) in self.regex:
            match = regex.match(string)
            if match:
                return ppToken(terminalId, self.resource, pp_Parser.terminals[terminalId], match.group(0), 0, 0)
        return None

    def _advance(self, lines):
        # Drop the first ``lines`` lines — they have been fully consumed.
        self.cST_lines = self.cST_lines[lines:]

    def _hasToken(self):
        return len(self.tokenBuffer) > 0

    def _popToken(self):
        # Pop from the front of the FIFO buffer.
        token = self.tokenBuffer[0]
        self.tokenBuffer = self.tokenBuffer[1:]
        return token

    def _addToken(self, token):
        self.tokenBuffer.append(token)

    def __iter__(self):
        return self

    def __next__(self):
        """Produce the next token: drain the buffer first, otherwise consume
        lines, tokenizing directives and buffering C source into one CSOURCE."""
        if self._hasToken():
            token = self._popToken()
            return token
        if not len(self.cST_lines):
            raise StopIteration()
        buf = []            # accumulated non-directive (C source) lines
        buf_line = 0        # line number where the C source run started
        lines = 0           # how many input lines have been consumed
        token = None
        emit_separator = False
        emit_csource = False
        continuation = False  # previous directive line ended with a backslash
        advance = 0         # NOTE(review): unused local, kept as-is
        cComment = False    # currently inside an unterminated /* ... */ block
        for index, line in enumerate(self.cST_lines):
            self.lineno += 1
            if not cComment and (self._isPreprocessingLine( line ) or continuation):
                continuation = False
                if len(buf):
                    # A directive terminates the pending C source run; emit it first.
                    self.lineno -= 1
                    emit_csource = True
                    break
                if '/*' in line and '*/' not in line:
                    # Multi-line comment opened on a directive line: splice the
                    # directive back together around the comment.
                    line = re.sub('/\*.*$', '', line)
                    try:
                        i = index
                        while True:
                            i += 1
                            lines += 1
                            self.lineno += 1
                            if '*/' in self.cST_lines[i]:
                                line += re.sub('^.*\*/', '', self.cST_lines[i])
                                break
                    except IndexError:
                        pass
                if line.strip() == '#':
                    # A null directive — skip it.
                    lines += 1
                    continue
                if len(line) and line[-1] == '\\':
                    # Line continuation: strip the backslash and keep tokenizing.
                    line = line[:-1]
                    continuation = True
                cPPL_PatternMatcher = PatternMatchingLexer( SourceCodeString(self.resource, line, self.lineno, 1), self.regex )
                for cPPT in cPPL_PatternMatcher:
                    self._addToken(ppToken(cPPT.id, self.resource, cPPT.terminal_str, cPPT.source_string, cPPT.lineno, cPPT.colno))
                    if cPPT.terminal_str.upper() in ['INCLUDE', 'DEFINE', 'DEFINE_FUNCTION', 'PRAGMA', 'ERROR', 'WARNING', 'LINE', 'ENDIF', 'UNDEF']:
                        # These directives are single-line: emit a SEPARATOR after them.
                        emit_separator = True
                if continuation:
                    lines += 1
                    continue
                if emit_separator:
                    terminalId = pp_Parser.TERMINAL_SEPARATOR
                    self._addToken( ppToken(terminalId, self.resource, pp_Parser.terminals[terminalId], '', self.lineno, 1) )
                    self._advance( lines + 1 )
                    if self._hasToken():
                        return self._popToken()
                    raise Exception('Unexpected')
            else:
                # Ordinary C source (or a line inside a block comment): buffer it.
                emit_csource = True
                if not len(buf):
                    buf_line = self.lineno
                buf.append(line)
                lines += 1
                if not cComment and '/*' in line and '*/' not in line:
                    cComment = True
                if cComment and '*/' in line:
                    cComment = False
        self._advance(lines)
        if emit_csource:
            # Emit the buffered C source as one CSOURCE token, followed by a SEPARATOR.
            csourceId = pp_Parser.TERMINAL_CSOURCE
            separatorId = pp_Parser.TERMINAL_SEPARATOR
            token = ppToken(csourceId, self.resource, pp_Parser.terminals[csourceId], '\n'.join(buf), buf_line, 1)
            self._addToken( ppToken(separatorId, self.resource, pp_Parser.terminals[separatorId], '', self.lineno, 1) )
            return token
        raise StopIteration()

    def _isPreprocessingLine(self, line):
        """True when the (stripped) line begins with '#', i.e. a directive."""
        if not line: return False
        stripped_line = line.strip()
        if len(stripped_line) and stripped_line[0] == '#':
            return True
        return False
| StarcoderdataPython |
8149929 | """ base class for pvoutput """
from datetime import datetime, time
from math import floor
import re
from typing import Any, AnyStr
from .exceptions import InvalidRegexpError, DonationRequired
def round_to_base(number, base):
    """Round ``number`` down to the nearest multiple of ``base``.

    based on answer in https://stackoverflow.com/a/2272174/188774

    >>> round_to_base(17, 5)
    15
    """
    # floor() already returns an int, so the round() wrapper in the original
    # was a no-op and has been removed.
    return base * floor(number / base)
class PVOutputBase:
    """base class for the PVOutput API"""

    def __init__(
        self,
        apikey: str,
        systemid: int,
        donation_made: bool = False,
        stats_period: int = 5,
    ):
        # Fail fast on credential types; everything else has safe defaults.
        if not isinstance(systemid, int):
            raise TypeError("systemid should be int")
        if not isinstance(apikey, str):
            raise TypeError("apikey should be str")
        self.apikey = apikey
        self.systemid = systemid
        self.donation_made = donation_made
        self.stats_period = stats_period

    def _headers(self) -> dict:
        """Relevant documentation: https://pvoutput.org/help/api_specification.html#http-headers

        :return: headers for calls to the API
        :rtype: dict
        """
        headers = {
            "X-Pvoutput-Apikey": self.apikey,
            "X-Pvoutput-SystemId": str(self.systemid),
        }
        return headers

    def get_time_by_base(self) -> str:
        """rounds the current time to the base specified (ie, to 15 minutes or 5 minutes etc)"""
        now = datetime.now()
        hour = int(now.strftime("%H"))
        # round the minute to the current stats period
        minute = round_to_base(now.minute, self.stats_period)
        return time(hour=hour, minute=minute).strftime("%H:%M")

    @classmethod
    def _validate_format(
        cls,
        format_string: AnyStr,
        key: str,
        value: Any,
    ):
        """handles the regular expression format checks"""
        try:
            compiled = re.compile(format_string)
            match = compiled.match(value)
            if match is None:
                raise ValueError(
                    f"key '{key}', with value '{value}' does not match '{format_string!r}'"
                )
        except re.error as error:
            # BUG FIX: this message was missing the f-prefix, so the
            # placeholders were emitted literally instead of interpolated.
            raise InvalidRegexpError(
                f"Error for key '{key}' with format '{format_string!r}': {error}"
            ) from error

    # pylint: disable=too-many-branches
    def validate_data(self, data, apiset):
        """Does a super-simple validation based on the api def raises errors if it's wrong, returns True if it's OK

        This'll only raise an error on the first error it finds

        :param data: the data to validate.
        :type data: dict
        :param apiset: A set of validation rules, eg: pvoutput.ADDSTATUS_PARAMETERS
        :type apiset: dict
        :raises TypeError: if the type testing fails.
        :raises ValueError: if you're trying to pass an invalid value.
        :raises pvoutput.InvalidRegexpError: if value does not match the regexp in format.
        """
        # if you set a 'required_oneof' key in apiset, validation will require at least one of those keys to be set
        if "required_oneof" in apiset.keys() and (
            len([key for key in data.keys() if key in apiset["required_oneof"]["keys"]])
            == 0
        ):
            raise ValueError(
                f"one of {','.join(apiset['required_oneof']['keys'])} MUST be set"
            )
        for key in apiset.keys():
            # check that that required values are set
            if apiset[key].get("required", False) and key not in data.keys():
                if "default" in apiset[key]:
                    # set a default value
                    data[key] = apiset[key]["default"]
                else:
                    raise ValueError(f"key {key} required in data")
            # check maxlen
            if "maxlen" in apiset[key] and key in data:
                if len(data[key]) > apiset[key]["maxlen"]:
                    raise ValueError(
                        f"Value too long for key {key} {len(data[key])}>{apiset[key]['maxlen']}"
                    )
            # check the value is in the set of valid choices
            if "choices" in apiset[key] and key in data:
                if data[key] not in apiset[key]["choices"]:
                    raise ValueError(
                        f"Invalid value for key {key}: '{data[key]}', should be in {apiset[key]['choices']} "
                    )
        # check there's no extra fields in the data
        for key in data:
            if key not in apiset.keys():
                raise ValueError(f"key {key} isn't valid in the API spec")
            if apiset[key].get("type") and not isinstance(
                data[key], apiset[key]["type"]
            ):
                if data[key] is not None:
                    raise TypeError(
                        f"data[{key}] type ({type(data[key])} is invalid - should be {str(apiset[key]['type'])})"
                    )
        for key in data:
            if "format" in apiset[key]:
                self._validate_format(apiset[key]["format"], key, data[key])
            # can run additional functions over the data
            if "additional_validators" in apiset[key]:
                for validator in apiset[key]["additional_validators"]:
                    validator(data[key])
            # TODO: 'd' can't be more than 14 days ago, if a donator, goes out to 90
            # check if donation_made == True and age of thing
            # if self.donation_made:
            #     # check if more than 90 days ago
            # else:
            #     # check if more than 14 days ago
            # check for donation-only keys
            if apiset[key].get("donation_required") and not self.donation_made:
                raise DonationRequired(
                    f"key {key} requires an account which has donated"
                )
            # check if you're outside max/min values
            if apiset[key].get("maxval") and data.get(key) > apiset[key].get("maxval"):
                raise ValueError(
                    f"{key} cannot be higher than {apiset[key]['maxval']}, is {data[key]}"
                )
            if apiset[key].get("minval") and data.get(key) < apiset[key].get("minval"):
                raise ValueError(
                    f"{key} cannot be lower than {apiset[key]['minval']}, is {data[key]}"
                )
        return True
| StarcoderdataPython |
5076444 | <reponame>B-Trindade/Distributed-Chat
from dataclasses import dataclass
import datetime
@dataclass
class Message:
    """A chat message routed from one participant to another."""
    sender: str        # identifier of the sending participant
    receiver: str      # identifier of the receiving participant
    content: object    # message payload; any object
    # BUG FIX: the file imports the *module* ``datetime`` (import datetime),
    # so the annotation must name the class inside it — the bare ``datetime``
    # previously annotated the field with the module itself.
    timestamp: datetime.datetime
1779959 | from dbnd import task
# Plain no-op placeholder functions.
def f1():
    pass

def f2():
    pass

def f3():
    pass

def f4():
    pass

def f5():
    pass

# No-op placeholder registered as a dbnd task via the @task decorator.
@task
def f6():
    pass
| StarcoderdataPython |
97114 | <reponame>anxodio/aoc2021
from pathlib import Path
from typing import List
import itertools
def measurement_increases_counter(measurements: List[int]) -> int:
return sum(
measurement2 > measurement1
for measurement1, measurement2 in itertools.pairwise(measurements)
)
def test_measurement_increases_counter():
    """Regression check against the known sample data (7 increases)."""
    sample = [199, 200, 208, 210, 200, 207, 240, 269, 260, 263]
    assert measurement_increases_counter(sample) == 7
if __name__ == "__main__":
    # Puzzle input: one integer measurement per line, read from a file
    # next to this script.
    with open((Path(__file__).parent / "input.txt")) as f:
        measurements = [int(line) for line in f.readlines()]
    print(measurement_increases_counter(measurements))
| StarcoderdataPython |
from pathlib import Path
import pytest
from pytest import approx
import themisasi as ta
from datetime import datetime, timedelta, date
#
# Test fixtures shipped alongside this module: one GAKO all-sky imager data
# file plus two calibration files (IDL .sav and CDF).
R = Path(__file__).parent
datfn = R / "thg_l1_asf_gako_2011010617_v01.cdf"
cal1fn = R / "themis_skymap_gako_20110305-+_vXX.sav"
cal2fn = R / "thg_l2_asc_gako_19700101_v01.cdf"
# Fail fast at import time if the fixture data is missing.
assert datfn.is_file()
assert cal1fn.is_file()
assert cal2fn.is_file()
def test_missing_file(tmp_path):
    """Loading a nonexistent file must raise FileNotFoundError."""
    missing = tmp_path / "notafile.cdf"
    with pytest.raises(FileNotFoundError):
        ta.load(missing)
def test_filename_simple():
    """Load by explicit filename and sanity-check the image stack."""
    data = ta.load(datfn)
    imgs = data["imgs"]
    assert imgs.site == "gako"
    assert imgs.shape == (23, 256, 256)
    assert imgs.dtype == "uint16"
def test_filename_calname():
    """Mismatched calibration raises; the matching CDF calibration loads."""
    with pytest.raises(ValueError):
        ta.load(datfn, calfn=cal1fn)
    data = ta.load(datfn, calfn=cal2fn)
    assert "az" in data
    assert "el" in data
    assert data["az"].shape == data["imgs"].shape[1:]
def test_load_filename():
    """load by filename"""
    dat = ta.load(datfn)
    stamps = dat.time.values.astype("datetime64[us]").astype(datetime)
    hour_start = datetime(2011, 1, 6, 17)
    hour_end = datetime(2011, 1, 6, 18)
    assert (stamps >= hour_start).all()
    assert (stamps <= hour_end).all()
@pytest.mark.parametrize(
    "site, time", [("gako", "2011-01-06T17:00:00"), ("gako", datetime(2011, 1, 6, 17))]
)
def test_load_site_time(site, time):
    """load by sitename + time"""
    # Accepts the time both as an ISO string and as a datetime object.
    dat = ta.load(R, site, time)
    assert dat["imgs"].shape[0] == 1
    t = dat.time.values.astype("datetime64[us]").astype(datetime)
    # The single returned frame must lie within 0.5 s of the request.
    assert abs(t - datetime(2011, 1, 6, 17, 0, 0)) < timedelta(seconds=0.5)
@pytest.mark.parametrize(
    "site, treq",
    [
        ("gako", ("2011-01-06T17:00:00", "2011-01-06T17:00:12")),
        ("gako", ("2011-01-06T16:59:59", "2011-01-06T17:00:12")),
    ],
)
def test_load_site_timerange(site, treq):
    """load by sitename + timerange"""
    dat = ta.load(R, site, treq=treq)
    # Both request windows select the same four frames; the slightly early
    # start in the second case must not add extra frames.
    assert dat["imgs"].shape[0] == 4
    times = dat.time.values.astype("datetime64[us]").astype(datetime)
    assert (times >= datetime(2011, 1, 6, 17)).all()
    assert (times <= datetime(2011, 1, 6, 17, 0, 12)).all()
@pytest.mark.parametrize(
    "path, val, err",
    [
        (datfn, 1, TypeError),
        (datfn, "2011-01-06T16:59:59", ValueError),
        (datfn, "2011-01-06T18:00:01", ValueError),
        (R, "2010-01-01", FileNotFoundError),
        (R, ("2010-01-01", "2010-01-01T01"), FileNotFoundError),
    ],
)
def test_bad_time(path, val, err):
    # Wrong request type, out-of-span times, and dates with no data files
    # each raise a distinct exception type.
    with pytest.raises(err):
        ta.load(path, "gako", treq=val)
def test_good_time():
    """Time requests within the file's span select the expected frames."""
    # Single time request -> exactly one frame, within 20 ms of the request.
    dat = ta.load(datfn, treq="2011-01-06T17:00:12")
    assert dat["imgs"].shape[0] == 1
    time = dat.time.values.astype("datetime64[us]").astype(datetime)
    assert abs(time - datetime(2011, 1, 6, 17, 0, 12)) < timedelta(seconds=0.02)
    # Time-range request -> four frames.
    dat = ta.load(datfn, treq=("2011-01-06T17:00:00", "2011-01-06T17:00:12"))
    assert dat["imgs"].shape[0] == 4
    time = dat.time.values.astype("datetime64[us]").astype(datetime)
    # BUG FIX: `time` was computed here but never checked (dangling
    # statement). Assert the frames fall inside the requested range,
    # mirroring test_load_site_timerange above.
    assert (time >= datetime(2011, 1, 6, 17)).all()
    assert (time <= datetime(2011, 1, 6, 17, 0, 12)).all()
def test_autoload_cal():
    """Calibration is located automatically from site name + time."""
    dat = ta.load(R, "gako", "2011-01-06T17:00:00")
    assert "az" in dat
    assert "el" in dat
def test_calread_idl():
    """IDL .sav calibration file yields the expected az/el/lon values."""
    cal = ta.loadcal(cal1fn)
    elevation = cal["el"]
    azimuth = cal["az"]
    assert elevation[29, 161] == approx(15.458)
    assert azimuth[29, 161] == approx(1.6255488)
    assert cal.lon == approx(-145.16)
def test_calread_cdf():
    """CDF calibration file yields the expected az/el/lon values."""
    cal = ta.loadcal(cal2fn)
    elevation = cal["el"]
    azimuth = cal["az"]
    assert elevation[29, 161] == approx(19.132568)
    assert azimuth[29, 161] == approx(183.81241)
    assert cal.lon == approx(-145.16)
def test_calread_sitedate():
    """Calibration may also be resolved from site name + date."""
    cal = ta.loadcal(R, "gako", "2011-01-06")
    expected = date(2007, 2, 2)
    assert cal.caltime.date() == expected
| StarcoderdataPython |
355780 | <filename>src/genie/libs/parser/iosxe/tests/ShowIpNatStatistics/cli/equal/golden_output_1_expected.py<gh_stars>100-1000
# Golden (expected) parsed output for the ShowIpNatStatistics parser unit
# test: the dict a correct parse of the device's `show ip nat statistics`
# CLI output must produce.
expected_output = {
    "active_translations": {"dynamic": 0, "extended": 0, "static": 0, "total": 0},
    "cef_punted_pkts": 0,
    "cef_translated_pkts": 0,
    "dynamic_mappings": {
        "inside_source": {
            "id": {
                1: {
                    "access_list": "test-robot",
                    "match": "access-list test-robot pool test-robot",
                    "pool": {
                        "test-robot": {
                            "allocated": 0,
                            "allocated_percentage": 0,
                            "end": "10.1.1.1",
                            "misses": 0,
                            "netmask": "255.255.255.252",
                            "start": "10.1.1.1",
                            "total_addresses": 1,
                            "type": "generic",
                        }
                    },
                    "refcount": 0,
                }
            }
        }
    },
    "expired_translations": 11013,
    "hits": 3358708,
    "interfaces": {
        "inside": ["TenGigabitEthernet0/1/0"],
        "outside": ["TenGigabitEthernet0/2/0"],
    },
    "ip_alias_add_fail": 0,
    "limit_entry_add_fail": 0,
    "mapping_stats_drop": 0,
    "misses": 11050,
    "pool_stats_drop": 0,
    "port_block_alloc_fail": 0,
}
| StarcoderdataPython |
5029388 | <filename>convert_gt.py
from math import ceil
import os
from tqdm import tqdm
from dict2xml import dict2xml
import shutil
if __name__=='__main__':
    # Convert CCTSDB's single ground-truth text file (one annotation per
    # line: "<image>;<x1>;<y1>;<x2>;<y2>;<label>") into one XML file per
    # image under ../CCTSDB_anno.
    data_root = os.path.join(os.getcwd(), '..', 'CCTSDB')
    anno_ori_file = os.path.join(data_root, 'GroundTruth', 'groundtruth0000-9999.txt')
    anno_convert_folder = os.path.join(os.getcwd(),'..','CCTSDB_anno')
    # Start from a clean output directory every run.
    if os.path.exists(anno_convert_folder):
        shutil.rmtree(anno_convert_folder)
    os.mkdir(anno_convert_folder)
    # NOTE(review): label_convert is defined but never applied; the raw text
    # label from the file is written to the XML instead — confirm intent.
    label_convert = {'prohibitory':'0', 'mandatory':'1', 'warning':'2'}
    rf = open(anno_ori_file,'r')
    # NOTE(review): only these two exact frame numbers are skipped; confirm
    # a range (e.g. 5408-5529) was not intended.
    broken_img_num = (5408,5529)
    boxes = list()
    last_num = -1
    for line in tqdm(rf.readlines()):
        attr = line.strip().split(';')
        if not attr[-1]:
            continue
        num = int(attr[0].split('.')[0])
        if num in broken_img_num:
            continue
        label = attr[-1]
        boxes.append([int(float(x)) for x in attr[1:-1]] + [label])
        if num == last_num:
            continue
        else:
            # NOTE(review): the XML is written on the *first* line seen for a
            # new image number, so `boxes` at this point holds leftovers from
            # the previous image plus only this image's first box. Later
            # boxes of the same image accumulate but are flushed into the
            # *next* image's file. Verify against expected output — this
            # looks like a grouping bug.
            img_folder = 'image' + f'{num//1000}000-{num//1000}999'
            anno_info = {
                'folder': img_folder,
                'filename': attr[0],
                'path': os.path.join(img_folder, attr[0])
            }
            for index, b in enumerate(boxes):
                anno_info[f'object_{index}'] = {
                    'name': b[-1],
                    'bndbox':{
                        'xmin': b[0],
                        'ymin': b[1],
                        'xmax': b[2],
                        'ymax': b[3]
                    }
                }
            with open(os.path.join(anno_convert_folder, attr[0].replace('png','xml')), 'w') as wf:
                xml = dict2xml(anno_info, wrap ='annotation', indent =" ")
                wf.write(xml)
            last_num = num
            boxes=list()
    rf.close()
| StarcoderdataPython |
3345618 | <reponame>larribas/dagger-contrib<gh_stars>1-10
"""Collection of serializers for Pandas DataFrames (https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html)."""
from dagger_contrib.serializer.pandas.dataframe.as_csv import AsCSV # noqa
from dagger_contrib.serializer.pandas.dataframe.as_parquet import AsParquet # noqa
| StarcoderdataPython |
"""
This script evaluates the likelihood for a range of values, timing it in the
process to investigate the time complexity.
$ source venv/bin/activate
$ python timing.py demo-data.json demo-output.json demo-config.json
"""
import algo1
# algo1 is the required algorithm we need from the popsize-distribution
# repository. Looking at one of the plotting scripts we see the following
# example of how to use it.
#
# Second subplot
# Comparison to the density values when r = 1
# lista = [7, 6, 4, 3] # births
# listb = []
# listc = []
# listd = [2] # sampling
# liste = []
# listf = [5, 1] # occurrence
# obs = [lista, listb, listc, listd, liste, listf]
import sys
import json
import timeit
import os
# Positional CLI arguments: input data, output destination, configuration.
input_json = sys.argv[1]
output_json = sys.argv[2]
config_json = sys.argv[3]
if os.path.isfile(config_json):
    with open(config_json) as config_data:
        config_dict = json.load(config_data)
    lamb, mu, psi, rhoPairs, omega, uPairs = config_dict['acSimParams']['mpParameters']
    # All rho pairs must share the same rho value; use that single value.
    assert rhoPairs.count(rhoPairs[0]) == len(rhoPairs)
    rho = rhoPairs[0][1]
    r = 1 # this is the removal probability always fixed to one.
    params = lamb, mu, rho, psi, r, omega
    num_replicates = config_dict['pyNumReplicates']
else:
    raise FileNotFoundError
print(input_json)
with open(input_json) as data_json:
    input_data = json.load(data_json)
obs = [input_data["OBirth"], [], [], input_data["ObsUnscheduledSequenced"], [], input_data["OOccurrence"]]
# params
distinguishRemoval = True
# These are the settings for computing the truncation parameter for the llhd
# function.
prev_llhd = -1e6
curr_llhd = -1e4
has_converged = False
iter_count = 0
max_iters = 20 # maximum number of iterations prior to giving up
truncation_param = 10
truncation_delta = 10 # how much to change delta per loop
prop_change_thresh = 1e-3 # threshold for proportion difference to have converged.
# Grow the truncation parameter until two successive log-likelihood values
# agree to within the relative threshold, or the iteration budget runs out.
while iter_count < max_iters and (not has_converged):
    truncation_param = truncation_param + truncation_delta
    iter_count = iter_count + 1
    prev_llhd, curr_llhd = curr_llhd, algo1.logDensity( obs, params, distinguishRemoval, truncation_param)
    has_converged = abs(prev_llhd - curr_llhd) < abs(prop_change_thresh * prev_llhd)
print("did it converge?")
print(has_converged)
print(prev_llhd,curr_llhd)
print(truncation_param)
if has_converged:
    # NOTE this function returns the *total* amount of time it takes to
    # evaluate the expression the given number of times, so to get the average
    # evaluation time you need to divide by the number of replicates!!!
    eval_time = timeit.timeit('algo1.logDensity( obs, params, distinguishRemoval, truncation_param)',
                              globals=globals(),
                              number = num_replicates)
    result = {
        "inputJson": input_json,
        "hasConverged": has_converged,
        "truncationParameter": truncation_param,
        "convergedLlhd": curr_llhd,
        "numReplicates": num_replicates,
        "evaluationTime": eval_time
    }
    with open(output_json, 'w') as output_file:
        json.dump(result, output_file)
| StarcoderdataPython |
5109340 | <filename>spyrk/__init__.py
# This file is part of Spyrk.
#
# Spyrk is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Spyrk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Spyrk. If not, see <http://www.gnu.org/licenses/>.
"""Spyrk: Python module for Spark devices.
* SparkCloud class provides access to the Spark Cloud.
>>> from spyrk import SparkCloud
Spyrk is licensed under LGPLv3.
http://github.com/Alidron/spyrk
"""
from .spark_cloud import SparkCloud
from .__about__ import (
__title__, __summary__, __uri__, __version__,
__author__, __email__, __license__, __copyright__,
)
# Names exported by `from spyrk import *`: the cloud client class plus the
# package metadata re-exported from __about__.
__all__ = [
    'SparkCloud',
    '__title__', '__summary__', '__uri__', '__version__',
    '__author__', '__email__', '__license__', '__copyright__',
]
| StarcoderdataPython |
1602742 | from matrix import *
def split(t):
    """
    Split a matrix into 4 square quadrants.

    Returns a tuple (topLeft, topRight, bottomLeft, bottomRight) of
    submatrices of *t*, cut at the midpoint of its rows and columns
    (subMatrix bounds are inclusive).
    """
    # Use floor division instead of int(x / 2): identical result for the
    # non-negative sizes used here, but idiomatic and avoids the
    # intermediate float.
    midRow = t.rows // 2
    midCol = t.cols // 2
    topLeft = t.subMatrix(0, 0, midRow - 1, midCol - 1)
    topRight = t.subMatrix(0, midCol, midRow - 1, t.cols - 1)
    bottomLeft = t.subMatrix(midRow, 0, t.rows - 1, midCol - 1)
    bottomRight = t.subMatrix(midRow, midCol, t.rows - 1, t.cols - 1)
    return (topLeft, topRight, bottomLeft, bottomRight)
def merge(TL, TR, BL, BR):
    """
    Merge 4 squares of a matrix into one [[TL, TR], [BL, BR]].

    Each quadrant is copied into the output at its (row, col) offset.
    """
    out = matrix(TL.rows + BL.rows, TL.cols + TR.cols)
    quadrants = (
        (TL, 0, 0),
        (TR, 0, TL.cols),
        (BL, TL.rows, 0),
        (BR, TL.rows, TL.cols),
    )
    for quad, row_off, col_off in quadrants:
        for r in range(quad.rows):
            for c in range(quad.cols):
                out.set(row_off + r, col_off + c, quad.get(r, c))
    return out
def multiplyStrassen(m1, m2):
    """
    Split the matrices in 4 squares and recurse in each of them
    using the Strassen algorithm
    """
    # Base case: 1x1 matrices multiply directly.
    if m1.rows == 1:
        out = matrix(m1.rows, m1.cols)
        out.set(0, 0, m1.get(0, 0) * m2.get(0, 0))
        return out
    a = split(m1)
    b = split(m2)
    a11 = a[0]
    a12 = a[1]
    a21 = a[2]
    a22 = a[3]
    b11 = b[0]
    b12 = b[1]
    b21 = b[2]
    b22 = b[3]
    # The ten Strassen sums/differences (S1..S10 in CLRS notation).
    s1 = b12 - b22
    s2 = a11 + a12
    s3 = a21 + a22
    s4 = b21 - b11
    s5 = a11 + a22
    s6 = b11 + b22
    s7 = a12 - a22
    s8 = b21 + b22
    s9 = a11 - a21
    s10 = b11 + b12
    # Seven recursive products (instead of the naive eight).
    p1 = multiplyStrassen(a11, s1)
    p2 = multiplyStrassen(s2, b22)
    p3 = multiplyStrassen(s3, b11)
    p4 = multiplyStrassen(a22, s4)
    p5 = multiplyStrassen(s5, s6)
    p6 = multiplyStrassen(s7, s8)
    p7 = multiplyStrassen(s9, s10)
    # Recombine the products into the four quadrants of the result.
    c11 = p5 + p4 - p2 + p6
    c12 = p1 + p2
    c21 = p3 + p4
    c22 = p5 + p1 - p3 - p7
    return merge(c11, c12, c21, c22)
def multiplySquareMatrices(matrix1, matrix2):
    """
    Multiply square matrices whose size is a power of two, recursively.
    Must be a power of 2 because of the splitting method; incompatible
    operands yield an empty matrix.
    """
    if matrix1.canMultiply(matrix2):
        return multiplyStrassen(matrix1, matrix2)
    return matrix(0, 0)
def multiply(matrix1, matrix2):
    """
    Multiply 2 matrices: pad each with zeros up to the next 2^n x 2^n size,
    multiply recursively with Strassen's algorithm, then trim the padding
    from the product.

    Complexity (n = matrix rows/cols)
    Time complexity: O(n^2.8)
    """
    matrix1.fillToSquare()
    matrix2.fillToSquare()
    product = multiplySquareMatrices(matrix1, matrix2)
    product.removeEmptyCells()
    return product
def test():
    """Smoke-test multiply() against hand-computed expected products."""

    def check(actual, expected):
        # Deduplicated pass/fail reporting; keeps the original print-based
        # behavior (prints "Success" or "Fail" per case).
        print("Success" if actual.data == expected else "Fail")

    m1 = matrix(2, 2)
    m1.data = [[1, 2], [3, 4]]
    m2 = matrix(2, 2)
    m2.data = [[2, 0], [3, 1]]
    check(multiply(m1, m2), [[8, 2], [18, 4]])
    m3 = matrix(4, 4)
    m3.data = [[1, 2, 3, 4], [6, 7, 8, 9], [11, 12, 13, 14], [16, 17, 18, 19]]
    check(multiply(m3, m3), [[110, 120, 130, 140], [280, 310, 340, 370], [450, 500, 550, 600], [620, 690, 760, 830]])
    m4 = matrix(5, 5)
    m4.data = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], [16, 17, 18, 19, 20], [21, 22, 23, 24, 25]]
    check(multiply(m4, m4), [[215, 230, 245, 260, 275], [490, 530, 570, 610, 650], [765, 830, 895, 960, 1025], [1040, 1130, 1220, 1310, 1400], [1315, 1430, 1545, 1660, 1775]])


test()
| StarcoderdataPython |
28921 | #
# Copyright (C) 2018 Pico Technology Ltd. See LICENSE file for terms.
#
| StarcoderdataPython |
11282131 | <reponame>earth2marsh/python-oauth<filename>setup.py
#!/usr/bin/env python
#from distutils.core import setup
from setuptools import setup, find_packages
# Distribution metadata for the oauth2 package (OAuth 1.0a library).
setup(name="oauth2",
      version="1.2.1",
      description="Library for OAuth version 1.0a.",
      author="<NAME>",
      author_email="<EMAIL>",
      url="http://github.com/simplegeo/python-oauth2",
      packages = find_packages(),
      install_requires = ['httplib2'],
      license = "MIT License",
      keywords="oauth",
      zip_safe = True,
      tests_require=['nose', 'coverage', 'mox'])
| StarcoderdataPython |
1755884 | import logging
from pyzeebe import Job
logger = logging.getLogger(__name__)
class TaskState:
    """Tracks which jobs are currently active.

    Only the job *keys* are stored, not the Job objects themselves.
    """

    def __init__(self):
        # Idiom: use a list literal rather than list().
        self._active_jobs: list = []

    def remove(self, job: Job) -> None:
        """Forget *job*; log a warning if its key was never registered."""
        try:
            self._active_jobs.remove(job.key)
        except ValueError:
            logger.warning("Could not find Job key %s when trying to remove from TaskState", job.key)

    def add(self, job: Job) -> None:
        """Register *job* as active."""
        self._active_jobs.append(job.key)

    def count_active(self) -> int:
        """Return the number of currently active jobs."""
        return len(self._active_jobs)
| StarcoderdataPython |
1935170 | <gh_stars>0
from pathlib import Path
import pytest
from ixmp import TimeSeries
from ixmp.backend import ItemType
from ixmp.backend.base import Backend, CachingBackend
from ixmp.testing import make_dantzig
class BE1(Backend):
    """Incomplete subclass."""
    # No abstract Backend methods are implemented here, so instantiating
    # BE1 raises TypeError (exercised by test_class below).
def noop(self, *args, **kwargs):
    """Accept any call signature, do nothing, and return None."""
class BE2(Backend):
    """Complete subclass."""
    # Every abstract method on Backend is satisfied with the module-level
    # no-op so the class can be instantiated (see test_class below).
    add_model_name = noop
    add_scenario_name = noop
    cat_get_elements = noop
    cat_list = noop
    cat_set_elements = noop
    check_out = noop
    clear_solution = noop
    clone = noop
    commit = noop
    delete = noop
    delete_geo = noop
    delete_item = noop
    delete_meta = noop
    discard_changes = noop
    get = noop
    get_data = noop
    get_doc = noop
    get_geo = noop
    get_meta = noop
    get_model_names = noop
    get_nodes = noop
    get_scenarios = noop
    get_scenario_names = noop
    get_timeslices = noop
    get_units = noop
    has_solution = noop
    init = noop
    init_item = noop
    is_default = noop
    item_delete_elements = noop
    item_get_elements = noop
    item_index = noop
    item_set_elements = noop
    last_update = noop
    list_items = noop
    remove_meta = noop
    run_id = noop
    set_as_default = noop
    set_data = noop
    set_doc = noop
    set_geo = noop
    set_meta = noop
    set_node = noop
    set_timeslice = noop
    set_unit = noop
def test_class():
    """Abstract-method enforcement on Backend subclasses."""
    # An incomplete Backend subclass can't be instantiated
    with pytest.raises(
        TypeError, match="Can't instantiate abstract class BE1 with abstract methods"
    ):
        BE1()
    # Complete subclass can be instantiated
    BE2()
class TestBackend:
    """Exercise Backend's concrete default methods via the BE2 stub."""
    @pytest.fixture
    def be(self):
        # Fresh, fully-stubbed backend instance for each test.
        return BE2()
    # Methods with a default implementation
    def test_get_auth(self, be):
        # Default get_auth() grants access to every requested model.
        assert dict(foo=True, bar=True, baz=True) == be.get_auth(
            "user", "foo bar baz".split(), "access"
        )
    def test_read_file(self, be):
        # Base-class read_file is not implemented.
        with pytest.raises(NotImplementedError):
            be.read_file(Path("foo"), ItemType.VAR)
    def test_write_file(self, be):
        # Base-class write_file is not implemented.
        with pytest.raises(NotImplementedError):
            be.write_file(Path("foo"), ItemType.VAR)
# One valid (empty) parameter set plus two invalid ones expected to raise.
@pytest.mark.parametrize(
    "args, kwargs",
    (
        (tuple(), dict()),
        # Invalid
        pytest.param(("foo",), dict(), marks=pytest.mark.xfail(raises=ValueError)),
        pytest.param(tuple(), dict(bar=""), marks=pytest.mark.xfail(raises=ValueError)),
    ),
)
def test_handle_config(args, kwargs):
    """Test :meth:`JDBCBackend.handle_config`."""
    assert dict() == Backend.handle_config(args, kwargs)
class TestCachingBackend:
    """Cache behavior of CachingBackend (tested through a concrete backend)."""
    def test_cache_non_hashable(self):
        filters = {"s": ["foo", 42, object()]}
        # _cache_key() cannot handle non-hashable object()
        with pytest.raises(
            TypeError, match="Object of type object is not JSON serializable"
        ):
            CachingBackend._cache_key(object(), "par", "p", filters)
    def test_cache_invalidate(self, test_mp):
        # Invalidating a (ts, item, filters) entry must not raise, even if
        # nothing was cached for it.
        backend = test_mp._backend
        ts = TimeSeries(test_mp, model="foo", scenario="bar", version="new")
        backend.cache_invalidate(ts, "par", "baz", dict(x=["x1", "x2"], y=["y1", "y2"]))
    def test_del_ts(self, test_mp):
        """Test CachingBackend.del_ts()."""
        # Since CachingBackend is an abstract class, test it via JDBCBackend
        backend = test_mp._backend
        cache_size_pre = len(backend._cache)
        # Load data, thereby adding to the cache
        s = make_dantzig(test_mp)
        s.par("d")
        # Cache size has increased
        assert len(backend._cache) == cache_size_pre + 1
        # Delete the object; associated cache is freed
        del s
        # Objects were invalidated/removed from cache
        assert len(backend._cache) == cache_size_pre
| StarcoderdataPython |
3510017 | <gh_stars>0
#!/usr/bin/env python
import os
import sys
import argparse
import math
from EMAN2 import *
def main():
    # NOTE: this file is Python 2 (print statements below).
    # Generates a Chimera command file (orient.com) that, for each "eman"
    # orientation at 10-degree spacing, sets the key light direction and
    # saves a PNG snapshot.
    progname = os.path.basename(sys.argv[0])
    usage = progname + """ [options] <input>
    Process input.
    """
    args_def = {'apix':1.25, 'num':3}
    parser = argparse.ArgumentParser()
    parser.add_argument("input", nargs='*', help="specify input to be processed")
    parser.add_argument("-a", "--apix", type=float, help="specify apix, by default {}".format(args_def['apix']))
    parser.add_argument("-n", "--num", type=int, help="specify a num, by default {}".format(args_def['num']))
    args = parser.parse_args()
    if len(sys.argv) == 1:
        print "usage: " + usage
        print "Please run '" + progname + " -h' for detailed options."
        sys.exit(1)
    # get default values
    for i in args_def:
        if args.__dict__[i] == None:
            args.__dict__[i] = args_def[i]
    #
    sym = Symmetries.get("c1")
    orients = sym.gen_orientations("eman",{"delta":10,"inc_mirror":False})
    with open('orient.com','w') as orient:
        orient.write('lighting mode single\n')
        orient.write('set bgTransparency\n\n')
        for n,i in enumerate(orients):
            alt = i.get_rotation()['alt']
            az = i.get_rotation()['az']
            # Unit vector from the (alt, az) Euler angles, in degrees.
            x=sin_d(alt)*cos_d(az)
            y=sin_d(alt)*sin_d(az)
            z=cos_d(alt)
            orient.write('lighting key direction {:f} {:f} {:f}\n'.format(x,y,z))
            orient.write('copy file {:04d}.png png dpi 300 supersample 4 width 3000 height 3000\n\n'.format(n))
def cos_d(d):
    """Cosine of an angle *d* given in degrees."""
    angle_rad = math.radians(d)
    return math.cos(angle_rad)
def sin_d(d):
    """Sine of an angle *d* given in degrees."""
    angle_rad = math.radians(d)
    return math.sin(angle_rad)
if __name__ == '__main__':
    # Script entry point.
    main()
| StarcoderdataPython |
6440144 | from sqlalchemy.testing import eq_, assert_raises_message, assert_raises, is_
from sqlalchemy import testing
from sqlalchemy.testing import fixtures, engines
from sqlalchemy import util
from sqlalchemy import (
exc, sql, func, select, String, Integer, MetaData, and_, ForeignKey,
union, intersect, except_, union_all, VARCHAR, INT, CHAR, text, Sequence,
bindparam, literal, not_, type_coerce, literal_column, desc, asc,
TypeDecorator, or_, cast, table, column)
from sqlalchemy.engine import default, result as _result
from sqlalchemy.testing.schema import Table, Column
# ongoing - these are old tests. those which are of general use
# to test a dialect are being slowly migrated to
# sqlalhcemy.testing.suite
users = users2 = addresses = metadata = None
class QueryTest(fixtures.TestBase):
__backend__ = True
    @classmethod
    def setup_class(cls):
        # Create the three shared tables once per test class.
        global users, users2, addresses, metadata
        metadata = MetaData(testing.db)
        users = Table(
            'query_users', metadata,
            Column(
                'user_id', INT, primary_key=True,
                test_needs_autoincrement=True),
            Column('user_name', VARCHAR(20)),
            test_needs_acid=True
        )
        addresses = Table(
            'query_addresses', metadata,
            Column(
                'address_id', Integer, primary_key=True,
                test_needs_autoincrement=True),
            Column('user_id', Integer, ForeignKey('query_users.user_id')),
            Column('address', String(30)),
            test_needs_acid=True
        )
        users2 = Table(
            'u2', metadata,
            Column('user_id', INT, primary_key=True),
            Column('user_name', VARCHAR(20)),
            test_needs_acid=True
        )
        metadata.create_all()
    @engines.close_first
    def teardown(self):
        # Empty all tables between tests; addresses first because it holds
        # a foreign key to query_users.
        addresses.delete().execute()
        users.delete().execute()
        users2.delete().execute()
    @classmethod
    def teardown_class(cls):
        # Drop everything created in setup_class().
        metadata.drop_all()
    @testing.requires.multivalues_inserts
    def test_multivalues_insert(self):
        # Multi-VALUES INSERT with both dict and tuple parameter forms.
        users.insert(
            values=[
                {'user_id': 7, 'user_name': 'jack'},
                {'user_id': 8, 'user_name': 'ed'}]).execute()
        rows = users.select().order_by(users.c.user_id).execute().fetchall()
        self.assert_(rows[0] == (7, 'jack'))
        self.assert_(rows[1] == (8, 'ed'))
        users.insert(values=[(9, 'jack'), (10, 'ed')]).execute()
        rows = users.select().order_by(users.c.user_id).execute().fetchall()
        self.assert_(rows[2] == (9, 'jack'))
        self.assert_(rows[3] == (10, 'ed'))
    def test_insert_heterogeneous_params(self):
        """test that executemany parameters are asserted to match the
        parameter set of the first."""
        # A later parameter set missing a key present in the first one
        # raises StatementError.
        assert_raises_message(
            exc.StatementError,
            r"A value is required for bind parameter 'user_name', in "
            "parameter group 2 "
            "\(original cause: (sqlalchemy.exc.)?InvalidRequestError: A "
            "value is required for bind parameter 'user_name', in "
            "parameter group 2\) u?'INSERT INTO query_users",
            users.insert().execute,
            {'user_id': 7, 'user_name': 'jack'},
            {'user_id': 8, 'user_name': 'ed'},
            {'user_id': 9}
        )
        # this succeeds however. We aren't yet doing
        # a length check on all subsequent parameters.
        users.insert().execute(
            {'user_id': 7},
            {'user_id': 8, 'user_name': 'ed'},
            {'user_id': 9}
        )
    def test_lastrow_accessor(self):
        """Tests the inserted_primary_key and lastrow_has_id() functions."""

        def insert_values(engine, table, values):
            """
            Inserts a row into a table, returns the full list of values
            INSERTed including defaults that fired off on the DB side and
            detects rows that had defaults and post-fetches.
            """
            # verify implicit_returning is working
            if engine.dialect.implicit_returning:
                ins = table.insert()
                comp = ins.compile(engine, column_keys=list(values))
                if not set(values).issuperset(
                        c.key for c in table.primary_key):
                    assert comp.returning
            result = engine.execute(table.insert(), **values)
            ret = values.copy()
            # Merge the reported primary key into the supplied values.
            for col, id in zip(table.primary_key, result.inserted_primary_key):
                ret[col.key] = id
            if result.lastrow_has_defaults():
                # Post-fetch the full row to pick up server-side defaults.
                criterion = and_(
                    *[
                        col == id for col, id in
                        zip(table.primary_key, result.inserted_primary_key)])
                row = engine.execute(table.select(criterion)).first()
                for c in table.c:
                    ret[c.key] = row[c]
            return ret
        if testing.against('firebird', 'postgresql', 'oracle', 'mssql'):
            assert testing.db.dialect.implicit_returning
        # Run the scenarios both with and without implicit RETURNING when
        # the dialect supports it.
        if testing.db.dialect.implicit_returning:
            test_engines = [
                engines.testing_engine(options={'implicit_returning': False}),
                engines.testing_engine(options={'implicit_returning': True}),
            ]
        else:
            test_engines = [testing.db]
        for engine in test_engines:
            metadata = MetaData()
            # (skip-list, table definition, INSERT values, expected full row)
            for supported, table, values, assertvalues in [
                (
                    {'unsupported': ['sqlite']},
                    Table(
                        "t1", metadata,
                        Column(
                            'id', Integer, primary_key=True,
                            test_needs_autoincrement=True),
                        Column('foo', String(30), primary_key=True)),
                    {'foo': 'hi'},
                    {'id': 1, 'foo': 'hi'}
                ),
                (
                    {'unsupported': ['sqlite']},
                    Table(
                        "t2", metadata,
                        Column(
                            'id', Integer, primary_key=True,
                            test_needs_autoincrement=True),
                        Column('foo', String(30), primary_key=True),
                        Column('bar', String(30), server_default='hi')
                    ),
                    {'foo': 'hi'},
                    {'id': 1, 'foo': 'hi', 'bar': 'hi'}
                ),
                (
                    {'unsupported': []},
                    Table(
                        "t3", metadata,
                        Column("id", String(40), primary_key=True),
                        Column('foo', String(30), primary_key=True),
                        Column("bar", String(30))
                    ),
                    {'id': 'hi', 'foo': 'thisisfoo', 'bar': "thisisbar"},
                    {'id': 'hi', 'foo': 'thisisfoo', 'bar': "thisisbar"}
                ),
                (
                    {'unsupported': []},
                    Table(
                        "t4", metadata,
                        Column(
                            'id', Integer,
                            Sequence('t4_id_seq', optional=True),
                            primary_key=True),
                        Column('foo', String(30), primary_key=True),
                        Column('bar', String(30), server_default='hi')
                    ),
                    {'foo': 'hi', 'id': 1},
                    {'id': 1, 'foo': 'hi', 'bar': 'hi'}
                ),
                (
                    {'unsupported': []},
                    Table(
                        "t5", metadata,
                        Column('id', String(10), primary_key=True),
                        Column('bar', String(30), server_default='hi')
                    ),
                    {'id': 'id1'},
                    {'id': 'id1', 'bar': 'hi'},
                ),
                (
                    {'unsupported': ['sqlite']},
                    Table(
                        "t6", metadata,
                        Column(
                            'id', Integer, primary_key=True,
                            test_needs_autoincrement=True),
                        Column('bar', Integer, primary_key=True)
                    ),
                    {'bar': 0},
                    {'id': 1, 'bar': 0},
                ),
            ]:
                if testing.db.name in supported['unsupported']:
                    continue
                try:
                    table.create(bind=engine, checkfirst=True)
                    i = insert_values(engine, table, values)
                    assert i == assertvalues, "tablename: %s %r %r" % \
                        (table.name, repr(i), repr(assertvalues))
                finally:
                    table.drop(bind=engine)
    # TODO: why not in the sqlite suite?
    @testing.only_on('sqlite+pysqlite')
    @testing.provide_metadata
    def test_lastrowid_zero(self):
        # A DBAPI lastrowid of 0 must round-trip as primary key [0],
        # not be mistaken for "no id".
        from sqlalchemy.dialects import sqlite
        eng = engines.testing_engine()

        class ExcCtx(sqlite.base.SQLiteExecutionContext):
            def get_lastrowid(self):
                return 0
        eng.dialect.execution_ctx_cls = ExcCtx
        t = Table(
            't', self.metadata, Column('x', Integer, primary_key=True),
            Column('y', Integer))
        t.create(eng)
        r = eng.execute(t.insert().values(y=5))
        eq_(r.inserted_primary_key, [0])
    @testing.fails_on(
        'sqlite', "sqlite autoincremnt doesn't work with composite pks")
    def test_misordered_lastrow(self):
        # Composite primary key where the autoincrement column is *not*
        # first: inserted_primary_key must still come back in column order.
        related = Table(
            'related', metadata,
            Column('id', Integer, primary_key=True),
            mysql_engine='MyISAM'
        )
        t6 = Table(
            "t6", metadata,
            Column(
                'manual_id', Integer, ForeignKey('related.id'),
                primary_key=True),
            Column(
                'auto_id', Integer, primary_key=True,
                test_needs_autoincrement=True),
            mysql_engine='MyISAM'
        )
        metadata.create_all()
        r = related.insert().values(id=12).execute()
        id = r.inserted_primary_key[0]
        assert id == 12
        r = t6.insert().values(manual_id=id).execute()
        eq_(r.inserted_primary_key, [12, 1])
    def test_implicit_id_insert_select(self):
        # INSERT ... FROM SELECT that includes the primary-key column
        # should execute without error.
        stmt = users.insert().from_select(
            (users.c.user_id, users.c.user_name),
            users.select().where(users.c.user_id == 20))
        testing.db.execute(stmt)
    def test_row_iteration(self):
        # Result cursors are directly iterable, yielding one row per result.
        users.insert().execute(
            {'user_id': 7, 'user_name': 'jack'},
            {'user_id': 8, 'user_name': 'ed'},
            {'user_id': 9, 'user_name': 'fred'},
        )
        r = users.select().execute()
        l = []
        for row in r:
            l.append(row)
        self.assert_(len(l) == 3)
    @testing.fails_if(
        lambda: util.py3k and testing.against('mysql+mysqlconnector'),
        "bug in mysqlconnector")
    @testing.requires.subqueries
    def test_anonymous_rows(self):
        # Unlabeled scalar expressions become positional anon_N row keys.
        users.insert().execute(
            {'user_id': 7, 'user_name': 'jack'},
            {'user_id': 8, 'user_name': 'ed'},
            {'user_id': 9, 'user_name': 'fred'},
        )
        sel = select([users.c.user_id]).where(users.c.user_name == 'jack'). \
            as_scalar()
        for row in select([sel + 1, sel + 3], bind=users.bind).execute():
            assert row['anon_1'] == 8
            assert row['anon_2'] == 10
    @testing.fails_on(
        'firebird', "kinterbasdb doesn't send full type information")
    def test_order_by_label(self):
        """test that a label within an ORDER BY works on each backend.
        This test should be modified to support [ticket:1068] when that ticket
        is implemented. For now, you need to put the actual string in the
        ORDER BY.
        """
        users.insert().execute(
            {'user_id': 7, 'user_name': 'jack'},
            {'user_id': 8, 'user_name': 'ed'},
            {'user_id': 9, 'user_name': 'fred'},
        )
        concat = ("test: " + users.c.user_name).label('thedata')
        eq_(
            select([concat]).order_by("thedata").execute().fetchall(),
            [("test: ed",), ("test: fred",), ("test: jack",)]
        )
        eq_(
            select([concat]).order_by("thedata").execute().fetchall(),
            [("test: ed",), ("test: fred",), ("test: jack",)]
        )
        # Descending order on the same label.
        concat = ("test: " + users.c.user_name).label('thedata')
        eq_(
            select([concat]).order_by(desc('thedata')).execute().fetchall(),
            [("test: jack",), ("test: fred",), ("test: ed",)]
        )
    @testing.requires.order_by_label_with_expression
    def test_order_by_label_compound(self):
        # ORDER BY <label> combined with an expression.
        users.insert().execute(
            {'user_id': 7, 'user_name': 'jack'},
            {'user_id': 8, 'user_name': 'ed'},
            {'user_id': 9, 'user_name': 'fred'},
        )
        concat = ("test: " + users.c.user_name).label('thedata')
        eq_(
            select([concat]).order_by(literal_column('thedata') + "x").
            execute().fetchall(),
            [("test: ed",), ("test: fred",), ("test: jack",)]
        )
    def test_row_comparison(self):
        # A result row must compare exactly like the equivalent tuple.
        users.insert().execute(user_id=7, user_name='jack')
        rp = users.select().execute().first()
        self.assert_(rp == rp)
        self.assert_(not(rp != rp))
        equal = (7, 'jack')
        self.assert_(rp == equal)
        self.assert_(equal == rp)
        self.assert_(not (rp != equal))
        self.assert_(not (equal != equal))

        def endless():
            while True:
                yield 1
        self.assert_(rp != endless())
        self.assert_(endless() != rp)
        # test that everything compares the same
        # as it would against a tuple
        import operator
        for compare in [False, 8, endless(), 'xyz', (7, 'jack')]:
            for op in [
                operator.eq, operator.ne, operator.gt,
                operator.lt, operator.ge, operator.le
            ]:
                try:
                    control = op(equal, compare)
                except TypeError:
                    # Py3K raises TypeError for some invalid comparisons
                    assert_raises(TypeError, op, rp, compare)
                else:
                    eq_(control, op(rp, compare))
                try:
                    control = op(compare, equal)
                except TypeError:
                    # Py3K raises TypeError for some invalid comparisons
                    assert_raises(TypeError, op, compare, rp)
                else:
                    eq_(control, op(compare, rp))
    @testing.provide_metadata
    def test_column_label_overlap_fallback(self):
        # Row membership (`col in row`) falls back to string-name matching
        # for plain column() objects but not for other tables' columns.
        content = Table(
            'content', self.metadata,
            Column('type', String(30)),
        )
        bar = Table(
            'bar', self.metadata,
            Column('content_type', String(30))
        )
        self.metadata.create_all(testing.db)
        testing.db.execute(content.insert().values(type="t1"))
        row = testing.db.execute(content.select(use_labels=True)).first()
        assert content.c.type in row
        assert bar.c.content_type not in row
        assert sql.column('content_type') in row
        row = testing.db.execute(
            select([content.c.type.label("content_type")])).first()
        assert content.c.type in row
        assert bar.c.content_type not in row
        assert sql.column('content_type') in row
        row = testing.db.execute(select([func.now().label("content_type")])). \
            first()
        assert content.c.type not in row
        assert bar.c.content_type not in row
        assert sql.column('content_type') in row
    def test_pickled_rows(self):
        # Result rows must survive a pickle round-trip, with and without
        # use_labels, preserving key access semantics.
        users.insert().execute(
            {'user_id': 7, 'user_name': 'jack'},
            {'user_id': 8, 'user_name': 'ed'},
            {'user_id': 9, 'user_name': 'fred'},
        )
        for pickle in False, True:
            for use_labels in False, True:
                result = users.select(use_labels=use_labels).order_by(
                    users.c.user_id).execute().fetchall()
                if pickle:
                    result = util.pickle.loads(util.pickle.dumps(result))
                eq_(
                    result,
                    [(7, "jack"), (8, "ed"), (9, "fred")]
                )
                if use_labels:
                    eq_(result[0]['query_users_user_id'], 7)
                    eq_(
                        list(result[0].keys()),
                        ["query_users_user_id", "query_users_user_name"])
                else:
                    eq_(result[0]['user_id'], 7)
                    eq_(list(result[0].keys()), ["user_id", "user_name"])
                eq_(result[0][0], 7)
                eq_(result[0][users.c.user_id], 7)
                eq_(result[0][users.c.user_name], 'jack')
                if not pickle or use_labels:
                    assert_raises(
                        exc.NoSuchColumnError,
                        lambda: result[0][addresses.c.user_id])
                else:
                    # test with a different table. name resolution is
                    # causing 'user_id' to match when use_labels wasn't used.
                    eq_(result[0][addresses.c.user_id], 7)
                assert_raises(
                    exc.NoSuchColumnError, lambda: result[0]['fake key'])
                assert_raises(
                    exc.NoSuchColumnError,
                    lambda: result[0][addresses.c.address_id])
    def test_column_error_printing(self):
        """NoSuchColumnError messages render the missing accessor; an
        object whose __str__ raises still produces a usable message."""
        row = testing.db.execute(select([1])).first()
        class unprintable(object):
            def __str__(self):
                # deliberately unprintable accessor
                raise ValueError("nope")
        msg = r"Could not locate column in row for column '%s'"
        for accessor, repl in [
            ("x", "x"),
            (Column("q", Integer), "q"),
            (Column("q", Integer) + 12, r"q \+ :q_1"),
            (unprintable(), "unprintable element.*"),
        ]:
            assert_raises_message(
                exc.NoSuchColumnError,
                msg % repl,
                lambda: row[accessor]
            )
    @testing.fails_if(
        lambda: util.py3k and testing.against('mysql+mysqlconnector'),
        "bug in mysqlconnector")
    @testing.requires.boolean_col_expressions
    def test_or_and_as_columns(self):
        """and_/or_/not_ expressions can be selected directly as boolean
        result columns and accessed by label."""
        true, false = literal(True), literal(False)
        eq_(testing.db.execute(select([and_(true, false)])).scalar(), False)
        eq_(testing.db.execute(select([and_(true, true)])).scalar(), True)
        eq_(testing.db.execute(select([or_(true, false)])).scalar(), True)
        eq_(testing.db.execute(select([or_(false, false)])).scalar(), False)
        eq_(
            testing.db.execute(select([not_(or_(false, false))])).scalar(),
            True)
        row = testing.db.execute(
            select(
                [or_(false, false).label("x"),
                 and_(true, false).label("y")])).first()
        assert row.x == False  # noqa
        assert row.y == False  # noqa
        row = testing.db.execute(
            select(
                [or_(true, false).label("x"),
                 and_(true, false).label("y")])).first()
        assert row.x == True  # noqa
        assert row.y == False  # noqa
def test_fetchmany(self):
users.insert().execute(user_id=7, user_name='jack')
users.insert().execute(user_id=8, user_name='ed')
users.insert().execute(user_id=9, user_name='fred')
r = users.select().execute()
l = []
for row in r.fetchmany(size=2):
l.append(row)
self.assert_(len(l) == 2, "fetchmany(size=2) got %s rows" % len(l))
    def test_like_ops(self):
        """startswith/contains/endswith produce working LIKE filters,
        including a literal '%' in the pattern and an explicit escape."""
        users.insert().execute(
            {'user_id': 1, 'user_name': 'apples'},
            {'user_id': 2, 'user_name': 'oranges'},
            {'user_id': 3, 'user_name': 'bananas'},
            {'user_id': 4, 'user_name': 'legumes'},
            {'user_id': 5, 'user_name': 'hi % there'},
        )
        for expr, result in (
            (select([users.c.user_id]).
             where(users.c.user_name.startswith('apple')), [(1,)]),
            (select([users.c.user_id]).
             where(users.c.user_name.contains('i % t')), [(5,)]),
            (select([users.c.user_id]).
             where(users.c.user_name.endswith('anas')), [(3,)]),
            (select([users.c.user_id]).
             where(users.c.user_name.contains('i % t', escape='&')),
             [(5,)]),
        ):
            eq_(expr.execute().fetchall(), result)
    @testing.fails_if(
        lambda: util.py3k and testing.against('mysql+mysqlconnector'),
        "bug in mysqlconnector")
    @testing.requires.mod_operator_as_percent_sign
    @testing.emits_warning('.*now automatically escapes.*')
    def test_percents_in_text(self):
        """Literal percent signs inside text() SQL pass through intact."""
        for expr, result in (
            (text("select 6 % 10"), 6),
            (text("select 17 % 10"), 7),
            (text("select '%'"), '%'),
            (text("select '%%'"), '%%'),
            (text("select '%%%'"), '%%%'),
            (text("select 'hello % world'"), "hello % world")
        ):
            eq_(testing.db.scalar(expr), result)
    def test_ilike(self):
        """ilike() matches case-insensitively everywhere; on PostgreSQL a
        plain like() remains case-sensitive."""
        users.insert().execute(
            {'user_id': 1, 'user_name': 'one'},
            {'user_id': 2, 'user_name': 'TwO'},
            {'user_id': 3, 'user_name': 'ONE'},
            {'user_id': 4, 'user_name': 'OnE'},
        )
        eq_(
            select([users.c.user_id]).where(users.c.user_name.ilike('one')).
            execute().fetchall(), [(1, ), (3, ), (4, )])
        eq_(
            select([users.c.user_id]).where(users.c.user_name.ilike('TWO')).
            execute().fetchall(), [(2, )])
        if testing.against('postgresql'):
            # PG LIKE is case-sensitive, unlike e.g. SQLite/MySQL defaults
            eq_(
                select([users.c.user_id]).
                where(users.c.user_name.like('one')).execute().fetchall(),
                [(1, )])
            eq_(
                select([users.c.user_id]).
                where(users.c.user_name.like('TWO')).execute().fetchall(), [])
    def test_compiled_execute(self):
        """A pre-compiled select with a bindparam executes on a connection."""
        users.insert().execute(user_id=7, user_name='jack')
        s = select([users], users.c.user_id == bindparam('id')).compile()
        c = testing.db.connect()
        assert c.execute(s, id=7).fetchall()[0]['user_id'] == 7
    def test_compiled_insert_execute(self):
        """A pre-compiled insert executes with keyword parameters."""
        users.insert().compile().execute(user_id=7, user_name='jack')
        s = select([users], users.c.user_id == bindparam('id')).compile()
        c = testing.db.connect()
        assert c.execute(s, id=7).fetchall()[0]['user_id'] == 7
    @testing.fails_if(
        lambda: util.py3k and testing.against('mysql+mysqlconnector'),
        "bug in mysqlconnector")
    def test_repeated_bindparams(self):
        """Tests that a BindParam can be used more than once.
        This should be run for DB-APIs with both positional and named
        paramstyles.
        """
        users.insert().execute(user_id=7, user_name='jack')
        users.insert().execute(user_id=8, user_name='fred')
        u = bindparam('userid')
        s = users.select(and_(users.c.user_name == u, users.c.user_name == u))
        r = s.execute(userid='fred').fetchall()
        assert len(r) == 1
    def test_bindparam_detection(self):
        """text() detects :name-style bind params and renders them in the
        dialect paramstyle; a backslash escapes a literal colon."""
        dialect = default.DefaultDialect(paramstyle='qmark')
        prep = lambda q: str(sql.text(q).compile(dialect=dialect))
        def a_eq(got, wanted):
            # print the mismatch before asserting, for easier debugging
            if got != wanted:
                print("Wanted %s" % wanted)
                print("Received %s" % got)
            self.assert_(got == wanted, got)
        a_eq(prep('select foo'), 'select foo')
        a_eq(prep("time='12:30:00'"), "time='12:30:00'")
        a_eq(prep("time='12:30:00'"), "time='12:30:00'")
        a_eq(prep(":this:that"), ":this:that")
        a_eq(prep(":this :that"), "? ?")
        a_eq(prep("(:this),(:that :other)"), "(?),(? ?)")
        a_eq(prep("(:this),(:that:other)"), "(?),(:that:other)")
        a_eq(prep("(:this),(:that,:other)"), "(?),(?,?)")
        a_eq(prep("(:that_:other)"), "(:that_:other)")
        a_eq(prep("(:that_ :other)"), "(? ?)")
        a_eq(prep("(:that_other)"), "(?)")
        a_eq(prep("(:that$other)"), "(?)")
        a_eq(prep("(:that$:other)"), "(:that$:other)")
        a_eq(prep(".:that$ :other."), ".? ?.")
        a_eq(prep(r'select \foo'), r'select \foo')
        a_eq(prep(r"time='12\:30:00'"), r"time='12\:30:00'")
        a_eq(prep(":this \:that"), "? :that")
        a_eq(prep(r"(\:that$other)"), "(:that$other)")
        a_eq(prep(r".\:that$ :other."), ".:that$ ?.")
    @testing.requires.standalone_binds
    def test_select_from_bindparam(self):
        """Test result row processing when selecting from a plain bind
        param."""
        class MyInteger(TypeDecorator):
            impl = Integer
            def process_bind_param(self, value, dialect):
                # "INT_5" -> 5 going into the database
                return int(value[4:])
            def process_result_value(self, value, dialect):
                # 5 -> "INT_5" coming back out
                return "INT_%d" % value
        eq_(
            testing.db.scalar(select([cast("INT_5", type_=MyInteger)])),
            "INT_5"
        )
        eq_(
            testing.db.scalar(
                select([cast("INT_5", type_=MyInteger).label('foo')])),
            "INT_5"
        )
    def test_order_by(self):
        """Exercises ORDER BY clause generation.
        Tests simple, compound, aliased and DESC clauses.
        """
        users.insert().execute(user_id=1, user_name='c')
        users.insert().execute(user_id=2, user_name='b')
        users.insert().execute(user_id=3, user_name='a')
        def a_eq(executable, wanted):
            got = list(executable.execute())
            eq_(got, wanted)
        # each ordering is checked with and without use_labels; the
        # labeling should not affect row order or content
        for labels in False, True:
            a_eq(users.select(order_by=[users.c.user_id],
                              use_labels=labels),
                 [(1, 'c'), (2, 'b'), (3, 'a')])
            a_eq(users.select(order_by=[users.c.user_name, users.c.user_id],
                              use_labels=labels),
                 [(3, 'a'), (2, 'b'), (1, 'c')])
            a_eq(select([users.c.user_id.label('foo')],
                        use_labels=labels,
                        order_by=[users.c.user_id]),
                 [(1,), (2,), (3,)])
            a_eq(select([users.c.user_id.label('foo'), users.c.user_name],
                        use_labels=labels,
                        order_by=[users.c.user_name, users.c.user_id]),
                 [(3, 'a'), (2, 'b'), (1, 'c')])
            a_eq(users.select(distinct=True,
                              use_labels=labels,
                              order_by=[users.c.user_id]),
                 [(1, 'c'), (2, 'b'), (3, 'a')])
            a_eq(select([users.c.user_id.label('foo')],
                        distinct=True,
                        use_labels=labels,
                        order_by=[users.c.user_id]),
                 [(1,), (2,), (3,)])
            a_eq(select([users.c.user_id.label('a'),
                         users.c.user_id.label('b'),
                         users.c.user_name],
                        use_labels=labels,
                        order_by=[users.c.user_id]),
                 [(1, 1, 'c'), (2, 2, 'b'), (3, 3, 'a')])
            a_eq(users.select(distinct=True,
                              use_labels=labels,
                              order_by=[desc(users.c.user_id)]),
                 [(3, 'a'), (2, 'b'), (1, 'c')])
            a_eq(select([users.c.user_id.label('foo')],
                        distinct=True,
                        use_labels=labels,
                        order_by=[users.c.user_id.desc()]),
                 [(3,), (2,), (1,)])
    @testing.requires.nullsordering
    def test_order_by_nulls(self):
        """Exercises ORDER BY clause generation.
        Tests simple, compound, aliased and DESC clauses.
        """
        # user_id=1 deliberately has user_name NULL
        users.insert().execute(user_id=1)
        users.insert().execute(user_id=2, user_name='b')
        users.insert().execute(user_id=3, user_name='a')
        def a_eq(executable, wanted):
            got = list(executable.execute())
            eq_(got, wanted)
        for labels in False, True:
            a_eq(users.select(order_by=[users.c.user_name.nullsfirst()],
                              use_labels=labels),
                 [(1, None), (3, 'a'), (2, 'b')])
            a_eq(users.select(order_by=[users.c.user_name.nullslast()],
                              use_labels=labels),
                 [(3, 'a'), (2, 'b'), (1, None)])
            a_eq(users.select(order_by=[asc(users.c.user_name).nullsfirst()],
                              use_labels=labels),
                 [(1, None), (3, 'a'), (2, 'b')])
            a_eq(users.select(order_by=[asc(users.c.user_name).nullslast()],
                              use_labels=labels),
                 [(3, 'a'), (2, 'b'), (1, None)])
            a_eq(users.select(order_by=[users.c.user_name.desc().nullsfirst()],
                              use_labels=labels),
                 [(1, None), (2, 'b'), (3, 'a')])
            a_eq(
                users.select(
                    order_by=[users.c.user_name.desc().nullslast()],
                    use_labels=labels),
                [(2, 'b'), (3, 'a'), (1, None)])
            a_eq(
                users.select(
                    order_by=[desc(users.c.user_name).nullsfirst()],
                    use_labels=labels),
                [(1, None), (2, 'b'), (3, 'a')])
            a_eq(users.select(order_by=[desc(users.c.user_name).nullslast()],
                              use_labels=labels),
                 [(2, 'b'), (3, 'a'), (1, None)])
            a_eq(
                users.select(
                    order_by=[users.c.user_name.nullsfirst(), users.c.user_id],
                    use_labels=labels),
                [(1, None), (3, 'a'), (2, 'b')])
            a_eq(
                users.select(
                    order_by=[users.c.user_name.nullslast(), users.c.user_id],
                    use_labels=labels),
                [(3, 'a'), (2, 'b'), (1, None)])
    def test_column_slices(self):
        """Result rows support positional slicing like tuples."""
        users.insert().execute(user_id=1, user_name='john')
        users.insert().execute(user_id=2, user_name='jack')
        addresses.insert().execute(
            address_id=1, user_id=2, address='<EMAIL>')
        r = text(
            "select * from query_addresses", bind=testing.db).execute().first()
        self.assert_(r[0:1] == (1,))
        self.assert_(r[1:] == (2, '<EMAIL>'))
        self.assert_(r[:-1] == (1, 2))
    def test_column_accessor_basic_compiled(self):
        """Attribute, string-key and Column-object access agree for a
        compiled select."""
        users.insert().execute(
            dict(user_id=1, user_name='john'),
            dict(user_id=2, user_name='jack')
        )
        r = users.select(users.c.user_id == 2).execute().first()
        self.assert_(r.user_id == r['user_id'] == r[users.c.user_id] == 2)
        self.assert_(
            r.user_name == r['user_name'] == r[users.c.user_name] == 'jack')
    def test_column_accessor_basic_text(self):
        """Same accessor agreement for a raw text() statement."""
        users.insert().execute(
            dict(user_id=1, user_name='john'),
            dict(user_id=2, user_name='jack')
        )
        r = testing.db.execute(
            text("select * from query_users where user_id=2")).first()
        self.assert_(r.user_id == r['user_id'] == r[users.c.user_id] == 2)
        self.assert_(
            r.user_name == r['user_name'] == r[users.c.user_name] == 'jack')
    def test_column_accessor_textual_select(self):
        """Accessors also work when the select uses lightweight column()
        objects matched to the cursor by name."""
        users.insert().execute(
            dict(user_id=1, user_name='john'),
            dict(user_id=2, user_name='jack')
        )
        # this will create column() objects inside
        # the select(), these need to match on name anyway
        r = testing.db.execute(
            select([
                column('user_id'), column('user_name')
            ]).select_from(table('query_users')).
            where(text('user_id=2'))
        ).first()
        self.assert_(r.user_id == r['user_id'] == r[users.c.user_id] == 2)
        self.assert_(
            r.user_name == r['user_name'] == r[users.c.user_name] == 'jack')
    def test_column_accessor_dotted_union(self):
        """Dotted cursor names coming back from a UNION are normalized to
        plain column names."""
        users.insert().execute(
            dict(user_id=1, user_name='john'),
        )
        # test a little sqlite weirdness - with the UNION,
        # cols come back as "query_users.user_id" in cursor.description
        r = testing.db.execute(
            text(
                "select query_users.user_id, query_users.user_name "
                "from query_users "
                "UNION select query_users.user_id, "
                "query_users.user_name from query_users"
            )
        ).first()
        eq_(r['user_id'], 1)
        eq_(r['user_name'], "john")
        eq_(list(r.keys()), ["user_id", "user_name"])
    @testing.only_on("sqlite", "sqlite specific feature")
    def test_column_accessor_sqlite_raw(self):
        """With sqlite_raw_colnames=True the dotted UNION names are kept
        verbatim and plain names do not match."""
        users.insert().execute(
            dict(user_id=1, user_name='john'),
        )
        r = text(
            "select query_users.user_id, query_users.user_name "
            "from query_users "
            "UNION select query_users.user_id, "
            "query_users.user_name from query_users",
            bind=testing.db).execution_options(sqlite_raw_colnames=True). \
            execute().first()
        assert 'user_id' not in r
        assert 'user_name' not in r
        eq_(r['query_users.user_id'], 1)
        eq_(r['query_users.user_name'], "john")
        eq_(list(r.keys()), ["query_users.user_id", "query_users.user_name"])
    @testing.only_on("sqlite", "sqlite specific feature")
    def test_column_accessor_sqlite_translated(self):
        """By default the dotted names are translated: both the plain and
        the dotted key work, keys() reports the plain form."""
        users.insert().execute(
            dict(user_id=1, user_name='john'),
        )
        r = text(
            "select query_users.user_id, query_users.user_name "
            "from query_users "
            "UNION select query_users.user_id, "
            "query_users.user_name from query_users",
            bind=testing.db).execute().first()
        eq_(r['user_id'], 1)
        eq_(r['user_name'], "john")
        eq_(r['query_users.user_id'], 1)
        eq_(r['query_users.user_name'], "john")
        eq_(list(r.keys()), ["user_id", "user_name"])
    def test_column_accessor_labels_w_dots(self):
        """Explicit labels containing dots are kept as-is and do not fall
        back to the bare column name."""
        users.insert().execute(
            dict(user_id=1, user_name='john'),
        )
        # test using literal tablename.colname
        r = text(
            'select query_users.user_id AS "query_users.user_id", '
            'query_users.user_name AS "query_users.user_name" '
            'from query_users', bind=testing.db).\
            execution_options(sqlite_raw_colnames=True).execute().first()
        eq_(r['query_users.user_id'], 1)
        eq_(r['query_users.user_name'], "john")
        assert "user_name" not in r
        eq_(list(r.keys()), ["query_users.user_id", "query_users.user_name"])
    def test_column_accessor_unary(self):
        """A unary-wrapped column (DISTINCT) still targets the underlying
        column and its attribute name."""
        users.insert().execute(
            dict(user_id=1, user_name='john'),
        )
        # unary expressions
        r = select([users.c.user_name.distinct()]).order_by(
            users.c.user_name).execute().first()
        eq_(r[users.c.user_name], 'john')
        eq_(r.user_name, 'john')
    def test_column_accessor_err(self):
        """Missing columns raise AttributeError / KeyError with the
        'Could not locate column' message."""
        r = testing.db.execute(select([1])).first()
        assert_raises_message(
            AttributeError,
            "Could not locate column in row for column 'foo'",
            getattr, r, "foo"
        )
        assert_raises_message(
            KeyError,
            "Could not locate column in row for column 'foo'",
            lambda: r['foo']
        )
    def test_graceful_fetch_on_non_rows(self):
        """test that calling fetchone() etc. on a result that doesn't
        return rows fails gracefully.
        """
        # these proxies don't work with no cursor.description present.
        # so they don't apply to this test at the moment.
        # result.FullyBufferedResultProxy,
        # result.BufferedRowResultProxy,
        # result.BufferedColumnResultProxy
        conn = testing.db.connect()
        for meth in ('fetchone', 'fetchall', 'first', 'scalar', 'fetchmany'):
            # each fetch method is tried against a fresh INSERT result
            trans = conn.begin()
            result = conn.execute(users.insert(), user_id=1)
            assert_raises_message(
                exc.ResourceClosedError,
                "This result object does not return rows. "
                "It has been closed automatically.",
                getattr(result, meth),
            )
            trans.rollback()
    @testing.requires.empty_inserts
    @testing.requires.returning
    def test_no_inserted_pk_on_returning(self):
        """inserted_primary_key is unavailable when returning() is used."""
        result = testing.db.execute(users.insert().returning(
            users.c.user_id, users.c.user_name))
        assert_raises_message(
            exc.InvalidRequestError,
            r"Can't call inserted_primary_key when returning\(\) is used.",
            getattr, result, 'inserted_primary_key'
        )
    def test_fetchone_til_end(self):
        """fetchone() past exhaustion raises ResourceClosedError."""
        result = testing.db.execute("select * from query_users")
        eq_(result.fetchone(), None)
        assert_raises_message(
            exc.ResourceClosedError,
            "This result object is closed.",
            result.fetchone
        )
    def test_row_case_sensitive(self):
        """Default engines match row keys case-sensitively."""
        row = testing.db.execute(
            select([
                literal_column("1").label("case_insensitive"),
                literal_column("2").label("CaseSensitive")
            ])
        ).first()
        eq_(list(row.keys()), ["case_insensitive", "CaseSensitive"])
        eq_(row["case_insensitive"], 1)
        eq_(row["CaseSensitive"], 2)
        # wrong-cased keys do not match
        assert_raises(
            KeyError,
            lambda: row["Case_insensitive"]
        )
        assert_raises(
            KeyError,
            lambda: row["casesensitive"]
        )
    def test_row_case_insensitive(self):
        """With case_sensitive=False, keys match regardless of case."""
        ins_db = engines.testing_engine(options={"case_sensitive": False})
        row = ins_db.execute(
            select([
                literal_column("1").label("case_insensitive"),
                literal_column("2").label("CaseSensitive")
            ])
        ).first()
        eq_(list(row.keys()), ["case_insensitive", "CaseSensitive"])
        eq_(row["case_insensitive"], 1)
        eq_(row["CaseSensitive"], 2)
        eq_(row["Case_insensitive"], 1)
        eq_(row["casesensitive"], 2)
    def test_row_as_args(self):
        """A result row can be passed directly as insert parameters."""
        users.insert().execute(user_id=1, user_name='john')
        r = users.select(users.c.user_id == 1).execute().first()
        users.delete().execute()
        users.insert().execute(r)
        eq_(users.select().execute().fetchall(), [(1, 'john')])
    def test_result_as_args(self):
        """A whole result set can be passed as executemany parameters,
        either as a list or as *args."""
        users.insert().execute([
            dict(user_id=1, user_name='john'),
            dict(user_id=2, user_name='ed')])
        r = users.select().execute()
        users2.insert().execute(list(r))
        eq_(
            users2.select().order_by(users2.c.user_id).execute().fetchall(),
            [(1, 'john'), (2, 'ed')]
        )
        users2.delete().execute()
        r = users.select().execute()
        users2.insert().execute(*list(r))
        eq_(
            users2.select().order_by(users2.c.user_id).execute().fetchall(),
            [(1, 'john'), (2, 'ed')]
        )
    @testing.requires.duplicate_names_in_cursor_description
    def test_ambiguous_column(self):
        """Duplicate column names in a join raise 'Ambiguous column name'
        on access; the error survives pickling and buffered proxies."""
        users.insert().execute(user_id=1, user_name='john')
        result = users.outerjoin(addresses).select().execute()
        r = result.first()
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name",
            lambda: r['user_id']
        )
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name",
            lambda: r[users.c.user_id]
        )
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name",
            lambda: r[addresses.c.user_id]
        )
        # try to trick it - fake_table isn't in the result!
        # we get the correct error
        fake_table = Table('fake', MetaData(), Column('user_id', Integer))
        assert_raises_message(
            exc.InvalidRequestError,
            "Could not locate column in row for column 'fake.user_id'",
            lambda: r[fake_table.c.user_id]
        )
        # ambiguity is preserved across a pickle round trip
        r = util.pickle.loads(util.pickle.dumps(r))
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name",
            lambda: r['user_id']
        )
        result = users.outerjoin(addresses).select().execute()
        result = _result.BufferedColumnResultProxy(result.context)
        r = result.first()
        assert isinstance(r, _result.BufferedColumnRow)
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name",
            lambda: r['user_id']
        )
    @testing.requires.duplicate_names_in_cursor_description
    def test_ambiguous_column_by_col(self):
        """Selecting the same column via an alias triggers ambiguity for
        both the original and the aliased column objects."""
        users.insert().execute(user_id=1, user_name='john')
        ua = users.alias()
        u2 = users.alias()
        result = select([users.c.user_id, ua.c.user_id]).execute()
        row = result.first()
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name",
            lambda: row[users.c.user_id]
        )
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name",
            lambda: row[ua.c.user_id]
        )
        # Unfortunately, this fails -
        # we'd like
        # "Could not locate column in row"
        # to be raised here, but the check for
        # "common column" in _compare_name_for_result()
        # has other requirements to be more liberal.
        # Ultimately the
        # expression system would need a way to determine
        # if given two columns in a "proxy" relationship, if they
        # refer to a different parent table
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name",
            lambda: row[u2.c.user_id]
        )
    @testing.requires.duplicate_names_in_cursor_description
    def test_ambiguous_column_contains(self):
        # ticket 2702. in 0.7 we'd get True, False.
        # in 0.8, both columns are present so it's True;
        # but when they're fetched you'll get the ambiguous error.
        users.insert().execute(user_id=1, user_name='john')
        result = select([users.c.user_id, addresses.c.user_id]).\
            select_from(users.outerjoin(addresses)).execute()
        row = result.first()
        eq_(
            set([users.c.user_id in row, addresses.c.user_id in row]),
            set([True])
        )
    def test_ambiguous_column_by_col_plus_label(self):
        """A type_coerce label alongside the raw column does not make the
        raw column ambiguous."""
        users.insert().execute(user_id=1, user_name='john')
        result = select(
            [users.c.user_id,
             type_coerce(users.c.user_id, Integer).label('foo')]).execute()
        row = result.first()
        eq_(
            row[users.c.user_id], 1
        )
        eq_(
            row[1], 1
        )
    @testing.requires.subqueries
    def test_column_label_targeting(self):
        """Columns of an aliased subquery target labeled result columns."""
        users.insert().execute(user_id=7, user_name='ed')
        for s in (
            users.select().alias('foo'),
            users.select().alias(users.name),
        ):
            row = s.select(use_labels=True).execute().first()
            assert row[s.c.user_id] == 7
            assert row[s.c.user_name] == 'ed'
    def test_keys(self):
        """keys() is available on both the result and its rows."""
        users.insert().execute(user_id=1, user_name='foo')
        r = users.select().execute()
        eq_([x.lower() for x in list(r.keys())], ['user_id', 'user_name'])
        r = r.first()
        eq_([x.lower() for x in list(r.keys())], ['user_id', 'user_name'])
    def test_items(self):
        """items() yields (key, value) pairs in column order."""
        users.insert().execute(user_id=1, user_name='foo')
        r = users.select().execute().first()
        eq_(
            [(x[0].lower(), x[1]) for x in list(r.items())],
            [('user_id', 1), ('user_name', 'foo')])
    def test_len(self):
        """len(row) reflects the number of selected columns."""
        users.insert().execute(user_id=1, user_name='foo')
        r = users.select().execute().first()
        eq_(len(r), 2)
        r = testing.db.execute('select user_name, user_id from query_users'). \
            first()
        eq_(len(r), 2)
        r = testing.db.execute('select user_name from query_users').first()
        eq_(len(r), 1)
    def test_sorting_in_python(self):
        """Rows compare like tuples, so sorted() works on them."""
        users.insert().execute(
            dict(user_id=1, user_name='foo'),
            dict(user_id=2, user_name='bar'),
            dict(user_id=3, user_name='def'),
        )
        rows = users.select().order_by(users.c.user_name).execute().fetchall()
        eq_(rows, [(2, 'bar'), (3, 'def'), (1, 'foo')])
        eq_(sorted(rows), [(1, 'foo'), (2, 'bar'), (3, 'def')])
    def test_column_order_with_simple_query(self):
        # should return values in column definition order
        users.insert().execute(user_id=1, user_name='foo')
        r = users.select(users.c.user_id == 1).execute().first()
        eq_(r[0], 1)
        eq_(r[1], 'foo')
        eq_([x.lower() for x in list(r.keys())], ['user_id', 'user_name'])
        eq_(list(r.values()), [1, 'foo'])
    def test_column_order_with_text_query(self):
        # should return values in query order
        users.insert().execute(user_id=1, user_name='foo')
        r = testing.db.execute('select user_name, user_id from query_users'). \
            first()
        eq_(r[0], 'foo')
        eq_(r[1], 1)
        eq_([x.lower() for x in list(r.keys())], ['user_name', 'user_id'])
        eq_(list(r.values()), ['foo', 1])
    # NOTE(review): "varify" in the crashes() reason below looks like a typo
    # for "verify"; left as-is since it is a runtime string.
    @testing.crashes('oracle', 'FIXME: unknown, varify not fails_on()')
    @testing.crashes('firebird', 'An identifier must begin with a letter')
    def test_column_accessor_shadow(self):
        """Columns named like RowProxy internals ('parent', 'row',
        '_parent', '_row') are still accessible via item access."""
        meta = MetaData(testing.db)
        shadowed = Table(
            'test_shadowed', meta,
            Column('shadow_id', INT, primary_key=True),
            Column('shadow_name', VARCHAR(20)),
            Column('parent', VARCHAR(20)),
            Column('row', VARCHAR(40)),
            Column('_parent', VARCHAR(20)),
            Column('_row', VARCHAR(20)),
        )
        shadowed.create(checkfirst=True)
        try:
            shadowed.insert().execute(
                shadow_id=1, shadow_name='The Shadow', parent='The Light',
                row='Without light there is no shadow',
                _parent='Hidden parent', _row='Hidden row')
            r = shadowed.select(shadowed.c.shadow_id == 1).execute().first()
            self.assert_(
                r.shadow_id == r['shadow_id'] == r[shadowed.c.shadow_id] == 1)
            self.assert_(
                r.shadow_name == r['shadow_name'] ==
                r[shadowed.c.shadow_name] == 'The Shadow')
            self.assert_(
                r.parent == r['parent'] == r[shadowed.c.parent] == 'The Light')
            self.assert_(
                r.row == r['row'] == r[shadowed.c.row] ==
                'Without light there is no shadow')
            self.assert_(r['_parent'] == 'Hidden parent')
            self.assert_(r['_row'] == 'Hidden row')
        finally:
            # drop the ad-hoc table even if assertions fail
            shadowed.drop(checkfirst=True)
    @testing.emits_warning('.*empty sequence.*')
    def test_in_filtering(self):
        """test the behavior of the in_() function."""
        users.insert().execute(user_id=7, user_name='jack')
        users.insert().execute(user_id=8, user_name='fred')
        users.insert().execute(user_id=9, user_name=None)
        s = users.select(users.c.user_name.in_([]))
        r = s.execute().fetchall()
        # No username is in empty set
        assert len(r) == 0
        s = users.select(not_(users.c.user_name.in_([])))
        r = s.execute().fetchall()
        # All usernames with a value are outside an empty set
        assert len(r) == 2
        s = users.select(users.c.user_name.in_(['jack', 'fred']))
        r = s.execute().fetchall()
        assert len(r) == 2
        s = users.select(not_(users.c.user_name.in_(['jack', 'fred'])))
        r = s.execute().fetchall()
        # Null values are not outside any set
        assert len(r) == 0
    @testing.fails_if(
        lambda: util.py3k and testing.against('mysql+mysqlconnector'),
        "bug in mysqlconnector")
    @testing.emits_warning('.*empty sequence.*')
    @testing.fails_on('firebird', "uses sql-92 rules")
    @testing.fails_on('sybase', "uses sql-92 rules")
    @testing.fails_if(
        lambda: testing.against('mssql+pyodbc') and not
        testing.db.dialect.freetds, "uses sql-92 rules")
    def test_bind_in(self):
        """test calling IN against a bind parameter.
        this isn't allowed on several platforms since we
        generate ? = ?.
        """
        users.insert().execute(user_id=7, user_name='jack')
        users.insert().execute(user_id=8, user_name='fred')
        users.insert().execute(user_id=9, user_name=None)
        u = bindparam('search_key')
        s = users.select(not_(u.in_([])))
        r = s.execute(search_key='john').fetchall()
        assert len(r) == 3
        # NULL search key: NOT IN comparison yields no rows
        r = s.execute(search_key=None).fetchall()
        assert len(r) == 0
    @testing.fails_if(
        lambda: util.py3k and testing.against('mysql+mysqlconnector'),
        "bug in mysqlconnector")
    @testing.emits_warning('.*empty sequence.*')
    def test_literal_in(self):
        """similar to test_bind_in but use a bind with a value."""
        users.insert().execute(user_id=7, user_name='jack')
        users.insert().execute(user_id=8, user_name='fred')
        users.insert().execute(user_id=9, user_name=None)
        s = users.select(not_(literal("john").in_([])))
        r = s.execute().fetchall()
        assert len(r) == 3
    @testing.emits_warning('.*empty sequence.*')
    @testing.requires.boolean_col_expressions
    def test_in_filtering_advanced(self):
        """test the behavior of the in_() function when
        comparing against an empty collection, specifically
        that a proper boolean value is generated.
        """
        users.insert().execute(user_id=7, user_name='jack')
        users.insert().execute(user_id=8, user_name='fred')
        users.insert().execute(user_id=9, user_name=None)
        s = users.select(users.c.user_name.in_([]) == True)  # noqa
        r = s.execute().fetchall()
        assert len(r) == 0
        s = users.select(users.c.user_name.in_([]) == False)  # noqa
        r = s.execute().fetchall()
        assert len(r) == 2
        s = users.select(users.c.user_name.in_([]) == None)  # noqa
        r = s.execute().fetchall()
        assert len(r) == 1
class RequiredBindTest(fixtures.TablesTest):
    """Executing a statement without a value for a required bind
    parameter raises StatementError."""
    run_create_tables = None
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        Table(
            'foo', metadata,
            Column('id', Integer, primary_key=True),
            Column('data', String(50)),
            Column('x', Integer)
        )
    def _assert_raises(self, stmt, params):
        # check both **kwargs and positional-dict execution styles
        assert_raises_message(
            exc.StatementError,
            "A value is required for bind parameter 'x'",
            testing.db.execute, stmt, **params)
        assert_raises_message(
            exc.StatementError,
            "A value is required for bind parameter 'x'",
            testing.db.execute, stmt, params)
    def test_insert(self):
        stmt = self.tables.foo.insert().values(
            x=bindparam('x'), data=bindparam('data'))
        self._assert_raises(stmt, {'data': 'data'})
    def test_select_where(self):
        stmt = select([self.tables.foo]). \
            where(self.tables.foo.c.data == bindparam('data')). \
            where(self.tables.foo.c.x == bindparam('x'))
        self._assert_raises(stmt, {'data': 'data'})
    @testing.requires.standalone_binds
    def test_select_columns(self):
        stmt = select([bindparam('data'), bindparam('x')])
        self._assert_raises(
            stmt, {'data': 'data'}
        )
    def test_text(self):
        stmt = text("select * from foo where x=:x and data=:data1")
        self._assert_raises(
            stmt, {'data1': 'data'}
        )
    def test_required_flag(self):
        # required defaults to True only when no value/callable is given
        is_(bindparam('foo').required, True)
        is_(bindparam('foo', required=False).required, False)
        is_(bindparam('foo', 'bar').required, False)
        is_(bindparam('foo', 'bar', required=True).required, True)
        c = lambda: None
        is_(bindparam('foo', callable_=c, required=True).required, True)
        is_(bindparam('foo', callable_=c).required, False)
        is_(bindparam('foo', callable_=c, required=False).required, False)
class TableInsertTest(fixtures.TablesTest):
    """test for consistent insert behavior across dialects
    regarding the inline=True flag, lower-case 't' tables.
    """
    run_create_tables = 'each'
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        # single table with a Sequence-backed integer primary key
        Table(
            'foo', metadata,
            Column('id', Integer, Sequence('t_id_seq'), primary_key=True),
            Column('data', String(50)),
            Column('x', Integer)
        )
    def _fixture(self, types=True):
        """Return a lower-case sql.table() mirroring 'foo', with or
        without column types."""
        if types:
            t = sql.table(
                'foo', sql.column('id', Integer),
                sql.column('data', String),
                sql.column('x', Integer))
        else:
            t = sql.table(
                'foo', sql.column('id'), sql.column('data'), sql.column('x'))
        return t
    def _test(self, stmt, row, returning=None, inserted_primary_key=False):
        """Execute stmt; check RETURNING or inserted_primary_key as
        requested, then verify the single row now in the table."""
        r = testing.db.execute(stmt)
        if returning:
            returned = r.first()
            eq_(returned, returning)
        elif inserted_primary_key is not False:
            eq_(r.inserted_primary_key, inserted_primary_key)
        eq_(testing.db.execute(self.tables.foo.select()).first(), row)
    def _test_multi(self, stmt, rows, data):
        """Execute an executemany and verify table contents ordered by id."""
        testing.db.execute(stmt, rows)
        eq_(
            testing.db.execute(
                self.tables.foo.select().
                order_by(self.tables.foo.c.id)).fetchall(),
            data)
    @testing.requires.sequences
    def test_explicit_sequence(self):
        # (renamed from misspelled "test_expicit_sequence")
        t = self._fixture()
        self._test(
            t.insert().values(
                id=func.next_value(Sequence('t_id_seq')), data='data', x=5),
            (1, 'data', 5)
        )
    def test_uppercase(self):
        t = self.tables.foo
        self._test(
            t.insert().values(id=1, data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[1]
        )
    def test_uppercase_inline(self):
        t = self.tables.foo
        self._test(
            t.insert(inline=True).values(id=1, data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[1]
        )
    @testing.crashes(
        "mssql+pyodbc",
        "Pyodbc + SQL Server + Py3K, some decimal handling issue")
    def test_uppercase_inline_implicit(self):
        t = self.tables.foo
        self._test(
            t.insert(inline=True).values(data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[None]
        )
    def test_uppercase_implicit(self):
        t = self.tables.foo
        self._test(
            t.insert().values(data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[1]
        )
    def test_uppercase_direct_params(self):
        t = self.tables.foo
        self._test(
            t.insert().values(id=1, data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[1]
        )
    @testing.requires.returning
    def test_uppercase_direct_params_returning(self):
        t = self.tables.foo
        self._test(
            t.insert().values(id=1, data='data', x=5).returning(t.c.id, t.c.x),
            (1, 'data', 5),
            returning=(1, 5)
        )
    @testing.fails_on(
        'mssql', "lowercase table doesn't support identity insert disable")
    def test_direct_params(self):
        t = self._fixture()
        self._test(
            t.insert().values(id=1, data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[]
        )
    @testing.fails_on(
        'mssql', "lowercase table doesn't support identity insert disable")
    @testing.requires.returning
    def test_direct_params_returning(self):
        t = self._fixture()
        self._test(
            t.insert().values(id=1, data='data', x=5).returning(t.c.id, t.c.x),
            (1, 'data', 5),
            returning=(1, 5)
        )
    @testing.requires.emulated_lastrowid
    def test_implicit_pk(self):
        t = self._fixture()
        self._test(
            t.insert().values(data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[]
        )
    @testing.requires.emulated_lastrowid
    def test_implicit_pk_multi_rows(self):
        t = self._fixture()
        self._test_multi(
            t.insert(),
            [
                {'data': 'd1', 'x': 5},
                {'data': 'd2', 'x': 6},
                {'data': 'd3', 'x': 7},
            ],
            [
                (1, 'd1', 5),
                (2, 'd2', 6),
                (3, 'd3', 7)
            ],
        )
    @testing.requires.emulated_lastrowid
    def test_implicit_pk_inline(self):
        t = self._fixture()
        self._test(
            t.insert(inline=True).values(data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[]
        )
class KeyTargetingTest(fixtures.TablesTest):
run_inserts = 'once'
run_deletes = None
__backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        """Tables exercising Column(key=...) vs. plain column names."""
        Table(
            'keyed1', metadata, Column("a", CHAR(2), key="b"),
            Column("c", CHAR(2), key="q")
        )
        Table('keyed2', metadata, Column("a", CHAR(2)), Column("b", CHAR(2)))
        Table('keyed3', metadata, Column("a", CHAR(2)), Column("d", CHAR(2)))
        Table('keyed4', metadata, Column("b", CHAR(2)), Column("q", CHAR(2)))
        Table('content', metadata, Column('t', String(30), key="type"))
        Table('bar', metadata, Column('ctype', String(30), key="content_type"))
        if testing.requires.schemas.enabled:
            # schema-qualified variant of keyed1 for the wschema test
            Table(
                'wschema', metadata,
                Column("a", CHAR(2), key="b"),
                Column("c", CHAR(2), key="q"),
                schema=testing.config.test_schema
            )
    @classmethod
    def insert_data(cls):
        # one row per table; values encode table + column for easy checks
        cls.tables.keyed1.insert().execute(dict(b="a1", q="c1"))
        cls.tables.keyed2.insert().execute(dict(a="a2", b="b2"))
        cls.tables.keyed3.insert().execute(dict(a="a3", d="d3"))
        cls.tables.keyed4.insert().execute(dict(b="b4", q="q4"))
        cls.tables.content.insert().execute(type="t1")
        if testing.requires.schemas.enabled:
            cls.tables['%s.wschema' % testing.config.test_schema].insert().execute(
                dict(b="a1", q="c1"))
    @testing.requires.schemas
    def test_keyed_accessor_wschema(self):
        """Both key- and name-based access work for a schema-qualified
        table."""
        keyed1 = self.tables['%s.wschema' % testing.config.test_schema]
        row = testing.db.execute(keyed1.select()).first()
        eq_(row.b, "a1")
        eq_(row.q, "c1")
        eq_(row.a, "a1")
        eq_(row.c, "c1")
    def test_keyed_accessor_single(self):
        """Column keys and real column names both address the row."""
        keyed1 = self.tables.keyed1
        row = testing.db.execute(keyed1.select()).first()
        eq_(row.b, "a1")
        eq_(row.q, "c1")
        eq_(row.a, "a1")
        eq_(row.c, "c1")
    def test_keyed_accessor_single_labeled(self):
        """apply_labels() prefixes both keys and names with the table."""
        keyed1 = self.tables.keyed1
        row = testing.db.execute(keyed1.select().apply_labels()).first()
        eq_(row.keyed1_b, "a1")
        eq_(row.keyed1_q, "c1")
        eq_(row.keyed1_a, "a1")
        eq_(row.keyed1_c, "c1")
@testing.requires.duplicate_names_in_cursor_description
def test_keyed_accessor_composite_conflict_2(self):
keyed1 = self.tables.keyed1
keyed2 = self.tables.keyed2
row = testing.db.execute(select([keyed1, keyed2])).first()
# row.b is unambiguous
eq_(row.b, "b2")
# row.a is ambiguous
assert_raises_message(
exc.InvalidRequestError,
"Ambig",
getattr, row, "a"
)
def test_keyed_accessor_composite_names_precedent(self):
keyed1 = self.tables.keyed1
keyed4 = self.tables.keyed4
row = testing.db.execute(select([keyed1, keyed4])).first()
eq_(row.b, "b4")
eq_(row.q, "q4")
eq_(row.a, "a1")
eq_(row.c, "c1")
@testing.requires.duplicate_names_in_cursor_description
def test_keyed_accessor_composite_keys_precedent(self):
keyed1 = self.tables.keyed1
keyed3 = self.tables.keyed3
row = testing.db.execute(select([keyed1, keyed3])).first()
eq_(row.q, "c1")
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name 'b'",
getattr, row, "b"
)
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name 'a'",
getattr, row, "a"
)
eq_(row.d, "d3")
def test_keyed_accessor_composite_labeled(self):
keyed1 = self.tables.keyed1
keyed2 = self.tables.keyed2
row = testing.db.execute(select([keyed1, keyed2]).apply_labels()). \
first()
eq_(row.keyed1_b, "a1")
eq_(row.keyed1_a, "a1")
eq_(row.keyed1_q, "c1")
eq_(row.keyed1_c, "c1")
eq_(row.keyed2_a, "a2")
eq_(row.keyed2_b, "b2")
assert_raises(KeyError, lambda: row['keyed2_c'])
assert_raises(KeyError, lambda: row['keyed2_q'])
def test_column_label_overlap_fallback(self):
content, bar = self.tables.content, self.tables.bar
row = testing.db.execute(
select([content.c.type.label("content_type")])).first()
assert content.c.type not in row
assert bar.c.content_type not in row
assert sql.column('content_type') in row
row = testing.db.execute(select([func.now().label("content_type")])). \
first()
assert content.c.type not in row
assert bar.c.content_type not in row
assert sql.column('content_type') in row
def test_column_label_overlap_fallback_2(self):
content, bar = self.tables.content, self.tables.bar
row = testing.db.execute(content.select(use_labels=True)).first()
assert content.c.type in row
assert bar.c.content_type not in row
assert sql.column('content_type') not in row
def test_columnclause_schema_column_one(self):
keyed2 = self.tables.keyed2
# this is addressed by [ticket:2932]
# ColumnClause._compare_name_for_result allows the
# columns which the statement is against to be lightweight
# cols, which results in a more liberal comparison scheme
a, b = sql.column('a'), sql.column('b')
stmt = select([a, b]).select_from(table("keyed2"))
row = testing.db.execute(stmt).first()
assert keyed2.c.a in row
assert keyed2.c.b in row
assert a in row
assert b in row
def test_columnclause_schema_column_two(self):
keyed2 = self.tables.keyed2
a, b = sql.column('a'), sql.column('b')
stmt = select([keyed2.c.a, keyed2.c.b])
row = testing.db.execute(stmt).first()
assert keyed2.c.a in row
assert keyed2.c.b in row
assert a in row
assert b in row
def test_columnclause_schema_column_three(self):
keyed2 = self.tables.keyed2
# this is also addressed by [ticket:2932]
a, b = sql.column('a'), sql.column('b')
stmt = text("select a, b from keyed2").columns(a=CHAR, b=CHAR)
row = testing.db.execute(stmt).first()
assert keyed2.c.a in row
assert keyed2.c.b in row
assert a in row
assert b in row
assert stmt.c.a in row
assert stmt.c.b in row
def test_columnclause_schema_column_four(self):
keyed2 = self.tables.keyed2
# this is also addressed by [ticket:2932]
a, b = sql.column('keyed2_a'), sql.column('keyed2_b')
stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
a, b)
row = testing.db.execute(stmt).first()
assert keyed2.c.a in row
assert keyed2.c.b in row
assert a in row
assert b in row
assert stmt.c.keyed2_a in row
assert stmt.c.keyed2_b in row
def test_columnclause_schema_column_five(self):
keyed2 = self.tables.keyed2
# this is also addressed by [ticket:2932]
stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
keyed2_a=CHAR, keyed2_b=CHAR)
row = testing.db.execute(stmt).first()
assert keyed2.c.a in row
assert keyed2.c.b in row
assert stmt.c.keyed2_a in row
assert stmt.c.keyed2_b in row
class LimitTest(fixtures.TestBase):
    """Round trips for LIMIT / OFFSET / DISTINCT combinations."""

    __backend__ = True

    @classmethod
    def setup_class(cls):
        global users, addresses, metadata
        metadata = MetaData(testing.db)
        users = Table(
            'query_users', metadata,
            Column('user_id', INT, primary_key=True),
            Column('user_name', VARCHAR(20)),
        )
        addresses = Table(
            'query_addresses', metadata,
            Column('address_id', Integer, primary_key=True),
            Column('user_id', Integer, ForeignKey('query_users.user_id')),
            Column('address', String(30)))
        metadata.create_all()

        # seven users; 'addr1' and 'addr5' are each reused so DISTINCT
        # collapses the seven addresses down to five values
        users.insert().execute(user_id=1, user_name='john')
        addresses.insert().execute(address_id=1, user_id=1, address='addr1')
        users.insert().execute(user_id=2, user_name='jack')
        addresses.insert().execute(address_id=2, user_id=2, address='addr1')
        users.insert().execute(user_id=3, user_name='ed')
        addresses.insert().execute(address_id=3, user_id=3, address='addr2')
        users.insert().execute(user_id=4, user_name='wendy')
        addresses.insert().execute(address_id=4, user_id=4, address='addr3')
        users.insert().execute(user_id=5, user_name='laura')
        addresses.insert().execute(address_id=5, user_id=5, address='addr4')
        users.insert().execute(user_id=6, user_name='ralph')
        addresses.insert().execute(address_id=6, user_id=6, address='addr5')
        users.insert().execute(user_id=7, user_name='fido')
        addresses.insert().execute(address_id=7, user_id=7, address='addr5')

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    def test_select_limit(self):
        r = users.select(limit=3, order_by=[users.c.user_id]).execute(). \
            fetchall()
        self.assert_(r == [(1, 'john'), (2, 'jack'), (3, 'ed')], repr(r))

    @testing.requires.offset
    def test_select_limit_offset(self):
        """Test the interaction between limit and offset"""
        r = users.select(limit=3, offset=2, order_by=[users.c.user_id]). \
            execute().fetchall()
        self.assert_(r == [(3, 'ed'), (4, 'wendy'), (5, 'laura')])
        r = users.select(offset=5, order_by=[users.c.user_id]).execute(). \
            fetchall()
        self.assert_(r == [(6, 'ralph'), (7, 'fido')])

    def test_select_distinct_limit(self):
        """Test the interaction between limit and distinct"""
        r = sorted(
            [x[0] for x in select([addresses.c.address]).distinct().
             limit(3).order_by(addresses.c.address).execute().fetchall()])
        self.assert_(len(r) == 3, repr(r))
        self.assert_(r[0] != r[1] and r[1] != r[2], repr(r))

    @testing.requires.offset
    @testing.fails_on('mssql', 'FIXME: unknown')
    def test_select_distinct_offset(self):
        """Test the interaction between distinct and offset"""
        r = sorted(
            [x[0] for x in select([addresses.c.address]).distinct().
             offset(1).order_by(addresses.c.address).
             execute().fetchall()])
        eq_(len(r), 4)
        # bug fix: the original compared ``r[2] != [3]`` — a string against
        # the literal list [3], which is always true; the intent is that
        # all four distinct addresses are pairwise different
        self.assert_(
            r[0] != r[1] and r[1] != r[2] and r[2] != r[3], repr(r))

    @testing.requires.offset
    def test_select_distinct_limit_offset(self):
        """Test the interaction between limit and limit/offset"""
        r = select([addresses.c.address]).order_by(addresses.c.address). \
            distinct().offset(2).limit(3).execute().fetchall()
        self.assert_(len(r) == 3, repr(r))
        self.assert_(r[0] != r[1] and r[1] != r[2], repr(r))
class CompoundTest(fixtures.TestBase):

    """test compound statements like UNION, INTERSECT, particularly their
    ability to nest on different databases."""

    __backend__ = True

    @classmethod
    def setup_class(cls):
        # three identically-shaped tables whose col3/col4 values overlap,
        # so the set operations have rows to merge and subtract
        global metadata, t1, t2, t3
        metadata = MetaData(testing.db)
        t1 = Table(
            't1', metadata,
            Column(
                'col1', Integer, test_needs_autoincrement=True,
                primary_key=True),
            Column('col2', String(30)),
            Column('col3', String(40)),
            Column('col4', String(30)))
        t2 = Table(
            't2', metadata,
            Column(
                'col1', Integer, test_needs_autoincrement=True,
                primary_key=True),
            Column('col2', String(30)),
            Column('col3', String(40)),
            Column('col4', String(30)))
        t3 = Table(
            't3', metadata,
            Column(
                'col1', Integer, test_needs_autoincrement=True,
                primary_key=True),
            Column('col2', String(30)),
            Column('col3', String(40)),
            Column('col4', String(30)))
        metadata.create_all()
        t1.insert().execute([
            dict(col2="t1col2r1", col3="aaa", col4="aaa"),
            dict(col2="t1col2r2", col3="bbb", col4="bbb"),
            dict(col2="t1col2r3", col3="ccc", col4="ccc"),
        ])
        t2.insert().execute([
            dict(col2="t2col2r1", col3="aaa", col4="bbb"),
            dict(col2="t2col2r2", col3="bbb", col4="ccc"),
            dict(col2="t2col2r3", col3="ccc", col4="aaa"),
        ])
        t3.insert().execute([
            dict(col2="t3col2r1", col3="aaa", col4="ccc"),
            dict(col2="t3col2r2", col3="bbb", col4="aaa"),
            dict(col2="t3col2r3", col3="ccc", col4="bbb"),
        ])

    @engines.close_first
    def teardown(self):
        pass

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    def _fetchall_sorted(self, executed):
        """Fetch all rows as plain tuples, sorted — row order is not
        guaranteed across backends for compound selects."""
        return sorted([tuple(row) for row in executed.fetchall()])

    @testing.requires.subqueries
    def test_union(self):
        """Plain UNION, executed directly and via an alias subquery."""
        (s1, s2) = (
            select([t1.c.col3.label('col3'), t1.c.col4.label('col4')],
                   t1.c.col2.in_(["t1col2r1", "t1col2r2"])),
            select([t2.c.col3.label('col3'), t2.c.col4.label('col4')],
                   t2.c.col2.in_(["t2col2r2", "t2col2r3"]))
        )
        u = union(s1, s2)
        wanted = [('aaa', 'aaa'), ('bbb', 'bbb'), ('bbb', 'ccc'),
                  ('ccc', 'aaa')]
        found1 = self._fetchall_sorted(u.execute())
        eq_(found1, wanted)
        found2 = self._fetchall_sorted(u.alias('bar').select().execute())
        eq_(found2, wanted)

    @testing.fails_on('firebird', "doesn't like ORDER BY with UNIONs")
    def test_union_ordered(self):
        """UNION with an ORDER BY applied to the compound itself."""
        (s1, s2) = (
            select([t1.c.col3.label('col3'), t1.c.col4.label('col4')],
                   t1.c.col2.in_(["t1col2r1", "t1col2r2"])),
            select([t2.c.col3.label('col3'), t2.c.col4.label('col4')],
                   t2.c.col2.in_(["t2col2r2", "t2col2r3"]))
        )
        u = union(s1, s2, order_by=['col3', 'col4'])
        wanted = [('aaa', 'aaa'), ('bbb', 'bbb'), ('bbb', 'ccc'),
                  ('ccc', 'aaa')]
        eq_(u.execute().fetchall(), wanted)

    @testing.fails_on('firebird', "doesn't like ORDER BY with UNIONs")
    @testing.requires.subqueries
    def test_union_ordered_alias(self):
        """Ordered UNION selected through an alias."""
        (s1, s2) = (
            select([t1.c.col3.label('col3'), t1.c.col4.label('col4')],
                   t1.c.col2.in_(["t1col2r1", "t1col2r2"])),
            select([t2.c.col3.label('col3'), t2.c.col4.label('col4')],
                   t2.c.col2.in_(["t2col2r2", "t2col2r3"]))
        )
        u = union(s1, s2, order_by=['col3', 'col4'])
        wanted = [('aaa', 'aaa'), ('bbb', 'bbb'), ('bbb', 'ccc'),
                  ('ccc', 'aaa')]
        eq_(u.alias('bar').select().execute().fetchall(), wanted)

    @testing.crashes('oracle', 'FIXME: unknown, verify not fails_on')
    @testing.fails_on(
        'firebird',
        "has trouble extracting anonymous column from union subquery")
    @testing.fails_on('mysql', 'FIXME: unknown')
    @testing.fails_on('sqlite', 'FIXME: unknown')
    def test_union_all(self):
        # UNION ALL over a nested UNION: the inner union dedupes to
        # aaa/bbb/ccc, the outer ALL keeps both copies of each
        e = union_all(
            select([t1.c.col3]),
            union(
                select([t1.c.col3]),
                select([t1.c.col3]),
            )
        )
        wanted = [('aaa',), ('aaa',), ('bbb',), ('bbb',), ('ccc',), ('ccc',)]
        found1 = self._fetchall_sorted(e.execute())
        eq_(found1, wanted)
        found2 = self._fetchall_sorted(e.alias('foo').select().execute())
        eq_(found2, wanted)

    def test_union_all_lightweight(self):
        """like test_union_all, but breaks the sub-union into
        a subquery with an explicit column reference on the outside,
        more palatable to a wider variety of engines.
        """
        u = union(
            select([t1.c.col3]),
            select([t1.c.col3]),
        ).alias()
        e = union_all(
            select([t1.c.col3]),
            select([u.c.col3])
        )
        wanted = [('aaa',), ('aaa',), ('bbb',), ('bbb',), ('ccc',), ('ccc',)]
        found1 = self._fetchall_sorted(e.execute())
        eq_(found1, wanted)
        found2 = self._fetchall_sorted(e.alias('foo').select().execute())
        eq_(found2, wanted)

    @testing.requires.intersect
    def test_intersect(self):
        i = intersect(
            select([t2.c.col3, t2.c.col4]),
            select([t2.c.col3, t2.c.col4], t2.c.col4 == t3.c.col3)
        )
        wanted = [('aaa', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')]
        found1 = self._fetchall_sorted(i.execute())
        eq_(found1, wanted)
        found2 = self._fetchall_sorted(i.alias('bar').select().execute())
        eq_(found2, wanted)

    @testing.requires.except_
    @testing.fails_on('sqlite', "Can't handle this style of nesting")
    def test_except_style1(self):
        e = except_(union(
            select([t1.c.col3, t1.c.col4]),
            select([t2.c.col3, t2.c.col4]),
            select([t3.c.col3, t3.c.col4]),
        ), select([t2.c.col3, t2.c.col4]))
        wanted = [('aaa', 'aaa'), ('aaa', 'ccc'), ('bbb', 'aaa'),
                  ('bbb', 'bbb'), ('ccc', 'bbb'), ('ccc', 'ccc')]
        found = self._fetchall_sorted(e.alias().select().execute())
        eq_(found, wanted)

    @testing.requires.except_
    def test_except_style2(self):
        # same as style1, but add alias().select() to the except_().
        # sqlite can handle it now.
        e = except_(union(
            select([t1.c.col3, t1.c.col4]),
            select([t2.c.col3, t2.c.col4]),
            select([t3.c.col3, t3.c.col4]),
        ).alias().select(), select([t2.c.col3, t2.c.col4]))
        wanted = [('aaa', 'aaa'), ('aaa', 'ccc'), ('bbb', 'aaa'),
                  ('bbb', 'bbb'), ('ccc', 'bbb'), ('ccc', 'ccc')]
        found1 = self._fetchall_sorted(e.execute())
        eq_(found1, wanted)
        found2 = self._fetchall_sorted(e.alias().select().execute())
        eq_(found2, wanted)

    @testing.fails_on('sqlite', "Can't handle this style of nesting")
    @testing.requires.except_
    def test_except_style3(self):
        # aaa, bbb, ccc - (aaa, bbb, ccc - (ccc)) = ccc
        e = except_(
            select([t1.c.col3]),  # aaa, bbb, ccc
            except_(
                select([t2.c.col3]),  # aaa, bbb, ccc
                select([t3.c.col3], t3.c.col3 == 'ccc'),  # ccc
            )
        )
        eq_(e.execute().fetchall(), [('ccc',)])
        eq_(e.alias('foo').select().execute().fetchall(), [('ccc',)])

    @testing.requires.except_
    def test_except_style4(self):
        # aaa, bbb, ccc - (aaa, bbb, ccc - (ccc)) = ccc
        e = except_(
            select([t1.c.col3]),  # aaa, bbb, ccc
            except_(
                select([t2.c.col3]),  # aaa, bbb, ccc
                select([t3.c.col3], t3.c.col3 == 'ccc'),  # ccc
            ).alias().select()
        )
        eq_(e.execute().fetchall(), [('ccc',)])
        eq_(
            e.alias().select().execute().fetchall(),
            [('ccc',)]
        )

    @testing.requires.intersect
    @testing.fails_on('sqlite', "sqlite can't handle leading parenthesis")
    def test_intersect_unions(self):
        u = intersect(
            union(
                select([t1.c.col3, t1.c.col4]),
                select([t3.c.col3, t3.c.col4]),
            ),
            union(
                select([t2.c.col3, t2.c.col4]),
                select([t3.c.col3, t3.c.col4]),
            ).alias().select()
        )
        wanted = [('aaa', 'ccc'), ('bbb', 'aaa'), ('ccc', 'bbb')]
        found = self._fetchall_sorted(u.execute())
        eq_(found, wanted)

    @testing.requires.intersect
    def test_intersect_unions_2(self):
        # same as test_intersect_unions, with both sides wrapped in
        # alias().select() for engines that can't nest bare unions
        u = intersect(
            union(
                select([t1.c.col3, t1.c.col4]),
                select([t3.c.col3, t3.c.col4]),
            ).alias().select(),
            union(
                select([t2.c.col3, t2.c.col4]),
                select([t3.c.col3, t3.c.col4]),
            ).alias().select()
        )
        wanted = [('aaa', 'ccc'), ('bbb', 'aaa'), ('ccc', 'bbb')]
        found = self._fetchall_sorted(u.execute())
        eq_(found, wanted)

    @testing.requires.intersect
    def test_intersect_unions_3(self):
        u = intersect(
            select([t2.c.col3, t2.c.col4]),
            union(
                select([t1.c.col3, t1.c.col4]),
                select([t2.c.col3, t2.c.col4]),
                select([t3.c.col3, t3.c.col4]),
            ).alias().select()
        )
        wanted = [('aaa', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')]
        found = self._fetchall_sorted(u.execute())
        eq_(found, wanted)

    @testing.requires.intersect
    def test_composite_alias(self):
        # alias the whole compound and select from that
        ua = intersect(
            select([t2.c.col3, t2.c.col4]),
            union(
                select([t1.c.col3, t1.c.col4]),
                select([t2.c.col3, t2.c.col4]),
                select([t3.c.col3, t3.c.col4]),
            ).alias().select()
        ).alias()
        wanted = [('aaa', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')]
        found = self._fetchall_sorted(ua.select().execute())
        eq_(found, wanted)
# release CompoundTest's module-global table references; JoinTest's
# setup_class rebinds these names against its own metadata
t1 = t2 = t3 = None
class JoinTest(fixtures.TestBase):

    """Tests join execution.

    The compiled SQL emitted by the dialect might be ANSI joins or
    theta joins ('old oracle style', with (+) for OUTER). This test
    tries to exercise join syntax and uncover any inconsistencies in
    `JOIN rhs ON lhs.col=rhs.col` vs `rhs.col=lhs.col`. At least one
    database seems to be sensitive to this.
    """

    __backend__ = True

    @classmethod
    def setup_class(cls):
        global metadata
        global t1, t2, t3
        metadata = MetaData(testing.db)
        t1 = Table('t1', metadata,
                   Column('t1_id', Integer, primary_key=True),
                   Column('name', String(32)))
        t2 = Table('t2', metadata,
                   Column('t2_id', Integer, primary_key=True),
                   Column('t1_id', Integer, ForeignKey('t1.t1_id')),
                   Column('name', String(32)))
        t3 = Table('t3', metadata,
                   Column('t3_id', Integer, primary_key=True),
                   Column('t2_id', Integer, ForeignKey('t2.t2_id')),
                   Column('name', String(32)))
        metadata.drop_all()
        metadata.create_all()

        # t1.10 -> t2.20 -> t3.30
        # t1.11 -> t2.21
        # t1.12
        t1.insert().execute({'t1_id': 10, 'name': 't1 #10'},
                            {'t1_id': 11, 'name': 't1 #11'},
                            {'t1_id': 12, 'name': 't1 #12'})
        t2.insert().execute({'t2_id': 20, 't1_id': 10, 'name': 't2 #20'},
                            {'t2_id': 21, 't1_id': 11, 'name': 't2 #21'})
        t3.insert().execute({'t3_id': 30, 't2_id': 20, 'name': 't3 #30'})

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    def assertRows(self, statement, expected):
        """Execute a statement and assert that rows returned equal expected."""
        found = sorted([tuple(row)
                        for row in statement.execute().fetchall()])
        eq_(found, sorted(expected))

    def test_join_x1(self):
        """Joins t1->t2."""
        for criteria in (t1.c.t1_id == t2.c.t1_id, t2.c.t1_id == t1.c.t1_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id],
                from_obj=[t1.join(t2, criteria)])
            self.assertRows(expr, [(10, 20), (11, 21)])

    def test_join_x2(self):
        """Joins t1->t2->t3."""
        # NOTE(review): the body is identical to test_join_x1 — t3 is
        # never actually joined, despite the docstring.
        for criteria in (t1.c.t1_id == t2.c.t1_id, t2.c.t1_id == t1.c.t1_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id],
                from_obj=[t1.join(t2, criteria)])
            self.assertRows(expr, [(10, 20), (11, 21)])

    def test_outerjoin_x1(self):
        """Outer joins t1->t2."""
        # NOTE(review): despite the docstring this performs INNER joins
        # t1->t2->t3 — only the fully-linked chain (10, 20) survives.
        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id],
                from_obj=[t1.join(t2).join(t3, criteria)])
            self.assertRows(expr, [(10, 20)])

    def test_outerjoin_x2(self):
        """Outer joins t1->t2,t3."""
        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                from_obj=[t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                          outerjoin(t3, criteria)])
            self.assertRows(
                expr, [(10, 20, 30), (11, 21, None), (12, None, None)])

    def test_outerjoin_where_x2_t1(self):
        """Outer joins t1->t2,t3, where on t1."""
        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t1.c.name == 't1 #10',
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t1.c.t1_id < 12,
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30), (11, 21, None)])

    def test_outerjoin_where_x2_t2(self):
        """Outer joins t1->t2,t3, where on t2."""
        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t2.c.name == 't2 #20',
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t2.c.t2_id < 29,
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30), (11, 21, None)])

    def test_outerjoin_where_x2_t3(self):
        """Outer joins t1->t2,t3, where on t3."""
        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t3.c.name == 't3 #30',
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t3.c.t3_id < 39,
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

    def test_outerjoin_where_x2_t1t3(self):
        """Outer joins t1->t2,t3, where on t1 and t3."""
        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t1.c.name == 't1 #10', t3.c.name == 't3 #30'),
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t1.c.t1_id < 19, t3.c.t3_id < 39),
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

    def test_outerjoin_where_x2_t1t2(self):
        """Outer joins t1->t2,t3, where on t1 and t2."""
        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t1.c.name == 't1 #10', t2.c.name == 't2 #20'),
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t1.c.t1_id < 12, t2.c.t2_id < 39),
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30), (11, 21, None)])

    def test_outerjoin_where_x2_t1t2t3(self):
        """Outer joins t1->t2,t3, where on t1, t2 and t3."""
        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t1.c.name == 't1 #10',
                     t2.c.name == 't2 #20',
                     t3.c.name == 't3 #30'),
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t1.c.t1_id < 19, t2.c.t2_id < 29, t3.c.t3_id < 39),
                from_obj=[
                    (t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                     outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

    def test_mixed(self):
        """Joins t1->t2, outer t2->t3."""
        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
            print(expr)
            self.assertRows(expr, [(10, 20, 30), (11, 21, None)])

    def test_mixed_where(self):
        """Joins t1->t2, outer t2->t3, plus a where on each table in turn."""
        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t1.c.name == 't1 #10',
                from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t2.c.name == 't2 #20',
                from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t3.c.name == 't3 #30',
                from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t1.c.name == 't1 #10', t2.c.name == 't2 #20'),
                from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t2.c.name == 't2 #20', t3.c.name == 't3 #30'),
                from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t1.c.name == 't1 #10',
                     t2.c.name == 't2 #20',
                     t3.c.name == 't3 #30'),
                from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])
# pre-declare / reset the module globals rebound by OperatorTest.setup_class
metadata = flds = None
class OperatorTest(fixtures.TestBase):
    """Round trips for SQL operator expressions (modulo, window functions)."""

    __backend__ = True

    @classmethod
    def setup_class(cls):
        global metadata, flds
        metadata = MetaData(testing.db)
        flds = Table(
            'flds', metadata,
            Column(
                'idcol', Integer, primary_key=True,
                test_needs_autoincrement=True),
            Column('intcol', Integer),
            Column('strcol', String(50)),
        )
        metadata.create_all()
        flds.insert().execute([
            dict(intcol=5, strcol='foo'),
            dict(intcol=13, strcol='bar')
        ])

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    # TODO: seems like more tests warranted for this setup.
    @testing.fails_if(
        lambda: util.py3k and testing.against('mysql+mysqlconnector'),
        "bug in mysqlconnector")
    def test_modulo(self):
        # 5 % 3 == 2, 13 % 3 == 1
        eq_(
            select([flds.c.intcol % 3],
                   order_by=flds.c.idcol).execute().fetchall(),
            [(2,), (1,)]
        )

    @testing.requires.window_functions
    def test_over(self):
        # row_number() over ORDER BY strcol: 'bar' (13) sorts first
        eq_(
            select([
                flds.c.intcol, func.row_number().over(order_by=flds.c.strcol)
            ]).execute().fetchall(),
            [(13, 1), (5, 2)]
        )
import pandas as pd
import numpy as np
import time
import seaborn as sb
import requests
import matplotlib.pyplot as plt
from pandas_datareader import data as web
from sklearn.linear_model import Lasso
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import RandomizedSearchCV as rcv
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
import matplotlib.pyplot as plt
from IPython import get_ipython
import matplotlib.pyplot as plt
from typing import Optional, Dict, Any, List
from ciso8601 import parse_datetime
from requests import Request, Session, Response
import sys
import hmac
import urllib.parse
import datetime
import sqlite3
import csv
class Algo():
def __init__(self, df):
self.self = self
self.df = df
def ranger(self, df) -> pd.DataFrame:
""" Basic Indicator values inserted into df, mostly for the range algo"""
self["ma"] = self['close'].rolling(9).mean()
self['ratio'] = self['close'] / self['ma']
percentiles = [5, 10, 50, 90, 95]
p = np.percentile(self['ratio'].dropna(), percentiles)
short = p[-1]
long = p[0]
self['position'] = np.where(self.ratio >= short, -1, np.nan)
self['position'] = np.where(self.ratio < long, 1, self['position'])
self['position'] = self['position'].ffill()
self['returnsR'] = np.log(self["close"]).diff()
self['strat_returnR'] = self['returnsR'] * self['position'].shift()
return self
    def plot_R(self):
        """Plot the close/MA ratio for the last 500 rows with its
        5th/50th/95th-percentile bands; saves ../assets/rangePercentiles.png.

        NOTE(review): ``self`` is used as a DataFrame (called via
        ``Algo.plot_R(df)``); requires ranger() to have populated 'ratio'.
        """
        plt.rcParams['figure.figsize'] = [16.0, 6.0]
        sb.set()
        y = self.iloc[-500:]['time']  # computed but unused — TODO confirm intent
        percentiles = [5, 10, 50, 90, 95]
        p = np.percentile(self['ratio'].dropna(), percentiles)
        plt.xticks(fontsize=22)
        plt.yticks(fontsize=22)
        plt.plot(self.iloc[-500:]['ratio'].dropna())
        plt.axhline(p[0], c=(.5, .5, .5), ls='--')
        plt.axhline(p[2], c=(.5, .5, .5), ls='--')
        plt.axhline(p[-1], c=(.5, .5, .5), ls='--')
        plt.savefig('../assets/rangePercentiles.png')
    def plot_positionR(self):
        """Plot the last 500 range-algo 'position' values (+1/-1); saves
        ../assets/rangeStatus.png.  ``self`` is a DataFrame (see ranger)."""
        plt.rcParams['figure.figsize'] = [16.0, 6.0]
        fig = plt.figure(facecolor=(1, 1, 1))
        y = self.iloc[-500:]['time']  # unused — TODO confirm intent
        plt.xticks(fontsize=22)
        plt.yticks(fontsize=22)
        plt.plot(self.iloc[-500:]['position'].dropna())
        plt.savefig('../assets/rangeStatus.png')
    def market_returnsR(self):
        """Plot cumulative buy&hold vs range-strategy returns (last 500
        rows); saves ../assets/rangeRets.png.

        NOTE(review): reads 'market_returns' and 'range_returns' columns,
        which ranger() does not create (it writes 'returnsR' /
        'strat_returnR'); they must be added elsewhere before calling.
        """
        plt.rcParams['figure.figsize'] = [16.0, 6.0]
        fig = plt.figure(facecolor=(1, 1, 1))
        plt.xticks(fontsize=22)
        plt.yticks(fontsize=22)
        plt.plot(
            np.exp(self.iloc[-500:]['market_returns'].dropna()).cumprod(), label='Buy/Hold')
        plt.plot(
            np.exp(self.iloc[-500:]['range_returns'].dropna()).cumprod(), label='Strategy')
        plt.xticks(rotation=90)
        plt.legend()
        plt.savefig('../assets/rangeRets.png')
    def nineM(self):
        """Plot close vs the 9-period MA ('9-min', created by trendy())
        for the last 500 rows; saves ../assets/btc1m9ma.png."""
        plt.rcParams['figure.figsize'] = [16.0, 6.0]
        fig = plt.figure(facecolor=(1, 1, 1))
        y = self.iloc[-500:]['time']  # unused — TODO confirm intent
        plt.xticks(fontsize=22)
        plt.yticks(fontsize=22)
        plt.plot(self.iloc[-500:]['close'], label='BTC')
        plt.plot(self.iloc[-500:]['9-min'], label='9-min')
        plt.legend(loc=2);
        plt.savefig('../assets/btc1m9ma.png')
    def range_gainz(self):
        """Print final cumulative buy&hold vs range-strategy returns.

        NOTE(review): depends on 'market_returns'/'range_returns' columns
        created outside the methods visible here.
        """
        print("Market Returns: ", np.exp(
            self.market_returns).cumprod().iloc[-1])
        print("Range Strategy Returns: ", np.exp(
            self.range_returns).cumprod().iloc[-1])
def trendy(self, df) -> pd.DataFrame:
""" Trend following algo"""
short_ma = 9
long_ma = 21
self['9-min'] = self['close'].rolling(short_ma).mean().shift()
self['21-min'] = self['close'].rolling(long_ma).mean().shift()
self['signal'] = np.where(self['9-min'] > self['21-min'], 1, np.nan)
self['signal'] = np.where(
self['9-min'] < self['21-min'], -1, self['signal'])
self.dropna(inplace=True)
self['returnsT'] = np.log(self['close']).diff()
self['strat_returnT'] = self['signal'] * self['returnsT'].shift()
self['entry'] = self.signal.diff()
return self
    def plot_positionT(self):
        """Plots long/short flips on line chart for trend algo 'T':
        close plus both MAs, with crossover markers from 'entry'
        (+2 = long entry, -2 = short entry); saves
        ../assets/trendPositions1.png."""
        plt.rcParams['figure.figsize'] = [16.0, 6.0]
        plt.rcParams['figure.figsize'] = 30, 10  # overrides the line above
        plt.grid(True, alpha=.3)
        plt.xticks(fontsize=22)
        plt.yticks(fontsize=22)
        y = self.iloc[-500:]['time']  # unused — TODO confirm intent
        plt.plot(self.iloc[-500:]['close'], label='BTC')
        plt.plot(self.iloc[-500:]['9-min'], label='9-min')
        plt.plot(self.iloc[-500:]['21-min'], label='21-min')
        plt.plot(self[-500:].loc[self.entry == 2].index, self[-500:]['9-min'][self.entry == 2], "^",
                 color="g", markersize=12, label="Long")
        plt.plot(self[-500:].loc[self.entry == -2].index, self[-500:]['21-min'][self.entry == -2], "v",
                 color="r", markersize=12, label="Short")
        plt.legend(loc=2)
        plt.savefig('../assets/trendPositions1.png')
    def plot_gainzT(self):
        """Compute 'trend_returns' (= signal * market_returns) and plot
        cumulative buy&hold vs trend strategy (last 500 rows); saves
        ../assets/trendRets.png.

        NOTE(review): requires a 'market_returns' column created elsewhere.
        """
        plt.rcParams['figure.figsize'] = [16.0, 6.0]
        fig = plt.figure(facecolor=(1, 1, 1))
        y = self.iloc[-500:]['time']  # unused — TODO confirm intent
        self['trend_returns'] = self.signal * self.market_returns
        plt.xticks(fontsize=22)
        plt.yticks(fontsize=22)
        plt.plot(np.exp(self.iloc[-500:]['market_returns']).cumprod(), label="Buy/Hold")
        plt.plot(np.exp(self.iloc[-500:]['trend_returns']).cumprod(), label="Strat")
        plt.legend()
        plt.savefig('../assets/trendRets.png')
    def trend_gainz(self):
        """Print final cumulative buy&hold vs trend-strategy returns.

        NOTE(review): depends on 'market_returns' and 'trend_returns'
        (the latter is created in plot_gainzT).
        """
        print("Market Returns: ", np.exp(
            self.market_returns).cumprod().iloc[-1])
        print("Trend Strategy Returns: ", np.exp(
            self.trend_returns).cumprod().iloc[-1])
    def dualPlot(self):
        """Plot close and the 9-period MA with entry markers taken from an
        'entryR' column; saves ../assets/dualPlot.png.

        NOTE(review): no visible method creates 'entryR' (trendy creates
        'entry') — confirm the producing code before use.  Also note the
        marker semantics are inverted relative to plot_positionT here
        (+2 -> Short, -2 -> Long).
        """
        fig = plt.figure(facecolor=(1, 1, 1))
        y = self.iloc[-500:]['time']  # unused — TODO confirm intent
        plt.xticks(fontsize=22)
        plt.yticks(fontsize=22)
        plt.plot(self.iloc[-500:]['close'], label='BTC')
        plt.plot(self.iloc[-500:]['9-min'], label='9-min')
        plt.plot(self[-500:].loc[self.entryR == 2].index, self[-500:]['9-min'][self.entryR == 2], "^",
                 color="r", markersize=12, label="Short")
        plt.plot(self[-500:].loc[self.entryR == -2].index, self[-500:]['9-min'][self.entryR == -2], "v",
                 color="g", markersize=12, label="Long")
        plt.legend(loc=2);
        plt.savefig('../assets/dualPlot.png')
def Z_scoreR(self, df) -> pd.DataFrame:
'''Args, needs col 'strat_return with +/- returns'''
# column for negative and positive
df.dropna().head(100)
df['posneg'] = np.where(df['strat_returnR'] < 0, 'neg', 'pos')
# consecutive groups
df['series'] = df['posneg'].ne(df['posneg'].shift()).cumsum()
# removed groups with length more like 2
df = df[df['series'].map(df['series'].value_counts()).gt(2)]
# tested if order `pos-neg` of groups, if not removed groups
m1 = df['posneg'].eq('pos') & df['posneg'].shift(-1).eq('neg')
m2 = df['posneg'].eq('neg') & df['posneg'].shift().eq('pos')
groups = df.loc[m1 | m2, 'series']
df = df[df['series'].isin(groups)].copy()
df['pairs'] = (df['posneg'].ne(df['posneg'].shift())
& df['posneg'].eq('pos')).cumsum()
N = len(df['series'].dropna())
R = df['series'].dropna().nunique()
W = len(df.loc[df.strat_returnR > 0])
L = len(df.loc[df.strat_returnR < 0])
P = 2*W*L
Z_score = (N*(R-0.5)-P)/((P*(P-N))/(N-1))**(1/2)
return float(Z_score)
def Z_scoreT(self, df) -> pd.DataFrame:
'''Args, needs col 'strat_return with +/- returns'''
# column for negative and positive
self['posneg'] = np.where(self['trend_returns'] < 0, 'neg', 'pos')
# consecutive groups
self['series'] = self['posneg'].ne(self['posneg'].shift()).cumsum()
# removed groups with length more like 2
self = self[self['series'].map(self['series'].value_counts()).gt(2)]
# tested if order `pos-neg` of groups, if not removed groups
m1 = self['posneg'].eq('pos') & self['posneg'].shift(-1).eq('neg')
m2 = self['posneg'].eq('neg') & self['posneg'].shift().eq('pos')
groups = self.loc[m1 | m2, 'series']
self = self[self['series'].isin(groups)].copy()
self['pairs'] = (self['posneg'].ne(self['posneg'].shift())
& self['posneg'].eq('pos')).cumsum()
N = len(self['series'].dropna())
R = self['series'].dropna().nunique()
W = len(self.loc[self.trend_returns> 0])
L = len(self.loc[self.trend_returns < 0])
P = 2*W*L
Z_score = (N*(R-0.5)-P)/((P*(P-N))/(N-1))**(1/2)
return float(Z_score)
def marketState(d, b):
''' Func to determine market state, and shifts trading engine to that regime'''
prev_state = []
prev_state_name = []
state_selector_switch = []
d = float(d)
b = float(b)
if d < 0 and d < b or d > 0 and d > b:
prev_state.append(d)
prev_state_name.append("Trend")
state_selector_switch.append(1) # pos 1 indicates trend conditions
print(
"Trend detector algo's ZScore indicates there is a 'Trending' market state..", "..Z Score:", d)
print('Here is the trend_df with our trend indicator suite')
print(
'All trades for the next period will be placed according to the conditions within...')
return state_selector_switch
elif b < 0 and b < d or b > 0 and b > d:
prev_state.append(b)
prev_state_name.append("Range")
# neg -1 indicates range conditions
state_selector_switch.append(-1)
print(
"Range detector algo's ZScore indicates there is a 'Ranging' market state..", "..Z Score:", b)
print('Here is the range_df with our range indicator suite')
print(
'All trades for the next period will be placed according to the conditions within...')
return state_selector_switch
else:
return state_selector_switch
print(f"Market state detector algo's have exact same values, previous state algo will be used for the next period..")
print(
f" Previous State: {prev_state_name}, previous Z_score: {prev_state}, Current Z_score tie: {b}")
def stateTest(df):
""" Calls FTX REST APi, gathers data, flips rows, adds TA for algo's, and calcs Z_score's' for each algo"""
df = df.dropna()
df = df[::-1]
trend_df = Algo.trendy(df, df)
range_df = Algo.ranger(df, df)
a = Algo(range_df)
b = a.Z_scoreR(range_df)
c = Algo(trend_df)
d = c.Z_scoreT(trend_df)
return d, b
def stateSelector(df, int):
df = df.dropna()
'''func to switch between mean reversion and trend phase'''
if int == -1:
return Algo.ranger(df, df)
elif int == 1:
return Algo.trendy(df, df)
def fullstate(self, df) -> pd.DataFrame:
""" Trend following algo"""
short_ma = 9
long_ma = 21
self['9-min'] = self['close'].rolling(short_ma).mean().shift()
self['21-min'] = self['close'].rolling(long_ma).mean().shift()
self['signal'] = np.where(self['9-min'] > self['21-min'], 1, np.nan)
self['signal'] = np.where(
self['9-min'] < self['21-min'], -1, self['signal'])
self.dropna(inplace=True)
self['market_returns'] = np.log(self['close']).diff()
self['trend_returns'] = self['signal'] * self['market_returns'].shift()
self['entry'] = self.signal.diff()
self["ma"] = self['close'].rolling(9).mean().shift()
self['ratio'] = self['close'] / self['ma']
percentiles = [5, 10, 50, 90, 95]
p = np.percentile(self['ratio'].dropna(), percentiles)
short = p[-1]
long = p[0]
self['position'] = np.where(self.ratio >= short, -1, np.nan)
self['position'] = np.where(self.ratio < long, 1, self['position'])
self['position'] = self['position'].ffill()
self['entryR'] = self.position.diff()
self['range_returns'] = self['market_returns'] * \
self['position'].shift()
self['sign'] = np.where(self['trend_returns'] >
self['range_returns'], 1, np.nan)
self['sign'] = np.where(self['trend_returns'] <
self['range_returns'], -1, self['sign'])
return self
def folio(self) -> pd.DataFrame:
#column for negative and positive
self=self.dropna()
self['rangeSign'] = np.where(self['range_returns'] < 0, 'neg','pos')
self['trendSign'] = np.where(self['trend_returns'] < 0, 'neg','pos')
#consecutive groups
self['rangeSeries'] = self['rangeSign'].ne(self['rangeSign'].shift()).cumsum()
self['trendSeries'] = self['trendSign'].ne(self['trendSign'].shift()).cumsum()
#removed groups with length more like 2
df = self[self['rangeSeries'].map(self['rangeSeries'].value_counts()).gt(2)]
df = self[self['trendSeries'].map(self['trendSeries'].value_counts()).gt(2)]
#tested if order `pos-neg` of groups, if not removed groups
m1 = df['rangeSign'].eq('pos') & df['rangeSign'].shift(-1).eq('neg')
m2 = df['rangeSign'].eq('neg') & df['rangeSign'].shift().eq('pos')
m3 = df['trendSign'].eq('pos') & df['trendSign'].shift(-1).eq('neg')
m4 = df['trendSign'].eq('neg') & df['trendSign'].shift().eq('pos')
groupsR = df.loc[m1 | m2, 'rangeSeries']
df = df[df['rangeSeries'].isin(groupsR)].copy()
df['rangePairs'] = (df['rangeSign'].ne(df['rangeSign'].shift()) & df['rangeSign'].eq('pos')).cumsum()
groupsT = df.loc[m3 | m4, 'trendSeries']
df = df[df['trendSeries'].isin(groupsT)].copy()
df['trendPairs'] = (df['trendSign'].ne(df['trendSign'].shift()) & df['trendSign'].eq('pos')).cumsum()
rangeTradeCounts = df['rangeSeries'].nunique()
trendTradeCounts = df['trendSeries'].nunique()
totalTrades = rangeTradeCounts + trendTradeCounts
df['just_date'] = df['time'].dt.date
df['just_date']
# Set initial capital
initial_capital = float(100000.0)
# Create df positions
positions = pd.DataFrame(index=df.time.index).fillna(0.0)
# Buy 2 BTC
positions['BTCPERP'] = 1*df['signal']
# Initilize portfolio w value owned
portfolio = positions.multiply(df['close'], axis=0)
# Store diff in shares owned
pos_diff = positions.diff()
# Add 'holdings' to portfolio
portfolio['holdings'] = (positions.multiply(df['close'], axis=0)).sum(axis=1)
# Add 'cash' to portfolio
portfolio['cash'] = initial_capital - (pos_diff.multiply(df['close'], axis=0)).sum(axis=1).cumsum()
# Add 'total' to portfolio
portfolio['total'] = portfolio['cash'] + portfolio['holdings']
# Add 'returns' to portfolio
portfolio['returns'] = portfolio['total'].pct_change()
portfolio['time'] = df['time']
p = portfolio[-1:]
p.drop(columns=['time'], inplace=True)
p = p.reset_index(drop=True)
p.to_json('../templates/portfolio2.json', orient='records')
fig = plt.figure(facecolor=(1, 1, 1))
x = portfolio.iloc[-200:]['time']
y = portfolio.iloc[-200:]['total']
plt.xticks(fontsize=22, color="black", rotation=25)
plt.xlabel('Time', color='black',fontsize=22)
plt.yticks(fontsize=22, color='black')
plt.ylabel('Value', color='black',fontsize=22)
plt.locator_params(axis='x', nbins=8)
plt.plot(x,y)
plt.savefig('../assets/portfolioStandings.png')
plt.show()
portfolio.to_csv("../assets/portfolio.csv", index=False)
def folioDB():
conn = sqlite3.connect('folio.db')
cur = conn.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS folio (BTCPERP int, holdings int, cash int, total int, returns int, time text)''')
folioTable = pd.read_csv('../assets/portfolio.csv') # load to DataFrame
folioTable.to_sql('orders', conn, if_exists='append', index = False) # write to sqlite table
    def regime(df):
        """Lasso walk-forward error study plus GMM regime detection.

        Tunes a scaled Lasso (alpha/max_iter via randomized search) over
        growing train splits of the OHLC data, plots train/test error,
        then fits a 3-component Gaussian mixture on lagged OHLC and plots
        cumulative return coloured by detected regime.

        Side effects: writes ../assets/lasso-error.png,
        ../assets/train-test-error.png and ../assets/lasso.png; prints
        per-regime means and covariances.  Relies on module-level ``rcv``
        (presumably RandomizedSearchCV — confirm) and an active IPython
        shell for ``get_ipython()``.
        """
        imp = SimpleImputer(missing_values=np.nan, strategy='mean')
        steps = [('imputation', imp),
                 ('scaler',StandardScaler()),
                 ('lasso',Lasso())]
        pipeline =Pipeline(steps)
        parameters = {'lasso__alpha':np.arange(0.0001,10,.0001),
                      'lasso__max_iter':np.random.uniform(100,100000,4)}
        # Lazy imports: only needed inside this routine.
        from pandas_datareader import data as web
        from sklearn import mixture as mix
        import seaborn as sns
        import matplotlib.pyplot as plt
        reg = rcv(pipeline, parameters,cv=5)
        X=df[['open','high','low','close']]
        y =df['close']
        avg_err={}
        avg_err={}  # NOTE(review): duplicate assignment, no effect
        avg_train_err = {}
        # Walk-forward: train on the first t% of rows, evaluate on the rest.
        for t in np.arange(50,97,3):
            # Clear the previous fold's model from the IPython namespace.
            get_ipython().magic('reset_selective -f reg1')
            split = int(t*len(X)/100)
            reg.fit(X[:split],y[:split])
            best_alpha = reg.best_params_['lasso__alpha']
            best_iter = reg.best_params_['lasso__max_iter']
            # Refit a plain Lasso with the tuned hyperparameters.
            reg1 = Lasso(alpha=best_alpha,max_iter=best_iter)
            X = imp.fit_transform(X,y)
            reg1.fit(X[:split],y[:split])
            df['P_C_%i'%t] = 0.
            df.iloc[:,df.columns.get_loc('P_C_%i'%t)] = reg1.predict(X[:])
            df['Error_%i'%t] = np.abs(df['P_C_%i'%t]-df['close'])
            e = np.mean(df['Error_%i'%t][split:])
            train_e = np.mean(df['Error_%i'%t][:split])
            avg_err[t] = e
            avg_train_err[t] = train_e
        # Train-error-only plot (uses `split` from the final loop iteration).
        plt.rcParams['figure.figsize'] = [4.0, 4.0]
        fig = plt.figure(facecolor=(1, 1, 1))
        Range =df['high'][split:]-df['low'][split:]
        plt.scatter(list(avg_train_err.keys()),list(avg_train_err.values()),label='train_error')
        plt.legend(loc='best')
        avgRange = np.average(Range)
        plt.title(f'Avg Range = %1.2f'%avgRange)
        plt.savefig('../assets/lasso-error.png')
        # Combined train/test-error plot.
        plt.rcParams['figure.figsize'] = [4.0, 4.0]
        fig = plt.figure(facecolor=(1, 1, 1))
        Range =df['high'][split:]-df['low'][split:]
        # ------------------------------------------------------------------------ added code below.
        plt.scatter(list(avg_err.keys()),list(avg_err.values()), label='test_error')
        # ---------------------------------------------------------------------------
        plt.scatter(list(avg_train_err.keys()),list(avg_train_err.values()),label='train_error')
        plt.legend(loc='best')
        avR = np.average(Range)
        plt.title(f'Avg Range = %1.2f'%avR)
        plt.savefig('../assets/train-test-error.png')
        # Shift OHLC by one bar (presumably to avoid lookahead — confirm).
        df=df[['open','high','low','close']]
        df['open']=df['open'].shift(1)
        df['high']=df['high'].shift(1)
        df['low']=df['low'].shift(1)
        df['close']=df['close'].shift(1)
        df=df[['open','high','low','close']]
        df=df.dropna()
        unsup = mix.GaussianMixture(n_components=3,
                                    covariance_type="spherical",
                                    n_init=100,
                                    random_state=42)
        unsup.fit(np.reshape(df,(-1,df.shape[1])))
        # NOTE(review): local `regime` shadows this function's own name.
        regime = unsup.predict(np.reshape(df,(-1,df.shape[1])))
        df['Return']= np.log(df['close']/df['close'].shift(1))
        Regimes=pd.DataFrame(regime,columns=['Regime'],index=df.index)\
                            .join(df, how='inner')\
                            .assign(market_cu_return=df.Return.cumsum())\
                            .reset_index(drop=False)\
                            .rename(columns={'index':'Date'})
        # Scatter the cumulative return coloured by the detected regime.
        plt.rcParams['figure.figsize'] = [16.0, 6.0]
        order=[0,1,2]
        fig = sns.FacetGrid(data=Regimes,hue='Regime',hue_order=order,aspect=2,height= 4)
        fig.map(plt.scatter,'Date','market_cu_return', s=4).add_legend(labelcolor='white')
        plt.tick_params(colors='white', grid_color='black')
        plt.rcParams['text.color']='w'
        plt.grid()
        plt.savefig('../assets/lasso.png', bbox_inches='tight')
        plt.show()
        for i in order:
            print('Mean for regime %i: '%i,unsup.means_[i][0])
            print('Co-Variancefor regime %i: '%i,(unsup.covariances_[i]))
| StarcoderdataPython |
1965299 | import tensorflow as tf
def close_crop(image, patch_size):
    """Center-crop a 178x218 image to 140x140, resize, and normalize.

    Args:
        image: HxWx3 image tensor.
        patch_size: output side length in pixels.

    Returns:
        float32 tensor of shape (patch_size, patch_size, 3) in [0, 1].
    """
    image.set_shape([None, None, 3])
    src_w, src_h = 178, 218
    crop_w = crop_h = 140
    # Symmetric crop box around the image centre.
    x0 = (src_w - crop_w) // 2
    y0 = (src_h - crop_h) // 2
    x1 = (src_w + crop_w) // 2
    y1 = (src_h + crop_h) // 2
    batched = tf.expand_dims(image, axis=0)
    cropped = tf.image.crop_to_bounding_box(batched, y0, x0, y1 - y0, x1 - x0)
    resized = tf.image.resize_images(cropped, [patch_size, patch_size])
    squeezed = tf.squeeze(resized, axis=0)
    squeezed.set_shape([patch_size, patch_size, 3])
    # Scale uint8 pixel values into [0, 1].
    return tf.to_float(squeezed) / 255.
def image_file_inputs(file_patters, batch_size=32, patch_size=32):
    """Build a one-shot tf.data input pipeline over image files.

    Args:
        file_patters: glob pattern(s) accepted by tf.data.Dataset.list_files.
        batch_size: number of images per batch.
        patch_size: side length passed to close_crop.

    Returns:
        (images, images): the same batched image tensor twice — input and
        target are identical.
    """
    dataset = tf.data.Dataset.list_files(file_patters)
    dataset = dataset.map(tf.read_file)
    dataset = dataset.map(tf.image.decode_image)
    dataset = dataset.map(lambda img: close_crop(img, patch_size))
    dataset = dataset.batch(batch_size)
    images = dataset.make_one_shot_iterator().get_next()
    return images, images
| StarcoderdataPython |
1912759 | <reponame>nvllsvm/file_to_bitmap
import setuptools
# Package metadata for the bmp-transcode distribution.
setuptools.setup(
    name='bmp-transcode',
    version='0.3.0',
    description='Transcode ordinary files to and from bitmap images.',
    # Long description is taken verbatim from the README.
    long_description=open('README.rst').read(),
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/nvllsvm/bmp-transcode',
    license='Apache 2.0',
    packages=['bmp_transcode'],
    install_requires=['pillow'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3 :: Only'
    ],
    # Installs the `bmp-transcode` console command.
    entry_points={
        'console_scripts': ['bmp-transcode=bmp_transcode:main']
    }
)
| StarcoderdataPython |
11391330 | <gh_stars>0
## @package onnx
# Module caffe2.python.onnx.backend
"""Backend for running ONNX on Caffe2
To run this, you will need to have Caffe2 installed as well.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import collections
from subprocess import Popen, PIPE
import zipfile
import itertools
# When onnx is built against a version of protobuf that is older than
# that which is vendored with caffe2, onnx will crash if caffe2's
# vendored protobuf is loaded first. We can work around this by
# importing onnx first, which will cause it to go out and pick up the
# system protobuf.
import onnx.backend
import caffe2
from caffe2.python import core, workspace, rnn_cell, gru_cell
from caffe2.python.compatibility import container_abcs
from caffe2.python.model_helper import ModelHelper
from caffe2.proto import caffe2_pb2
import caffe2.python.utils
import numpy as np
import onnx
from onnx import checker, GraphProto, TensorProto, AttributeProto, ModelProto
import onnx.numpy_helper
import onnx.defs
import onnx.optimizer
import onnx.shape_inference
import onnx.utils
from onnx.backend.base import Backend, Device, DeviceType, namedtupledict
from caffe2.python.onnx.workspace import Workspace
from caffe2.python.onnx.backend_rep import Caffe2Rep
from caffe2.python.onnx.backend_cpp_rep import Caffe2CppRep
import caffe2.python._import_c_extension as C
import warnings
def force_unicode(s):
    """Return *s* decoded as UTF-8 text when it is a byte string, else unchanged."""
    if isinstance(s, (bytes, bytearray)):
        return s.decode('utf-8')
    return s
def get_device_option(device):
    """Translate an ONNX Device into a Caffe2 DeviceOption proto.

    Raises KeyError for device types other than CPU/CUDA.
    """
    type_map = {
        DeviceType.CPU: caffe2_pb2.CPU,
        DeviceType.CUDA: workspace.GpuDeviceType,
    }
    return core.DeviceOption(type_map[device.type], device.device_id)
class OnnxAttributes(dict):
    """
    Dict view of ONNX attributes — more convenient to work with than the
    protobuf representation.
    """
    @staticmethod
    def from_onnx(args):
        """Build an OnnxAttributes from an iterable of AttributeProto."""
        attrs = OnnxAttributes()
        for arg in args:
            attrs[arg.name] = convertAttributeProto(arg)
        return attrs

    def caffe2(self, kmap=lambda k: k):
        """Yield Caffe2 Argument protos, renaming keys via *kmap*.

        Keys that *kmap* maps to the empty string are dropped.
        """
        for key, value in self.items():
            mapped = kmap(key)
            if mapped != '':
                yield caffe2.python.utils.MakeArgument(mapped, value)
# TODO: Move this into ONNX main library
def convertAttributeProto(onnx_arg):
    """
    Convert an ONNX AttributeProto into an appropriate Python object
    for the type.

    Scalar fields (f/i/s/t/g) are checked first, then repeated fields;
    an attribute with no recognized field set raises ValueError.

    NB: Tensor attribute gets returned as the straight proto.
    """
    if onnx_arg.HasField('f'):
        return onnx_arg.f
    elif onnx_arg.HasField('i'):
        return onnx_arg.i
    elif onnx_arg.HasField('s'):
        return onnx_arg.s
    elif onnx_arg.HasField('t'):
        return onnx_arg.t # this is a proto!
    elif onnx_arg.HasField('g'):
        # Subgraph attributes are eagerly converted into Caffe2 nets.
        return Caffe2Backend._graph_to_net(onnx_arg.g, Caffe2Backend._known_opset_version)
    elif len(onnx_arg.floats):
        return list(onnx_arg.floats)
    elif len(onnx_arg.ints):
        return list(onnx_arg.ints)
    elif len(onnx_arg.strings):
        return list(onnx_arg.strings)
    elif len(onnx_arg.graphs):
        retval = []
        # TODO: this doesn't work with RNN ops
        for g in onnx_arg.graphs:
            retval.append(Caffe2Backend._graph_to_net(g, Caffe2Backend._known_opset_version))
        return retval
    else:
        raise ValueError("Unsupported ONNX attribute: {}".format(onnx_arg))
# TODO: Move this into ONNX main library
class OnnxNode(object):
    """
    Reimplementation of NodeProto from ONNX, but in a form
    more convenient to work with from Python.

    We may temporarily edit these nodes to get them into Caffe2 form,
    before actually translating into the Caffe2 protobuf, since this
    is easier than decomposing everything, and putting it back together
    when we're ready.
    """
    def __init__(self, node):
        # Plain-Python mirrors of the NodeProto fields.
        self.name = str(node.name)
        self.op_type = str(node.op_type)
        self.attrs = OnnxAttributes.from_onnx(node.attribute)
        self.inputs = list(node.input)
        self.outputs = list(node.output)
# Bundle of translated ops: predict-net ops, init-net ops, and the blobs
# forming the interface between them.
Caffe2Ops = collections.namedtuple('Caffe2Ops', ['ops', 'init_ops', 'interface_blobs'])
class Caffe2Backend(Backend):
    """ONNX backend implemented on top of Caffe2.

    The class-level tables below record how ONNX operators and attributes
    map onto Caffe2: `_renamed_operators` and the attr maps cover pure
    renames, while `_special_operators` routes ops needing real
    translation to the `_create_*` classmethods.
    """
    # The greatest version of the ONNX operator set which we are aware of.
    # Models whose version is larger than this will cause us to emit a warning
    # that we are attempting to translate on a "best effort" basis.
    #
    # If you increase this, make SURE you cross-reference all BC-breaking
    # changes from one version to the next, and any that you did not
    # implement, mark as broken in _broken_operators
    _known_opset_version = 9
    # This dictionary will record operators which are KNOWN to be
    # broken, so we give a good error message rather than do something
    # bogus and then fail.
    _broken_operators = {
        # 'BrokenOp': version_it_was_broken_in
    }
    # Operators that are different between Caffe2 and
    # ONNX but only in their name.
    # In most cases, this should be empty - as the effort of ONNX is
    # to unify the operator definitions.
    _renamed_operators = {
        'GlobalMaxPool': 'MaxPool',
        'GlobalAveragePool': 'AveragePool',
        'Pad': 'PadImage',
        'Neg': 'Negative',
        'BatchNormalization': 'SpatialBN',
        'InstanceNormalization': 'InstanceNorm',
        'MatMul': 'BatchMatMul',
        'Upsample': 'ResizeNearest',
        'Identity': 'Copy',
        # NOTE(review): duplicate key (also above); same value, so harmless.
        'InstanceNormalization': 'InstanceNorm',
        'Equal': 'EQ',
        'Less': 'LT',
        'Greater': 'GT',
        'Unsqueeze': 'ExpandDims',
        'Loop': 'ONNXWhile',
        'Tile': 'NumpyTile',
        'RandomNormal': 'GaussianFill',
        'RandomUniform': 'UniformFill',
    }
    # Attribute renames applied to every operator.
    _global_renamed_attrs = {'kernel_shape': 'kernels'}
    # Per-operator attribute renames; '' drops the attribute entirely.
    _per_op_renamed_attrs = {
        'Squeeze': {'axes': 'dims'},
        'Unsqueeze': {'axes': 'dims'},
        'Transpose': {'perm': 'axes'},
        'Upsample': {'mode': '',
                     'scales': ''},
        'ConvTranspose': {'output_padding': 'adjs'},
        'Selu': {'gamma': 'scale'},
        'If': {'then_branch': 'then_net',
               'else_branch': 'else_net'},
        'RandomUniform': {'low': 'min',
                          'high': 'max'}
    }
    # operators whose behavior is different beyond renaming
    # the value is an attribute of this class that is a
    # function from ToffeIR node_def to caffe2 op_def
    _special_operators = {
        'LSTM': '_create_rnn_variant',
        'GRU': '_create_rnn_variant',
        'RNN': '_create_rnn_variant',
        'Loop': '_create_loop',
        'If': '_create_if',
        'Upsample': '_create_upsample',
        'RandomNormal': '_create_gaussian_fill'
    }
    # Dummy name generator
    _dummy_name = C.DummyName()
    @classmethod
    def dummy_name(cls):
        """Return a fresh unique blob name from the shared generator."""
        return cls._dummy_name.new_dummy_name()
    # NB: By default, you will use the LATEST definition of the operator,
    # so this interface MAY make BC-breaking changes. Specify an
    # opset_version if you don't want this to version.
    @classmethod
    def run_node(cls, node, inputs, device='CPU', opset_version=_known_opset_version, outputs_info=None):
        """Convert a single ONNX node to Caffe2 ops and execute it once.

        *inputs* is either a dict mapping input names to arrays or a
        sequence positionally matching node.input.  Returns a namedtuple
        of output arrays keyed by node.output.
        """
        super(Caffe2Backend, cls).run_node(node, inputs, device=device,
                                           outputs_info=outputs_info, opset_version=opset_version)
        value_infos = []
        device_option = get_device_option(Device(device))
        ws = Workspace()
        with core.DeviceScope(device_option):  # temporary!
            # Feed every input into the scratch workspace and record its
            # value_info so the converter can see shapes/dtypes.
            if isinstance(inputs, dict):
                for key, value in inputs.items():
                    ws.FeedBlob(key, value)
                    value_infos.append(onnx.helper.make_tensor_value_info(
                        name=key,
                        elem_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype],
                        shape=value.shape).SerializeToString())
            else:
                assert len(node.input) == len(inputs), "{}: expected {} but got {}".format(
                    node.op_type, len(node.input), len(inputs))
                for key, value in zip(node.input, inputs):
                    ws.FeedBlob(key, value)
                    value_infos.append(onnx.helper.make_tensor_value_info(
                        name=key,
                        elem_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype],
                        shape=value.shape).SerializeToString())
            ops = []
            cbackend = C.Caffe2Backend(cls._dummy_name)
            ops_str = cbackend.convert_node(node.SerializeToString(), value_infos, opset_version)
            # ops_str holds serialized (init ops, predict ops); run them all.
            for s in ops_str[0] + ops_str[1]:
                op = caffe2_pb2.OperatorDef()
                op.ParseFromString(s)
                op.device_option.CopyFrom(device_option)
                ops.append(op)
            ws.RunOperatorsOnce(ops)
            output_values = [ws.FetchBlob(name) for name in node.output]
            return namedtupledict('Outputs', node.output)(*output_values)
    @classmethod
    def _create_tensor_filling_op(cls, onnx_tensor, name=None):
        """
        Given an Onnx TensorProto, translate it into a Caffe2 operator
        which produces the given tensor filling op.

        The output blob is *name* if given, else the tensor's own name.
        Raises RuntimeError for unsupported tensor data types.
        """
        assert name or onnx_tensor.name
        name = name or onnx_tensor.name
        c2_op = caffe2_pb2.OperatorDef()
        c2_values = c2_op.arg.add()
        c2_values.name = "values"
        def tensor2list(onnx_tensor):
            # Use the onnx.numpy_helper because the data may be raw
            return onnx.numpy_helper.to_array(onnx_tensor).flatten().tolist()
        if onnx_tensor.data_type in [TensorProto.FLOAT]:
            c2_op.type = 'GivenTensorFill'
            c2_values.floats.extend(tensor2list(onnx_tensor))
        elif onnx_tensor.data_type in [TensorProto.DOUBLE]:
            c2_op.type = 'GivenTensorDoubleFill'
            c2_values.floats.extend(tensor2list(onnx_tensor))
        elif onnx_tensor.data_type in [TensorProto.INT64,
                                       TensorProto.UINT32]:
            c2_op.type = 'GivenTensorInt64Fill'
            c2_values.ints.extend(tensor2list(onnx_tensor))
        elif onnx_tensor.data_type in [TensorProto.UINT8,
                                       TensorProto.INT8,
                                       TensorProto.UINT16,
                                       TensorProto.INT16,
                                       TensorProto.INT32]:
            # Integer types narrower than 64 bits all share GivenTensorIntFill.
            c2_op.type = 'GivenTensorIntFill'
            c2_values.ints.extend(tensor2list(onnx_tensor))
        elif onnx_tensor.data_type == TensorProto.BOOL:
            c2_op.type = 'GivenTensorBoolFill'
            c2_values.ints.extend(tensor2list(onnx_tensor))
        elif onnx_tensor.data_type == TensorProto.STRING:
            c2_op.type = 'GivenTensorStringFill'
            c2_values.strings.extend(onnx_tensor.string_data)
        else:
            raise RuntimeError(
                "unrecognized tensor type {}".format(onnx_tensor.data_type))
        c2_shape = c2_op.arg.add()
        c2_shape.name = "shape"
        c2_shape.ints.extend(onnx_tensor.dims)
        c2_op.output.append(name)
        return c2_op
    @classmethod
    def _rnn_reform_weights(cls, reforms, name, hidden_size, init_net, gates, reorder_indices):
        """Slice stacked gate weights/biases apart and re-concatenate them
        in Caffe2's gate order.

        Each *reforms* entry is (src_blob, dst_suffix, do_concat,
        extra_dims); gate i occupies rows [i*hidden_size, (i+1)*hidden_size)
        of the source blob.  When do_concat is set, the per-gate slices are
        concatenated along axis 0 in *reorder_indices* order.
        """
        for name_from, name_to, do_concat, extra_dims in reforms:
            gate_blobs = ['%s/%s_%s' % (name, prefix, name_to) for prefix in gates]
            for i, x in enumerate(gate_blobs):
                dim0 = i * hidden_size, (i+1) * hidden_size
                starts, ends = zip(dim0, *extra_dims)
                init_net.Slice(name_from, x, starts=starts, ends=ends)
            if do_concat:
                reordered_gate_blobs = [gate_blobs[i] for i in reorder_indices]
                init_net.Concat(reordered_gate_blobs, ['%s/%s' % (name, name_to), cls.dummy_name()], axis=0)
    @classmethod
    def _make_rnn_direction(cls, input_blob, B, W, R, initial_states_and_names, sequence_lens,
                            pred_mh, init_net,
                            input_size, hidden_size, num_gates, direction_offset,
                            Bi, Br, W_, R_,
                            reform, make_cell, keep_outputs):
        """Build one direction (forward: 0, backward: 1) of a recurrent layer.

        Slices this direction's bias/weight blocks out of the stacked
        B/W/R blobs along with its initial states, reverses the input
        sequence for the backward direction, applies *reform* to rearrange
        gate weights, and instantiates the cell via *make_cell*.  Returns
        the outputs selected by *keep_outputs*, with output 0 re-reversed
        for the backward direction.
        """
        name = cls.dummy_name()
        # input and recurrence biases are squashed together in onnx
        # but not in caffe2
        gates_hidden_size = num_gates * hidden_size
        bias_offset = 2 * direction_offset * gates_hidden_size
        weight_offset = direction_offset * gates_hidden_size
        Bi = init_net.Slice(B, name + Bi,
                            starts=[bias_offset + 0 * gates_hidden_size],
                            ends =[bias_offset + 1 * gates_hidden_size])
        Br = init_net.Slice(B, name + Br,
                            starts=[bias_offset + 1 * gates_hidden_size],
                            ends =[bias_offset + 2 * gates_hidden_size])
        W_ = init_net.Slice(W, name + W_,
                            starts=[weight_offset + 0 * gates_hidden_size, 0],
                            ends =[weight_offset + 1 * gates_hidden_size,-1])
        R_ = init_net.Slice(R, name + R_,
                            starts=[weight_offset + 0 * gates_hidden_size, 0],
                            ends =[weight_offset + 1 * gates_hidden_size,-1])
        initial_states_sliced = []
        for initial_state, name_suffix in initial_states_and_names:
            initial_states_sliced.append(
                pred_mh.net.Slice(initial_state, name + name_suffix,
                                  starts=[direction_offset + 0, 0, 0],
                                  ends =[direction_offset + 1,-1,-1]))
        if direction_offset == 1:
            if sequence_lens is not None:
                seq_lens_for_reverse = sequence_lens
            else:
                # No explicit lengths: tile the full sequence length once
                # per batch row so ReversePackedSegs can flip the input.
                input_shape = pred_mh.net.Shape(input_blob, name + '/input_shape')
                batch_size = pred_mh.net.Slice(input_shape, name + '/batch_size_slice', starts=[1], ends=[2])
                seq_len = pred_mh.net.Slice(input_shape, name + '/seq_len_slice', starts=[0], ends=[1])
                dummy_sequence_lens = pred_mh.net.Tile([seq_len, batch_size], name + '/dummy_sequence_lens', axis=0)
                pred_mh.net.Reshape(dummy_sequence_lens, [dummy_sequence_lens, cls.dummy_name()], shape=[-1])
                seq_lens_for_reverse = pred_mh.net.Cast(dummy_sequence_lens, name + '/seq_lens_for_reverse', to=core.DataType.INT32)
        reform(Bi, Br, W_, R_, name, hidden_size, init_net)
        if direction_offset == 1:
            input = pred_mh.net.ReversePackedSegs(
                [input_blob, seq_lens_for_reverse], name + "/input-reversed")
        else:
            input = input_blob
        outputs = keep_outputs(list(make_cell(
            pred_mh,
            input,
            sequence_lens,
            initial_states_sliced,
            input_size,
            hidden_size,
            name,
            drop_states=False,
            forward_only=True,
        )))
        if direction_offset == 1:
            # Flip the output back so it lines up with the original order.
            outputs[0] = pred_mh.net.ReversePackedSegs(
                [outputs[0], seq_lens_for_reverse], name + "/output-reversed")
        return outputs
    @classmethod
    def _create_rnn_variant(cls, init_model, pred_model, n, opset_version):
        """Translate an ONNX RNN/GRU/LSTM node into Caffe2 recurrent nets.

        Needs the full init/pred models to infer the input size from value
        infos.  Weight-reshaping ops are placed in the init net when W/R/B
        come from initializers (or init-net outputs), else in the predict
        net.  Returns a Caffe2Ops bundle of predict ops, init ops, and the
        constructed predict net's external inputs.
        """
        assert init_model is not None, "cannot convert RNNs without access to the full model"
        assert pred_model is not None, "cannot convert RNNs without access to the full model"
        attrs = dict(n.attrs) # make a copy, which is safe to mutate
        hidden_size = attrs.pop('hidden_size')
        direction = force_unicode(attrs.pop('direction', 'forward'))
        if n.op_type == 'RNN':
            activation = force_unicode(attrs.pop('activations', ('tanh',))[0].lower())
        elif n.op_type == 'GRU':
            linear_before_reset = attrs.pop('linear_before_reset', 0)
        assert not attrs, "unsupported RNN attributes: " + str(attrs.keys())
        assert direction in ['forward', 'bidirectional'], "unsupported backwards RNN/GRU/LSTM"
        if n.op_type in ['RNN', 'GRU']:
            input_blob, W, R, B, sequence_lens, initial_h = n.inputs
        elif n.op_type == 'LSTM':
            input_blob, W, R, B, sequence_lens, initial_h, initial_c = n.inputs
        if sequence_lens == "":
            sequence_lens = None
        # Best-effort input-size inference from any matching value info.
        for x in itertools.chain(init_model.graph.input,
                                 init_model.graph.value_info,
                                 pred_model.graph.input,
                                 pred_model.graph.value_info):
            if x.name == W:
                input_size = x.type.tensor_type.shape.dim[2].dim_value
                break
        else:
            raise RuntimeError("best-effort shape inference for RNN/GRU/LSTM failed")
        pred_mh = ModelHelper()
        init_net = core.Net("init-net")
        # Squash the leading axis of W/R/B (presumably num_directions — confirm).
        init_net.Reshape(W, [W, cls.dummy_name()], shape=[1,-1,0])
        init_net.Squeeze(W, W, dims=[0])
        init_net.Reshape(R, [R, cls.dummy_name()], shape=[1,-1,0])
        init_net.Squeeze(R, R, dims=[0])
        init_net.Reshape(B, [B, cls.dummy_name()], shape=[1,-1])
        init_net.Squeeze(B, B, dims=[0])
        if n.op_type == 'RNN':
            def reform(*args):
                pass
            def make_cell(*args, **kwargs):
                return rnn_cell.BasicRNN(*args, activation=activation, **kwargs)
            def make_rnn(direction_offset):
                return cls._make_rnn_direction(
                    input_blob, B, W, R, [(initial_h, '/initial_h')], sequence_lens,
                    pred_mh, init_net, input_size, hidden_size, 1, direction_offset,
                    "/i2h_b", "/gates_t_b", "/i2h_w", "/gates_t_w",
                    reform, make_cell, lambda x: x)
        elif n.op_type == 'GRU':
            def reform(Bi, Br, W_, R_, name, hidden_size, init_net):
                # caffe2 has a different order from onnx. We need to rearrange
                # z r h -> r z h
                reforms = ((W_, 'i2h_w', True, [(0,-1)]),
                           (R_, 'gate_t_w', False, [(0,-1)]),
                           (Bi, 'i2h_b', True, []),
                           (Br, 'gate_t_b', False, []))
                cls._rnn_reform_weights(reforms, name, hidden_size, init_net,
                                        ['update', 'reset', 'output'], [1, 0, 2])
            def make_cell(*args, **kwargs):
                return gru_cell.GRU(*args, linear_before_reset=linear_before_reset, **kwargs)
            def make_rnn(direction_offset):
                return cls._make_rnn_direction(
                    input_blob, B, W, R, [(initial_h, '/initial_h')], sequence_lens,
                    pred_mh, init_net, input_size, hidden_size, 3, direction_offset,
                    "_bias_i2h", "_bias_gates", "/i2h_w_pre", "/gates_t_w_pre",
                    reform, make_cell, lambda x: x)
        elif n.op_type == 'LSTM':
            def reform(Bi, Br, W_, R_, name, hidden_size, init_net):
                # caffe2 has a different order from onnx. We need to rearrange
                # i o f c -> i f o c
                reforms = ((W_, 'i2h_w', True, [(0, -1)]),
                           (R_, 'gates_t_w', True, [(0, -1)]),
                           (Bi, 'i2h_b' , True, []),
                           (Br, 'gates_t_b', True, []))
                cls._rnn_reform_weights(reforms, name, hidden_size, init_net,
                                        ['input', 'output', 'forget', 'cell'], [0, 2, 1, 3])
            def make_cell(*args, **kwargs):
                return rnn_cell.LSTM(*args, **kwargs)
            def make_rnn(direction_offset):
                return cls._make_rnn_direction(
                    input_blob, B, W, R, [(initial_h, '/initial_h'), (initial_c, '/initial_c')], sequence_lens,
                    pred_mh, init_net, input_size, hidden_size, 4, direction_offset,
                    "/i2h_b", "/gates_t_b", "/i2h_w", "/gates_t_w",
                    reform, make_cell, lambda x: [x[0], x[1], x[3]])
        if direction == 'forward':
            outputs = make_rnn(0)
            # in the forward case, storage is shared between the
            # last outputs. We need to decouple them so that the
            # VariableLengthSequencePadding only mutates
            # n.outputs[0]
            for i in range(1, len(outputs)):
                pred_mh.net.Copy(outputs[i], n.outputs[i])
            if sequence_lens is not None:
                pred_mh.net.VariableLengthSequencePadding(
                    [outputs[0], sequence_lens], [outputs[0]])
            pred_mh.net.ExpandDims([outputs[0]], [n.outputs[0]], dims=[1])
        elif direction == 'bidirectional':
            outputs_f = make_rnn(0)
            outputs_b = make_rnn(1)
            concatted_output, _ = pred_mh.net.Concat(
                [outputs_f[0], outputs_b[0]], [cls.dummy_name(), cls.dummy_name()], axis=2)
            if sequence_lens is not None:
                pred_mh.net.VariableLengthSequencePadding(
                    [concatted_output, sequence_lens], [concatted_output])
            reshaped_output, _ = pred_mh.net.Reshape(concatted_output, [cls.dummy_name(), cls.dummy_name()], shape=[0,0,-1,2])
            pred_mh.net.Transpose(reshaped_output, n.outputs[0], axes=[0,2,1,3])
            for i in range(1, len(n.outputs)):
                pred_mh.net.Concat([outputs_f[i], outputs_b[i]],
                                   [n.outputs[i], cls.dummy_name()], axis=0)
        # We want to decide whether to put all of our weight-reshaping
        # operators in the init net or the predict net. We can put
        # them in the init net iff the inputs to those operators are
        # already available, either as graph initializers, or as the
        # output of other operators in the init net. The latter case
        # occurs, for example, when exporting from pytorch to onnx.
        # In most production use, we expect has_initializers to be
        # true.
        initializers = {i.name for i in init_model.graph.initializer}
        outputs = {output for node in init_model.graph.node for output in node.output}
        has_initializers = all(x in initializers or x in outputs for x in (W, R, B))
        pred_ops = []
        init_ops = []
        (init_ops if has_initializers else pred_ops).extend(init_net.Proto().op)
        pred_ops.extend(pred_mh.Proto().op)
        return Caffe2Ops(pred_ops, init_ops, list(pred_mh.Proto().external_input))
@classmethod
def _create_control_op(cls, init_model, pred_model, n, opset_version):
control_inputs = []
if '__control_inputs' in n.attrs:
control_inputs.extend(n.attrs['__control_inputs'])
node = cls._common_onnx_node_to_caffe2_op(init_model, pred_model, n, opset_version)
node.control_input.extend(control_inputs)
return Caffe2Ops([node], [], [])
@classmethod
def _remove_ssa(cls, net, remap_dict):
for op in net.op:
for i, name in enumerate(op.output):
if name in remap_dict:
op.output[i] = remap_dict[name]
for i, out in enumerate(net.external_output):
if out in remap_dict:
net.external_output[i] = remap_dict[out]
    @classmethod
    def _create_if(cls, init_model, pred_model, n, opset_version):
        """Translate ONNX If into Caffe2 If.

        Converts via _create_control_op, then plumbs any __control_inputs
        into both branch subnets' external inputs so captured blobs
        resolve inside the branches.
        """
        ops = cls._create_control_op(init_model, pred_model, n, opset_version)
        assert ops[0][0].type == 'If'
        if_op = ops[0][0]
        then_net = else_net = None
        control_inputs = []
        for arg in if_op.arg:
            if arg.name == 'then_net':
                then_net = arg.n
            if arg.name == 'else_net':
                else_net = arg.n
            if arg.name == '__control_inputs':
                control_inputs = arg.strings
        assert then_net and else_net
        then_net_outs = then_net.external_output
        else_net_outs = else_net.external_output
        op_outputs = if_op.output
        # Both branches must produce the same number of outputs as the op.
        assert len(then_net_outs) == len(else_net_outs)
        assert len(else_net_outs) == len(op_outputs)
        for arg in if_op.arg:
            if arg.name == 'then_net':
                arg.n.external_input.extend(control_inputs)
            if arg.name == 'else_net':
                arg.n.external_input.extend(control_inputs)
        return ops
    @classmethod
    def _create_loop(cls, init_model, pred_model, n, opset_version):
        """Translate ONNX Loop into Caffe2 ONNXWhile.

        Sets the trip-count/condition flags and derives the number of
        loop-carried dependencies from the body subnet's inputs.
        """
        ops = cls._create_control_op(init_model, pred_model, n, opset_version)
        assert ops[0][0].type == 'ONNXWhile'
        while_op = ops[0][0]
        while_op.arg.extend([caffe2.python.utils.MakeArgument('has_trip_count', True)])
        while_op.arg.extend([caffe2.python.utils.MakeArgument('has_cond', True)])
        while_op.arg.extend([caffe2.python.utils.MakeArgument('disable_scopes', True)])
        control_inputs = []
        for arg in while_op.arg:
            if arg.name == '__control_inputs':
                control_inputs = arg.strings
        num_loop_carried_deps = 0
        for arg in while_op.arg:
            if arg.name == 'body':
                # First two body inputs are the implicit iteration counter
                # and condition (per the ONNX Loop spec); the rest are
                # loop-carried dependencies.
                num_loop_carried_deps = len(arg.n.external_input) - 2
                arg.n.external_input.extend(control_inputs)
        while_op.arg.extend([
            caffe2.python.utils.MakeArgument('num_loop_carried_deps',
                                             num_loop_carried_deps)
        ])
        return ops
@classmethod
def _substitute_raw_value(cls, tp, raw_values_dict):
if tp.HasField('raw_data') and tp.raw_data == bytes(b'__EXTERNAL'):
if tp.name not in raw_values_dict:
raise RuntimeError('TensorProto for value {} referenced raw data but it was not found!'.format(tp.name))
else:
tp.raw_data = raw_values_dict[tp.name]
@classmethod
def _visit_and_substitute_raw_values(cls, nodes, raw_values_dict):
for node in nodes:
for attr in node.attribute:
if attr.HasField('t'):
cls._substitute_raw_value(attr.t, raw_values_dict)
for t in attr.tensors:
cls._substitute_raw_value(t, raw_values_dict)
if attr.HasField('g'):
cls._visit_and_substitute_raw_values(attr.g.node, raw_values_dict)
for g in attr.graphs:
cls._visit_and_substitute_raw_values(g.node, raw_values_dict)
@classmethod
def _external_value_resolution_pass(cls, model, raw_values_dict):
    """Resolve every '__EXTERNAL' raw-data placeholder in a model:
    first the graph initializers, then all node attributes."""
    for initializer_tp in model.graph.initializer:
        cls._substitute_raw_value(initializer_tp, raw_values_dict)
    cls._visit_and_substitute_raw_values(model.graph.node, raw_values_dict)
@classmethod
def _direct_initialize_parameters(cls, initializer, ws, device_option):
    """Feed every ONNX initializer tensor directly into the workspace.

    :param initializer: iterable of ONNX TensorProto objects
    :param ws: Caffe2 workspace that receives the blobs
    :param device_option: device placement for the created blobs
    """
    for tp in initializer:
        ws.FeedBlob(tp.name, onnx.numpy_helper.to_array(tp), device_option)
@classmethod
def _direct_initialize_inputs(cls, inputs, initialized, ws, device_option):
    """Feed placeholder blobs for graph inputs without an initializer.

    Inputs already covered by an initializer are skipped; the rest are fed
    as all-ones arrays with the declared shape and element type so nets
    can be instantiated before real input data arrives.

    :param inputs: iterable of ONNX ValueInfoProto graph inputs
    :param initialized: set of input names that already have initializers
    :param ws: Caffe2 workspace that receives the blobs
    :param device_option: device placement for the created blobs
    """
    for value_info in inputs:
        if value_info.name in initialized:
            continue
        shape = list(d.dim_value for d in value_info.type.tensor_type.shape.dim)
        ws.FeedBlob(
            value_info.name,
            np.ones(shape, dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[value_info.type.tensor_type.elem_type]),
            device_option)
@staticmethod
def optimize_onnx(input, init=False, predict=False):
    """Run the standard ONNX optimization passes over a model.

    :param input: the ONNX model to optimize (name kept for backward
        compatibility with keyword callers, despite shadowing the builtin)
    :param init: additionally run the 'split_init' pass
    :param predict: additionally run the 'split_predict' pass
    :return: the optimized model
    """
    passes = [
        'fuse_consecutive_transposes',
        'eliminate_nop_transpose',
        'fuse_transpose_into_gemm',
        'lift_lexical_references',
    ]
    if init:
        passes.append('split_init')
    if predict:
        passes.append('split_predict')
    return onnx.optimizer.optimize(input, passes)
@classmethod
def prepare_zip_archive(cls, file, device='CPU', **kwargs):
    """Load a zipped ONNX model archive and prepare it for execution.

    The archive stores the serialized model under '__MODEL_PROTO' plus one
    entry per externally-stored raw tensor value; those entries are read
    into raw_values_dict and resolved during prepare().

    :param file: path or file object of the zip archive
    :param device: device string, e.g. 'CPU'
    :return: the prepared backend representation (Caffe2Rep)
    """
    with zipfile.ZipFile(file, mode='r') as z:
        with z.open('__MODEL_PROTO', 'r') as f:
            model = onnx.load(f)
        # Bug fix: the original used set('__MODEL_PROTO'), which builds a
        # set of single characters, so the model entry itself was never
        # excluded and was re-read as a bogus raw value blob.
        blob_names = set(z.namelist()) - {'__MODEL_PROTO'}
        # TODO: make this more efficient
        raw_values_dict = {}
        for name in blob_names:
            with z.open(name, 'r') as blob_file:
                raw_values_dict[name] = blob_file.read()
        return cls.prepare(model, device, raw_values_dict=raw_values_dict, **kwargs)
@classmethod
def prepare(cls, model, device='CPU', raw_values_dict=None, **kwargs):
    '''
    For Onnx Caffe2Backend, we require that init_graph don't initialize the actual input of the predict_graph,
    for example, if "img" is the input blob for the predict_net, we require that in init_graph and in
    initializer of the predict_graph, "img" is not initalized. We don't have a check for this, since
    there is no way we can know which blob is the input of the predict_graph.
    '''
    # 'no_check_UNSAFE' is an escape hatch that skips the base-class model
    # validation entirely.
    if not kwargs.pop('no_check_UNSAFE', False):
        super(Caffe2Backend, cls).prepare(model, device, **kwargs)
    # Find the opset version declared for the default (empty) ONNX domain.
    opset_version = None
    for imp in model.opset_import:
        if not imp.HasField("domain") or imp.domain == "":
            opset_version = imp.version
            if imp.version > cls._known_opset_version:
                warnings.warn("This version of onnx-caffe2 targets ONNX operator set version {}, but the model we are trying to import uses version {}. We will try to import it anyway, but if the model uses operators which had BC-breaking changes in the intervening versions, import will fail.".format(cls._known_opset_version, imp.version))
        else:
            warnings.warn("Unrecognized operator set {}".format(imp.domain))
    if opset_version is None:
        if model.ir_version >= 0x00000003:
            raise RuntimeError("Model with IR version >= 3 did not specify ONNX operator set version (onnx-caffe2 requires it)")
        else:
            # Pre-IR-3 models implicitly use operator set 1.
            opset_version = 1
    model = onnx.shape_inference.infer_shapes(model)
    ws = Workspace()
    device_option = get_device_option(Device(device))
    init_net, predict_net = cls._onnx_model_to_caffe2_net(model, device, opset_version, False)
    # Resolve '__EXTERNAL' raw-data placeholders before feeding blobs.
    if raw_values_dict:
        cls._external_value_resolution_pass(model, raw_values_dict)
    # Directly load initializer data into blobs in workspace
    cls._direct_initialize_parameters(
        model.graph.initializer,
        ws,
        device_option,
    )
    initialized = {init.name for init in model.graph.initializer}
    cls._direct_initialize_inputs(
        model.graph.input,
        initialized,
        ws,
        device_option,
    )
    # Inputs with no initializer must be supplied by the caller at run time.
    uninitialized = [value_info.name for value_info in model.graph.input if value_info.name not in initialized]
    retval = Caffe2Rep(init_net, predict_net, ws, uninitialized)
    return retval
@classmethod
# TODO: This method needs a refactor for clarity
def _onnx_node_to_caffe2_op(cls, init_model, pred_model, node_def, opset_version):
    """Convert one ONNX node into Caffe2 operators.

    Tries the C++ converter first; otherwise dispatches to a special-case
    Python translator registered in _special_operators, falling back to the
    generic _common_onnx_node_to_caffe2_op.

    :return: a Caffe2Ops triple of (ops, init_ops, interface_blobs)
    """
    cbackend = C.Caffe2Backend(cls._dummy_name)
    if cbackend.support_onnx_import(node_def.op_type):
        # extract value infos from pred model (value infos of
        # node's inputs that are in init model should be all
        # available in pred model)
        value_infos = []
        for name in node_def.input:
            if pred_model is not None:
                for vi in itertools.chain(pred_model.graph.input,
                                          pred_model.graph.output,
                                          pred_model.graph.value_info):
                    if vi.name == name:
                        value_infos.append(vi.SerializeToString())
        op_strs = cbackend.convert_node(node_def.SerializeToString(), value_infos, opset_version)
        # The C++ converter returns serialized operators: index 0 holds
        # init-time ops, index 1 holds run-time ops.
        init_ops = []
        for s in op_strs[0]:
            op = caffe2_pb2.OperatorDef()
            op.ParseFromString(s)
            init_ops.append(op)
        ops = []
        for s in op_strs[1]:
            op = caffe2_pb2.OperatorDef()
            op.ParseFromString(s)
            ops.append(op)
        return Caffe2Ops(ops, init_ops, [])
    if node_def.op_type in cls._special_operators:
        translator = getattr(cls, cls._special_operators[node_def.op_type])
    else:
        translator = cls._common_onnx_node_to_caffe2_op
    ops = translator(init_model, pred_model, OnnxNode(node_def), opset_version)
    if isinstance(ops, Caffe2Ops):
        return ops
    # Normalize a bare operator or iterable of operators into Caffe2Ops.
    if not isinstance(ops, container_abcs.Iterable):
        ops = [ops]
    return Caffe2Ops(ops, [], [])
# Pre-opset-7 binary ops that need an explicit Caffe2 'broadcast' argument
# (see the opset_version < 7 branch in _common_onnx_node_to_caffe2_op).
_broadcast_operators = {
    'Add',
    'Sub',
}
@classmethod
def _common_onnx_node_to_caffe2_op(cls, init_model, pred_model, onnx_node, opset_version):
    """
    This translator performs the basic translation of ONNX nodes into
    Caffe2 operators. Besides doing a straightforward marshalling from
    one format to another, it also does these extra things:
    - Renames operators based on '_renamed_operators'
    - Renames attributes based on '_global_renamed_attrs' and
    '_per_op_renamed_attrs'
    If you're writing a custom translator, consider calling this first,
    and then fixing things up further.
    """
    c2_op = caffe2_pb2.OperatorDef()
    c2_op.input.extend(onnx_node.inputs)
    c2_op.output.extend(onnx_node.outputs)
    c2_op.name = onnx_node.name
    onnx_op_type = onnx_node.op_type
    # Refuse ops whose semantics changed incompatibly at or before this
    # opset version.
    broken_version = cls._broken_operators.get(onnx_op_type, float('Inf'))
    if broken_version <= opset_version:
        raise ValueError(
            "Don't know how to translate op {} in ONNX operator set v{} (I only support prior to v{})".format(onnx_op_type, opset_version, broken_version))
    c2_op.type = cls._renamed_operators.get(onnx_op_type, onnx_op_type)
    if not core.IsOperator(c2_op.type):
        raise ValueError(
            "Don't know how to translate op {}".format(onnx_op_type))

    # Map an ONNX attribute name to its Caffe2 argument name; the per-op
    # table takes precedence over the global one.
    def kmap(k):
        if (onnx_op_type in cls._per_op_renamed_attrs and
                k in cls._per_op_renamed_attrs[onnx_op_type]):
            return cls._per_op_renamed_attrs[onnx_op_type][k]
        if k in cls._global_renamed_attrs:
            return cls._global_renamed_attrs[k]
        return k
    c2_op.arg.extend(onnx_node.attrs.caffe2(kmap=kmap))
    if opset_version < 7:
        # onnx opset 7 and newest caffe2 have adopted full onnx broadcast semantics
        # so we don't need this hack anymore
        if c2_op.type in cls._broadcast_operators:
            already_broadcast = False
            for arg in c2_op.arg:
                if arg.name == 'broadcast':
                    already_broadcast = True
            if not already_broadcast:
                c2_op.arg.extend([caffe2.python.utils.MakeArgument('broadcast', 1)])
    return c2_op
@staticmethod
def _all_names_in_graph(graph):
if graph is None:
return set()
names = set()
names.update(value_info.name for value_info in graph.input)
names.update(value_info.name for value_info in graph.output)
for node in graph.node:
names.update(node.input)
names.update(node.output)
return names
@classmethod
def _graph_to_net(cls, onnx_graph, opset_version):
    """Convert a bare ONNX graph (no enclosing model) into one Caffe2 NetDef.

    Nodes that fail to convert are reported on stdout and skipped rather
    than aborting the whole conversion.
    """
    net = caffe2_pb2.NetDef()
    for node in onnx_graph.node:
        try:
            c2ops = cls._onnx_node_to_caffe2_op(
                None, None, node, opset_version)
        except Exception as e:
            print('ONNX FATAL:', e)
            continue
        # Both init-time and run-time ops land in the same net here.
        net.op.extend(c2ops.init_ops)
        net.op.extend(c2ops.ops)
        net.external_input.extend(c2ops.interface_blobs)
    net.external_output.extend(
        value_info.name for value_info in onnx_graph.output)
    net.external_input.extend(
        value_info.name for value_info in onnx_graph.input)
    return net
@classmethod
def _onnx_model_to_caffe2_net(cls, onnx_model, device, opset_version, include_initializers):
    """Convert an ONNX model into Caffe2 (init_net, pred_net) NetDefs.

    :param onnx_model: the source ONNX model
    :param device: device string used to set each net's device option
    :param opset_version: ONNX operator set version to convert against
    :param include_initializers: when True, embed initializer tensors as
        tensor-filling ops inside the init net
    :raises RuntimeError: if any node fails to convert
    :return: tuple of (init_net, pred_net)
    """
    device_option = get_device_option(Device(device))
    onnx_model = onnx.utils.polish_model(onnx_model)
    # Split the model into initialization-time and prediction-time halves.
    init_model = cls.optimize_onnx(onnx_model, init=True)
    pred_model = cls.optimize_onnx(onnx_model, predict=True)
    init_net = caffe2_pb2.NetDef()
    pred_net = caffe2_pb2.NetDef()
    init_net.name = onnx_model.graph.name + '_init'
    pred_net.name = onnx_model.graph.name + '_predict'
    if include_initializers:
        init_net.op.extend(cls._create_tensor_filling_op(tp) for tp in onnx_model.graph.initializer)
    # Seed the dummy-name generator with every existing blob name so
    # generated names cannot collide.
    cls._dummy_name.reset(cls._all_names_in_graph(init_model.graph) | cls._all_names_in_graph(pred_model.graph))
    success = True
    for net, model in ( (init_net, init_model), (pred_net, pred_model) ):
        net.device_option.CopyFrom(device_option)
        for node in model.graph.node:
            try:
                c2ops = cls._onnx_node_to_caffe2_op(
                    init_model, pred_model, node, opset_version)
            except Exception as e:
                success = False
                print('ONNX FATAL:', e)
                continue
            # Init ops always accumulate in the init net, regardless of
            # which half produced them.
            init_net.op.extend(c2ops.init_ops)
            net.op.extend(c2ops.ops)
            net.external_input.extend(c2ops.interface_blobs)
        net.external_output.extend(
            value_info.name for value_info in model.graph.output)
        net.external_input.extend(
            value_info.name for value_info in model.graph.input)
    if not success:
        raise RuntimeError('ONNX conversion failed')
    return init_net, pred_net
# wrapper for backwards compatability
@classmethod
def onnx_graph_to_caffe2_net(cls, model, device="CPU", opset_version=_known_opset_version):
    """Legacy entry point: convert a model to (init_net, predict_net),
    embedding initializer data as tensor-filling ops in the init net."""
    return cls._onnx_model_to_caffe2_net(model, device=device, opset_version=opset_version, include_initializers=True)
@classmethod
def supports_device(cls, device_str):
    """Report whether this backend can run on the given device string:
    CPU is always supported; GPU types depend on the build."""
    device = Device(device_str)
    if device.type == DeviceType.CPU:
        return True
    if core.IsGPUDeviceType(device.type):
        return workspace.has_gpu_support
    return False
@classmethod
def is_compatible(cls, model, device='CPU', **kwargs):
    """Report whether the model is (believed to be) runnable on device.

    Defers to the base class check when one exists; otherwise optimistic.
    """
    parent = super(Caffe2Backend, cls)
    parent_check = getattr(parent, 'is_compatible', None)
    if callable(parent_check) and not parent_check(model, device, **kwargs):
        return False
    # TODO: should have an unspported list of operators, be optimistic for now
    return True
# Module-level convenience aliases exposing the backend classmethods as
# plain functions, per the standard ONNX backend API surface.
prepare = Caffe2Backend.prepare
prepare_zip_archive = Caffe2Backend.prepare_zip_archive
run_node = Caffe2Backend.run_node
run_model = Caffe2Backend.run_model
supports_device = Caffe2Backend.supports_device # noqa
is_compatible = Caffe2Backend.is_compatible
| StarcoderdataPython |
1811042 | '''
Implementation of the HTCPCP protocol for raspberry pi
'''
__version__ = '1.0.0'
| StarcoderdataPython |
12852843 | #!/usr/bin/python
# Copyright (c) 2015-2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
from __future__ import print_function
from collections import OrderedDict
import re
def TranslateColorTable(infile):
    """Print Python source for an OrderedDict mapping RGB hex codes to names.

    Reads a tab-separated table whose lines look like
    ``name<TAB>red<TAB>green<TAB>blue`` (decimal components) and writes
    ``COLORS_DICT = OrderedDict([...])`` to stdout. Names that share one
    RGB value are grouped in file order; malformed lines are skipped.

    Usage: TranslateColorTable("tkcolors")

    :param infile: path to the tab-separated color table
    """
    # Compile once instead of re-matching the pattern string per line.
    line_re = re.compile(r"^(.+?)\t(.+?)\t(.+?)\t(.+?)$")
    DofL = OrderedDict()
    with open(infile) as f:
        for line in f:
            m = line_re.match(line)
            if not m:
                continue  # skip lines that do not look like a color entry
            name = m.group(1)
            red = int(m.group(2))
            grn = int(m.group(3))
            blu = int(m.group(4))
            rgb = '{0:02X}{1:02X}{2:02X}'.format(red, grn, blu)
            # setdefault replaces the original `rgb in DofL.keys()`
            # membership test + manual branch (an anti-idiom).
            DofL.setdefault(rgb, []).append(name)
    print('COLORS_DICT = OrderedDict([')
    for rgb in DofL:
        print(' (\'{0}\', {1}),'.format(rgb, repr(DofL[rgb])))
    print('])')
if __name__ == "__main__":
    # Regenerate the color dictionary from the original Tk color table.
    TranslateColorTable("colors_tk.orig")
| StarcoderdataPython |
3370142 | <gh_stars>0
from django.contrib import admin
# Register your models here.
from .models import ScoreData
admin.site.register(ScoreData) | StarcoderdataPython |
9799479 |
def Main(a: int, b: int) -> int:
    """Return the second argument unchanged.

    :param a: first operand (accepted but unused)
    :param b: the value to return
    :return: b
    """
    # The original body also assigned a dead local (`j = 0`) that was
    # never read; it has been removed.
    return b
| StarcoderdataPython |
1995292 | <filename>manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py
# Copyright (c) 2015 <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
NetApp Data ONTAP cDOT multi-SVM storage driver library.
This library extends the abstract base library and completes the multi-SVM
functionality needed by the cDOT multi-SVM Manila driver. This library
variant creates Data ONTAP storage virtual machines (i.e. 'vservers')
as needed to provision shares.
"""
import re
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import excutils
from manila import exception
from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
from manila.share.drivers.netapp import utils as na_utils
from manila import utils
LOG = log.getLogger(__name__)
SUPPORTED_NETWORK_TYPES = (None, 'flat', 'vlan')
SEGMENTED_NETWORK_TYPES = ('vlan',)
DEFAULT_MTU = 1500
class NetAppCmodeMultiSVMFileStorageLibrary(
lib_base.NetAppCmodeFileStorageLibrary):
@na_utils.trace
def check_for_setup_error(self):
    """Validate the driver configuration for multi-SVM mode.

    Requires cluster-scoped credentials (a configured vserver is only
    warned about and ignored) plus at least one aggregate matching the
    configured search pattern, then defers to the base class.

    :raises InvalidInput: only vserver-scoped credentials are configured
    :raises NetAppException: no aggregates match the search pattern
    """
    if self._have_cluster_creds:
        if self.configuration.netapp_vserver:
            msg = ('Vserver is specified in the configuration. This is '
                   'ignored when the driver is managing share servers.')
            LOG.warning(msg)
    else:  # only have vserver creds, which is an error in multi_svm mode
        msg = _('Cluster credentials must be specified in the '
                'configuration when the driver is managing share servers.')
        raise exception.InvalidInput(reason=msg)
    # Ensure one or more aggregates are available.
    if not self._find_matching_aggregates():
        msg = _('No aggregates are available for provisioning shares. '
                'Ensure that the configuration option '
                'netapp_aggregate_name_search_pattern is set correctly.')
        raise exception.NetAppException(msg)
    (super(NetAppCmodeMultiSVMFileStorageLibrary, self).
        check_for_setup_error())
@na_utils.trace
def _get_vserver(self, share_server=None, vserver_name=None):
    """Resolve a Vserver from a share server or an explicit name.

    :param share_server: share server whose backend details name a Vserver
        (takes precedence over vserver_name)
    :param vserver_name: explicit Vserver name
    :return: tuple of (vserver name, API client scoped to that Vserver)
    :raises VserverNotSpecified: backend details lack a vserver_name
    :raises InvalidInput: neither argument was provided
    :raises VserverNotFound: the resolved Vserver does not exist
    """
    if share_server:
        backend_details = share_server.get('backend_details')
        vserver = backend_details.get(
            'vserver_name') if backend_details else None
        if not vserver:
            msg = _('Vserver name is absent in backend details. Please '
                    'check whether Vserver was created properly.')
            raise exception.VserverNotSpecified(msg)
    elif vserver_name:
        vserver = vserver_name
    else:
        msg = _('Share server not provided')
        raise exception.InvalidInput(reason=msg)
    if not self._client.vserver_exists(vserver):
        raise exception.VserverNotFound(vserver=vserver)
    vserver_client = self._get_api_client(vserver)
    return vserver, vserver_client
def _get_ems_pool_info(self):
return {
'pools': {
'vserver': None,
'aggregates': self._find_matching_aggregates(),
},
}
@na_utils.trace
def _handle_housekeeping_tasks(self):
    """Run periodic cleanup: prune stale NFS export policies and deleted
    snapshots, drop unused QoS policy groups, then defer to the base class."""
    client = self._client
    client.prune_deleted_nfs_export_policies()
    client.prune_deleted_snapshots()
    client.remove_unused_qos_policy_groups()
    (super(NetAppCmodeMultiSVMFileStorageLibrary, self).
        _handle_housekeeping_tasks())
@na_utils.trace
def _find_matching_aggregates(self):
    """Return the non-root aggregate names that match the configured
    netapp_aggregate_name_search_pattern."""
    pattern = self.configuration.netapp_aggregate_name_search_pattern
    candidates = self._client.list_non_root_aggregates()
    return [name for name in candidates if re.match(pattern, name)]
@na_utils.trace
def setup_server(self, network_info, metadata=None):
    """Creates and configures new Vserver.

    :param network_info: network allocation data for the new share server
    :param metadata: unused, accepted for interface compatibility
    :return: backend details dict with 'vserver_name' and JSON 'ports'
    """
    vlan = network_info['segmentation_id']
    # Map allocation id -> IP address for persistence in backend details.
    ports = {}
    for network_allocation in network_info['network_allocations']:
        ports[network_allocation['id']] = network_allocation['ip_address']

    # Serialize per VLAN so concurrent share-server creation on the same
    # VLAN cannot race on ipspace/port configuration.
    @utils.synchronized('netapp-VLAN-%s' % vlan, external=True)
    def setup_server_with_lock():
        LOG.debug('Creating server %s', network_info['server_id'])
        self._validate_network_type(network_info)
        vserver_name = self._get_vserver_name(network_info['server_id'])
        server_details = {
            'vserver_name': vserver_name,
            'ports': jsonutils.dumps(ports)
        }
        try:
            self._create_vserver(vserver_name, network_info)
        except Exception as e:
            # Attach the partial details so the manager can clean up the
            # half-created server.
            e.detail_data = {'server_details': server_details}
            raise
        return server_details

    return setup_server_with_lock()
@na_utils.trace
def _validate_network_type(self, network_info):
    """Raise NetworkBadConfigurationException when the segmentation type
    is not one of the supported types (None, flat, vlan)."""
    network_type = network_info['network_type']
    if network_type in SUPPORTED_NETWORK_TYPES:
        return
    msg = _('The specified network type %s is unsupported by the '
            'NetApp clustered Data ONTAP driver')
    raise exception.NetworkBadConfigurationException(
        reason=msg % network_type)
@na_utils.trace
def _get_vserver_name(self, server_id):
    """Build the Vserver name by filling the configured template with the
    share server id."""
    name_template = self.configuration.netapp_vserver_name_template
    return name_template % server_id
@na_utils.trace
def _create_vserver(self, vserver_name, network_info):
    """Creates Vserver with given parameters if it doesn't exist.

    Creates (or reuses) an ipspace, creates the Vserver, then configures
    its LIFs, routes, NFS and security services. On any configuration
    failure the partially-built Vserver is deleted before re-raising.

    :param vserver_name: name for the new Vserver
    :param network_info: network allocation data for the share server
    :raises NetAppException: a Vserver with this name already exists
    """
    if self._client.vserver_exists(vserver_name):
        msg = _('Vserver %s already exists.')
        raise exception.NetAppException(msg % vserver_name)
    # NOTE(lseki): If there's already an ipspace created for the same VLAN
    # port, reuse it. It will be named after the previously created share
    # server's neutron subnet id.
    node_name = self._client.list_cluster_nodes()[0]
    port = self._get_node_data_port(node_name)
    vlan = network_info['segmentation_id']
    ipspace_name = self._client.get_ipspace_name_for_vlan_port(
        node_name, port, vlan) or self._create_ipspace(network_info)
    LOG.debug('Vserver %s does not exist, creating.', vserver_name)
    self._client.create_vserver(
        vserver_name,
        self.configuration.netapp_root_volume_aggregate,
        self.configuration.netapp_root_volume,
        self._find_matching_aggregates(),
        ipspace_name)
    vserver_client = self._get_api_client(vserver=vserver_name)
    security_services = None
    try:
        self._create_vserver_lifs(vserver_name,
                                  vserver_client,
                                  network_info,
                                  ipspace_name)
        self._create_vserver_admin_lif(vserver_name,
                                       vserver_client,
                                       network_info,
                                       ipspace_name)
        self._create_vserver_routes(vserver_client,
                                    network_info)
        vserver_client.enable_nfs(
            self.configuration.netapp_enabled_share_protocols)
        security_services = network_info.get('security_services')
        if security_services:
            self._client.setup_security_services(security_services,
                                                 vserver_client,
                                                 vserver_name)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error("Failed to configure Vserver.")
            # NOTE(dviroel): At this point, the lock was already acquired
            # by the caller of _create_vserver.
            self._delete_vserver(vserver_name,
                                 security_services=security_services,
                                 needs_lock=False)
def _get_valid_ipspace_name(self, network_id):
"""Get IPspace name according to network id."""
return 'ipspace_' + network_id.replace('-', '_')
@na_utils.trace
def _create_ipspace(self, network_info):
    """If supported, create an IPspace for a new Vserver.

    :return: the new ipspace name, the default ipspace for non-segmented
        networks or networks without a neutron subnet id, or None when the
        backend lacks ipspace support
    """
    if not self._client.features.IPSPACES:
        return None
    if (network_info['network_allocations'][0]['network_type']
            not in SEGMENTED_NETWORK_TYPES):
        return client_cmode.DEFAULT_IPSPACE
    # NOTE(cknight): Neutron needs cDOT IP spaces because it can provide
    # overlapping IP address ranges for different subnets. That is not
    # believed to be an issue for any of Manila's other network plugins.
    ipspace_id = network_info.get('neutron_subnet_id')
    if not ipspace_id:
        return client_cmode.DEFAULT_IPSPACE
    ipspace_name = self._get_valid_ipspace_name(ipspace_id)
    self._client.create_ipspace(ipspace_name)
    return ipspace_name
@na_utils.trace
def _create_vserver_lifs(self, vserver_name, vserver_client, network_info,
                         ipspace_name):
    """Create Vserver data logical interfaces (LIFs).

    Pairs cluster nodes with network allocations one-to-one; zip stops at
    the shorter sequence, so surplus nodes or allocations are ignored.
    """
    nodes = self._client.list_cluster_nodes()
    node_network_info = zip(nodes, network_info['network_allocations'])
    for node_name, network_allocation in node_network_info:
        lif_name = self._get_lif_name(node_name, network_allocation)
        self._create_lif(vserver_client, vserver_name, ipspace_name,
                         node_name, lif_name, network_allocation)
@na_utils.trace
def _create_vserver_admin_lif(self, vserver_name, vserver_client,
                              network_info, ipspace_name):
    """Create Vserver admin LIF, if defined.

    Only the first admin allocation is used, and it is placed on the
    first cluster node. Absence of admin allocations is logged and
    silently ignored.
    """
    network_allocations = network_info.get('admin_network_allocations')
    if not network_allocations:
        LOG.info('No admin network defined for Vserver %s.',
                 vserver_name)
        return
    node_name = self._client.list_cluster_nodes()[0]
    network_allocation = network_allocations[0]
    lif_name = self._get_lif_name(node_name, network_allocation)
    self._create_lif(vserver_client, vserver_name, ipspace_name,
                     node_name, lif_name, network_allocation)
@na_utils.trace
def _create_vserver_routes(self, vserver_client, network_info):
    """Create one static route per distinct tenant-subnet gateway."""
    # NOTE(gouthamr): Use the gateway from the tenant subnet/s
    # for the static routes. Do not configure a route for the admin
    # subnet because fast path routing will work for incoming
    # connections and there are no requirements for outgoing
    # connections on the admin network yet.
    seen_gateways = set()
    for net_allocation in network_info['network_allocations']:
        gateway = net_allocation['gateway']
        if gateway in seen_gateways:
            continue
        vserver_client.create_route(gateway)
        seen_gateways.add(gateway)
@na_utils.trace
def _get_node_data_port(self, node):
    """Return the first data port on `node` whose name matches the
    configured port-name search pattern.

    :raises NetAppException: no port on the node matches the pattern
    """
    pattern = self.configuration.netapp_port_name_search_pattern
    for port_name in self._client.list_node_data_ports(node):
        if re.match(pattern, port_name):
            return port_name
    raise exception.NetAppException(
        _('Could not find eligible network ports on node %s on which '
          'to create Vserver LIFs.') % node)
def _get_lif_name(self, node_name, network_allocation):
"""Get LIF name based on template from manila.conf file."""
lif_name_args = {
'node': node_name,
'net_allocation_id': network_allocation['id'],
}
return self.configuration.netapp_lif_name_template % lif_name_args
@na_utils.trace
def _create_lif(self, vserver_client, vserver_name, ipspace_name,
                node_name, lif_name, network_allocation):
    """Creates LIF for Vserver.

    Idempotent: creation is skipped if an identical interface (same node,
    port, address, netmask and VLAN) already exists.
    """
    port = self._get_node_data_port(node_name)
    ip_address = network_allocation['ip_address']
    netmask = utils.cidr_to_netmask(network_allocation['cidr'])
    vlan = network_allocation['segmentation_id']
    network_mtu = network_allocation.get('mtu')
    # Fall back to the standard Ethernet MTU when none was allocated.
    mtu = network_mtu or DEFAULT_MTU
    if not vserver_client.network_interface_exists(
            vserver_name, node_name, port, ip_address, netmask, vlan):
        self._client.create_network_interface(
            ip_address, netmask, vlan, node_name, port, vserver_name,
            lif_name, ipspace_name, mtu)
@na_utils.trace
def get_network_allocations_number(self):
    """Number of network interfaces to create: one per cluster node."""
    cluster_nodes = self._client.list_cluster_nodes()
    return len(cluster_nodes)
@na_utils.trace
def get_admin_network_allocations_number(self, admin_network_api):
    """Number of admin-network allocations: one when an admin network API
    is available, zero otherwise."""
    if admin_network_api:
        return 1
    return 0
@na_utils.trace
def teardown_server(self, server_details, security_services=None):
    """Teardown share server.

    Missing or unknown Vservers are logged and tolerated so that deletion
    of the share-server record can always proceed.
    """
    vserver = server_details.get(
        'vserver_name') if server_details else None
    if not vserver:
        LOG.warning("Vserver not specified for share server being "
                    "deleted. Deletion of share server record will "
                    "proceed anyway.")
        return
    elif not self._client.vserver_exists(vserver):
        LOG.warning("Could not find Vserver for share server being "
                    "deleted: %s. Deletion of share server "
                    "record will proceed anyway.", vserver)
        return
    self._delete_vserver(vserver, security_services=security_services)
@na_utils.trace
def _delete_vserver(self, vserver, security_services=None,
                    needs_lock=True):
    """Delete a Vserver plus IPspace and security services as needed.

    :param needs_lock: take the per-VLAN external lock around deletion;
        pass False only when the caller already holds it.
    """
    ipspace_name = self._client.get_vserver_ipspace(vserver)
    vserver_client = self._get_api_client(vserver=vserver)
    network_interfaces = vserver_client.get_network_interfaces()
    # Interfaces whose home port contains '-' live on a VLAN port
    # (named '<port>-<vlan>').
    interfaces_on_vlans = []
    vlans = []
    for interface in network_interfaces:
        if '-' in interface['home-port']:
            interfaces_on_vlans.append(interface)
            vlans.append(interface['home-port'])
    if vlans:
        # Derive a lock key from the VLAN ports. NOTE(review): joining
        # the sorted port names and taking the last '-' segment yields
        # the vlan id of only one of the ports — presumably all LIFs of
        # a Vserver share one VLAN; confirm before relying on this.
        vlans = '-'.join(sorted(set(vlans))) if vlans else None
        vlan_id = vlans.split('-')[-1]
    else:
        vlan_id = None

    def _delete_vserver_without_lock():
        self._client.delete_vserver(vserver,
                                    vserver_client,
                                    security_services=security_services)
        # Remove the ipspace only once no other data Vserver uses it.
        if ipspace_name and not self._client.ipspace_has_data_vservers(
                ipspace_name):
            self._client.delete_ipspace(ipspace_name)
        self._delete_vserver_vlans(interfaces_on_vlans)

    @utils.synchronized('netapp-VLAN-%s' % vlan_id, external=True)
    def _delete_vserver_with_lock():
        _delete_vserver_without_lock()

    if needs_lock:
        return _delete_vserver_with_lock()
    else:
        return _delete_vserver_without_lock()
@na_utils.trace
def _delete_vserver_vlans(self, network_interfaces_on_vlans):
    """Delete Vserver's VLAN configuration from ports; failures are
    logged and do not interrupt cleanup of the remaining interfaces."""
    for interface in network_interfaces_on_vlans:
        try:
            home_port = interface['home-port']
            port, vlan = home_port.split('-')
            self._client.delete_vlan(interface['home-node'], port, vlan)
        except exception.NetAppException:
            LOG.exception("Deleting Vserver VLAN failed.")
def get_configured_ip_versions(self):
    """Return the enabled IP versions: always IPv4, plus IPv6 when the
    cluster network options report it enabled."""
    net_options = self._client.get_net_options()
    if net_options['ipv6-enabled']:
        return [4, 6]
    return [4]
def manage_server(self, context, share_server, identifier, driver_options):
    """Manages a vserver by renaming it and returning backend_details."""
    new_vserver_name = self._get_vserver_name(share_server['id'])
    old_vserver_name = self._get_correct_vserver_old_name(identifier)
    # Skip the rename when the Vserver already carries the desired name.
    if old_vserver_name != new_vserver_name:
        self._client.rename_vserver(old_vserver_name, new_vserver_name)
    return new_vserver_name, {'vserver_name': new_vserver_name}
def unmanage_server(self, server_details, security_services=None):
    """No-op: nothing needs undoing on the backend when a share server
    is unmanaged."""
    return None
def get_share_server_network_info(
        self, context, share_server, identifier, driver_options):
    """Returns a list of IPs for each vserver network interface."""
    vserver_name = self._get_correct_vserver_old_name(identifier)
    _vserver, vserver_client = self._get_vserver(vserver_name=vserver_name)
    return [lif['address']
            for lif in vserver_client.get_network_interfaces()]
def _get_correct_vserver_old_name(self, identifier):
# In case vserver_name includes the template, we check and add it here
if not self._client.vserver_exists(identifier):
return self._get_vserver_name(identifier)
return identifier
| StarcoderdataPython |
1691276 | from tsp.TSPGame import TSPGame as Game
from tsp.NNetShell import NNetShell
from TSPMCTS import TSPMCTS
import numpy as np
from utils import *
# Self-play / MCTS hyperparameters used by the demo below.
args = dotdict({
    'numEps': 5,  # Number of complete self-play games to simulate during a new iteration.
    'numMCTSSims': 20,  # Number of games moves for MCTS to simulate.
    'cpuct': 1,  # Exploration constant of the PUCT selection formula.
})
if __name__ == "__main__":
    # Play numEps TSP games with MCTS-guided move selection and report
    # how many episodes ended in a win versus a loss.
    game = Game(10)
    nnet = NNetShell(game)
    mcts = TSPMCTS(args, game, nnet)
    actions = [game.start_node]
    wins, losses = 0, 0
    player = 1
    for i in range(args.numEps):
        board = game.getInitBoard()
        while game.getGameEnded(board, player) == 0:
            canon = game.getCanonicalForm(board, player)
            # Greedy choice over the MCTS visit-count distribution.
            ap = mcts.getActionProb(canon, temp=1)
            action = np.argmax(ap)
            actions.append(action)
            valid_moves = game.getValidMoves(canon, player)
            if valid_moves[action] == 0:
                # MCTS proposed an illegal move: abort the whole run.
                exit(1)
            board, player = game.getNextState(board, player, action)
        print('my', actions)
        result = game.getGameEnded(board, player)
        print('-----',result,'------')
        # Reset the recorded tour for the next episode.
        actions = [game.start_node]
        if game.getGameEnded(board, player) == 1:
            wins += 1
        else:
            losses += 1
    print('wins', wins)
    print('losses', losses)
| StarcoderdataPython |
6541668 | <reponame>Accenture/Docknet
import json
import math
import os
import pickle
import sys
from typing import List, Optional, Union, TextIO, BinaryIO
import numpy as np
from docknet.initializer.abstract_initializer import AbstractInitializer
from docknet.layer.abstract_layer import AbstractLayer
from docknet.function.cost_function import get_cost_function
from docknet.layer.dense_layer import DenseLayer
from docknet.layer.input_layer import InputLayer
from docknet.optimizer.abstract_optimizer import AbstractOptimizer
from docknet.util.notifier import Notifier
class DocknetJSONEncoder(json.JSONEncoder):
    """
    JSON encoder needed for serializing a docknet to JSON format; defines how to serialize special docknet classes such
    as the Docknet itself, the layers and Numpy arrays
    """
    def default(self, obj):
        # A Docknet serializes as its list of layers.
        if isinstance(obj, Docknet):
            return obj.layers
        # Layers describe themselves as plain dicts.
        elif isinstance(obj, AbstractLayer):
            return obj.to_dict()
        # Numpy arrays become (nested) JSON lists.
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            # Anything else: defer to the base encoder (raises TypeError).
            return super().default(obj)
class Docknet(object):
"""
The Docknet class, an extensible implementation of neural networks comprising a sequence of layers, a parameter
initializer, a cost function and its derivative, and a parameter optimizer. A Docknet instance is first to be
created, then its methods add_XXX_layer invoked in the proper sequence in order to configure the network
architecture. In order to train the network, the initializer, cost function and optimizer must be first set. After
training, methods to_pickle and to_json can be used to save the network to a file. In case the docknet has
previously been saved to a file, use methods read_pickle or read_json to create the Docknet.
"""
def __init__(self, notifier: Optional[Notifier] = None):
    """
    Initializes the docknet as an empty network (no layers)
    :param notifier: progress notifier; a fresh Notifier is created when
        omitted. (The original signature used the mutable default
        ``notifier=Notifier()``, a single instance evaluated once at
        definition time and silently shared by every Docknet created
        without an explicit notifier.)
    """
    self.layers: List[AbstractLayer] = []
    self._cost_function_name: Optional[str] = None
    self._initializer: Optional[AbstractInitializer] = None
    self._optimizer: Optional[AbstractOptimizer] = None
    self.notifier = notifier if notifier is not None else Notifier()
def add_input_layer(self, dimension: int):
    """
    Add an input layer to this DockNet after the last layer; note a Docknet
    is supposed to have a single input layer as first layer
    :param dimension: input vector size
    """
    self.layers.append(InputLayer(dimension))
def add_dense_layer(self, dimension: int, activation_function_name: str):
    """
    Add a dense layer to this DockNet after the last layer
    :param dimension: number of neurons of the layer to add
    :param activation_function_name: name of the activation function to use in this layer
    """
    previous_dimension = self.layers[-1].dimension
    self.layers.append(
        DenseLayer(previous_dimension, dimension, activation_function_name))
@property
def initializer(self) -> AbstractInitializer:
    """
    Gets the network parameter initializer
    :return: the network parameter initializer (None until set)
    """
    return self._initializer

@initializer.setter
def initializer(self, initializer: AbstractInitializer):
    """
    Sets the network parameter initializer; required for training only
    :param initializer: an initializer object (e.g. an instance of RandomNormalInitializer)
    """
    self._initializer = initializer
@property
def cost_function(self) -> str:
    """
    Gets the network's cost function name
    :return: the network's cost function name (None until configured)
    """
    return self._cost_function_name
@cost_function.setter
def cost_function(self, cost_function_name: str):
    """
    Sets the network cost function and its derivative, given a cost function name; required for training only
    :param cost_function_name: the cost function name (e.g. 'cross_entropy')
    """
    # Bug fix: the name was never stored, so the `cost_function` getter
    # (which returns self._cost_function_name) still reported None after
    # this setter ran.
    self._cost_function_name = cost_function_name
    self._cost_function, self._cost_function_prime = get_cost_function(cost_function_name)
@property
def optimizer(self) -> AbstractOptimizer:
    """
    Gets the network parameter optimizer
    :return: the network parameter optimizer (None until set)
    """
    return self._optimizer

@optimizer.setter
def optimizer(self, optimizer: AbstractOptimizer):
    """
    Sets the network's optimizer; required for training only
    :param optimizer: the network's optimizer (e.g. an instance of GradicentDescentOptimizer)
    """
    self._optimizer = optimizer
def train_batch(self, X: np.array, Y: np.array) -> float:
    """
    Train the network for a batch of data
    :param X: 2-dimensional array of input vectors, one vector per column
    :param Y: 2-dimensional array of expected values to predict, one single row with same amount of columns than X
    :return: aggregated cost for the entire batch (without averaging)
    """
    # Forward pass: each layer caches its activations for the backward pass.
    A = X
    for layer in self.layers:
        A = layer.cached_forward_propagate(A)
    Y_circ = A  # network output (predictions) for the batch
    # Batch cost and its gradient w.r.t. the predictions.
    J = self._cost_function(Y_circ, Y)
    dJdY_circ = self._cost_function_prime(Y_circ, Y)
    dJdA = dJdY_circ
    # Backward pass in reverse layer order; gradients are prepended so that
    # network_gradients ends up aligned with self.layers.
    network_gradients = []
    for layer in reversed(self.layers):
        dJdA, layer_gradients = layer.backward_propagate(dJdA)
        network_gradients.insert(0, layer_gradients)
    # Apply the parameter update rule (delegated to the configured optimizer).
    self._optimizer.optimize(self.layers, network_gradients)
    return J
def train(self, X: np.array, Y: np.array, batch_size: int, max_number_of_epochs: int, error_delta=0.,
          max_epochs_within_delta=-1, stop_file_pathname: str = None, initialize=True):
    """
    Train the network for a given set of input vectors and expected predictions up to reaching one of 3 stopping
    conditions:
    1) a maximum number of epochs is attained
    2) the error difference between 2 consecutive epochs is below a threshold for a given amount of epochs
    3) a file at a given location exists (create the file to manually stop the training)
    :param X: 2-dimensional array of input vectors, one vector per column
    :param Y: 2-dimensional array of expected values to predict, one single row with same amount of columns than X
    :param batch_size: amount of input vectors to use per training iteration
    :param max_number_of_epochs: maximum number of epochs for stop condition 1
    :param error_delta: error difference threshold of stop condition 2
    :param max_epochs_within_delta: maximum number of epochs for stop condition 2
    :param stop_file_pathname: path of a file whose existence stops the training (stop condition 3)
    :param initialize: initialize parameters before starting to train
    :return: lists of average error per epoch and per iteration
    """
    # Negative limits mean "unbounded".
    if max_number_of_epochs < 0:
        max_number_of_epochs = sys.maxsize
    if max_epochs_within_delta < 0:
        max_epochs_within_delta = sys.maxsize
    epoch = 1
    epochs_within_delta = 0
    # Number of batches per epoch; the last batch may be smaller.
    batch_count = math.ceil(X.shape[1] / batch_size)
    if initialize:
        self.initializer.initialize(self.layers)
        self.optimizer.reset(self.layers)
    iteration_errors = []
    epoch_errors = []
    while epoch <= max_number_of_epochs and\
            epochs_within_delta <= max_epochs_within_delta and\
            not (stop_file_pathname and os.path.exists(stop_file_pathname)):
        batch_begin = 0
        epoch_error = 0.
        # All full-size batches except the last one...
        for i in range(batch_count - 1):
            batch_end = batch_begin + batch_size
            X_batch = X[:, batch_begin:batch_end]
            Y_batch = Y[:, batch_begin:batch_end]
            iteration_error = self.train_batch(X_batch, Y_batch)
            iteration_errors.append(iteration_error / X_batch.shape[1])
            epoch_error += iteration_error
            batch_begin = batch_end
        # ...then the remainder (also covers the batch_count == 1 case).
        X_batch = X[:, batch_begin:]
        Y_batch = Y[:, batch_begin:]
        iteration_error = self.train_batch(X_batch, Y_batch)
        iteration_errors.append(iteration_error / X_batch.shape[1])
        epoch_error += iteration_error
        # Average the aggregated cost over all samples of the epoch.
        epoch_error /= X.shape[1]
        epoch_errors.append(epoch_error)
        self.notifier.info("Loss after epoch {}: {}".format(epoch, epoch_error))
        # NOTE(review): despite the docstring, this compares the epoch error
        # itself against error_delta, not the difference between two
        # consecutive epoch errors — confirm the intended semantics.
        if epoch_error > error_delta:
            epochs_within_delta = 0
        else:
            epochs_within_delta += 1
        epoch += 1
    return epoch_errors, iteration_errors
def predict(self, X: np.array):
    """Run a pure forward pass (no gradient caching) over the network.

    :param X: 2-dimensional array of input vectors, one vector per column
    :return: array with the computed predictions
    """
    activations = X
    for layer in self.layers:
        activations = layer.forward_propagate(activations)
    return activations
def to_pickle(self, pathname_or_file: Union[str, BinaryIO]):
    """Serialize this network (parameters included) with pickle.

    Intended for use after training, so the model can later be reloaded
    for making predictions without training again.

    :param pathname_or_file: path to a .pkl file, or a binary file-like object
    """
    if isinstance(pathname_or_file, str):
        with open(pathname_or_file, 'wb') as output_file:
            pickle.dump(self, output_file)
    else:
        pickle.dump(self, pathname_or_file)
def to_json(self, pathname_or_file: Union[str, TextIO], pretty_print=False):
    """Serialize this network's parameters to JSON.

    Intended for debugging/testing.  For actually reusing a trained
    network, prefer :meth:`to_pickle`, which saves the parameters in a
    more efficient binary format.

    :param pathname_or_file: path to a JSON file, or a text file-like object
    :param pretty_print: generate indented, key-sorted JSON for manual review
    """
    kwargs = {'cls': DocknetJSONEncoder}
    if pretty_print:
        kwargs['indent'] = 4
        kwargs['sort_keys'] = True
    if isinstance(pathname_or_file, str):
        # Bug fix: json.dump writes str, so the file must be opened in text
        # mode; the original 'wb' plus an encoding raised ValueError.
        with open(pathname_or_file, 'w', encoding='UTF-8') as fp:
            json.dump(self, fp, **kwargs)
    else:
        json.dump(self, pathname_or_file, **kwargs)
def read_pickle(pathname: str) -> Docknet:
    """Load a Docknet previously saved with Docknet.to_pickle.

    :param pathname: path and name of the pickle file
    :return: the deserialized Docknet
    """
    with open(pathname, 'rb') as pickle_file:
        return pickle.load(pickle_file)
def read_json(pathname: str) -> Docknet:
    """Rebuild a Docknet from a JSON description saved with to_json.

    :param pathname: path and name of the json file
    :return: the initialized Docknet
    """
    # Bug fix: json.load's ``encoding`` argument was removed in Python 3.9
    # (ignored since 3.1); open the file in text mode with an explicit
    # encoding instead of 'rb' plus the removed keyword.
    with open(pathname, 'r', encoding='UTF-8') as fp:
        layers_description = json.load(fp)
    docknet = Docknet()
    for desc in layers_description:
        if desc['type'] == 'input':
            docknet.add_input_layer(desc['dimension'])
        elif desc['type'] == 'dense':
            docknet.add_dense_layer(desc['dimension'], desc['activation_function'])
        if 'params' in desc:
            # Restore the saved parameters as numpy arrays on the layer
            # that was just appended.
            params = {k: np.array(v) for k, v in desc['params'].items()}
            docknet.layers[-1].params = params
    return docknet
| StarcoderdataPython |
#In this script I will try to assess any possible difference
#in treatment outcome across patients of different groups
#(isolated with hierarchical clustering on snps genotypes)
import pandas as pd
from scipy import stats
#importing table with treatment infos
df_rr = pd.read_csv('./checks/delta_pl_ther.tsv', sep = '\t')
#dropping patient where difference between PL and
#first day of first treatment is higher than 90 days
# (i.e. keep only rows with |delta_pl| < 91 days)
df = df_rr[(abs(df_rr['delta_pl']) < 91)]
def getGroups(file, stops):
    """Split the ids listed in *file* (one integer per line) into groups.

    Every id contained in *stops* starts a new group beginning with that
    id; the following ids are appended to it until the next stop id.
    If the very first id is a stop, the result starts with an empty group
    (same behaviour as the original implementation).
    """
    with open(file) as id_file:
        ordered_ids = [int(raw_line.strip()) for raw_line in id_file]
    groups = []
    current_group = []
    for patient_id in ordered_ids:
        if patient_id in stops:
            groups.append(current_group)
            current_group = [patient_id]
        else:
            current_group.append(patient_id)
    groups.append(current_group)
    return groups
#extracting groups from snps genotypes based clustering
# Patient ids 862 and 826 each start a new group in the ordered id list
# produced by the hierarchical clustering (see getGroups above).
snp_stops = [862, 826]
snp_groups = getGroups('./data/cluster_groups/ordered_rr_snps.txt', snp_stops)
group1 = snp_groups[0]
group2 = snp_groups[1]
group3 = snp_groups[2]
# Restrict the treatment table to each cluster; the trailing numbers are
# the original author's notes on group sizes.
df1 = df[df['patient_id'].isin(group1)] #18
df2 = df[df['patient_id'].isin(group2)] #22
df3 = df[df['patient_id'].isin(group3)] #23
#defining function to get contingency table:
def getContTab(s1, s2):
    """Build a 2x2 contingency table from two binary (0/1) outcome lists.

    Rows are [successes, failures]; columns are [s1, s2].
    """
    successes = [sum(s1), sum(s2)]
    failures = [len(s1) - successes[0], len(s2) - successes[1]]
    return [successes, failures]
def writeStat(matrix, filename):
    """Write a matrix of p-value strings as a TSV table under ./data/thertest/.

    :param matrix: list of rows, each a list of p-value strings (one per group)
    :param filename: base name of the output file (without extension)
    """
    import os
    # Robustness fix: create the output directory when missing, instead of
    # failing with FileNotFoundError on a fresh checkout.
    os.makedirs('./data/thertest', exist_ok=True)
    with open('./data/thertest/{}.tsv'.format(filename), 'w') as f:
        f.write('\tg1\tg2\tg3\n')
        for i, row in enumerate(matrix, start = 1):
            f.write('g{}\t'.format(i) + '\t'.join(row) + '\n')
#testing if patients of different groups have a different response
#to therapy (without distinction of FL vs SL)
def getInterruptionCause(df):
    """Encode the 're_end1' column as a binary outcome list.

    Missing values are treated as 'A'; 'P' maps to 1 and 'A' to 0.
    """
    outcome = df['re_end1'].fillna('A')
    outcome = outcome.replace({'P': 1, 'A': 0})
    return list(outcome.dropna())
s1 = getInterruptionCause(df1)
s2 = getInterruptionCause(df2)
s3 = getInterruptionCause(df3)
# All pairwise Fisher exact tests between the three clusters; the result
# matrix therefore also contains the trivial diagonal comparisons.
liste_a = [s1, s2, s3]
liste_b = [s1, s2, s3]
matrix = []
for lista1 in liste_a:
    pvals = []
    for lista2 in liste_b:
        table = getContTab(lista1, lista2)
        print(table)
        pvals.append(str(stats.fisher_exact(table)[1]))
    matrix.append(pvals)
writeStat(matrix, 'snps_ther1_end')
#testing if patients of different groups have been assigned
#to different therapy classes (first or second line)
def getTherapyLine(df):
    """Encode the 'TL' column as a binary list: 'FL' -> 1, 'SL' -> 0.

    Missing values are dropped (both before and after the mapping).
    """
    line = df['TL'].dropna()
    line = line.replace({'FL': 1, 'SL': 0})
    return list(line.dropna())
s1 = getTherapyLine(df1)
s2 = getTherapyLine(df2)
s3 = getTherapyLine(df3)
# Same pairwise Fisher exact test layout as above, this time on the
# therapy-line assignment (FL vs SL).
liste_a = [s1, s2, s3]
liste_b = [s1, s2, s3]
matrix = []
for lista1 in liste_a:
    pvals = []
    for lista2 in liste_b:
        table = getContTab(lista1, lista2)
        print(table)
        pvals.append(str(stats.fisher_exact(table)[1]))
    matrix.append(pvals)
writeStat(matrix, 'snps_ther_line')
4812580 | <gh_stars>1-10
import os
import hydra
import jax
import jax.numpy as jnp
from flax.serialization import to_state_dict
from omegaconf import DictConfig, OmegaConf
from models.jax import get_model
from neural_kernels.nads import mixed_derivative_nad_decomposition
from utils.misc import get_apply_fn
@hydra.main(config_path="config/compute_nads", config_name="config")
def main(cfg: DictConfig) -> None:
print(OmegaConf.to_yaml(cfg))
# Load model
model_key = jax.random.PRNGKey(cfg.seed)
model = get_model(**cfg.model)
init_variables = model.init(model_key, jnp.zeros(cfg.nads.shape, jnp.float32))
apply_fn = get_apply_fn(model, expose_bn=False, variables=init_variables, train=False)
_, init_params = init_variables.pop("params")
print("Computing NADs...")
# Compute NADs
eigvals, nads = mixed_derivative_nad_decomposition(apply_fn, init_params, **cfg.nads)
print("Done!")
print("Saving results...")
# Save results
init_variables_state_dict = to_state_dict(init_variables)
save_path = f"{hydra.utils.get_original_cwd()}/artifacts/nads/{cfg.model.model_name}"
os.makedirs(save_path, exist_ok=True)
jnp.save(f"{save_path}/nads.npy", nads)
jnp.save(f"{save_path}/eigvals.npy", eigvals)
jnp.save(
f"{save_path}/init_variables.npy",
init_variables_state_dict,
)
if __name__ == "__main__":
main()
| StarcoderdataPython |
8199332 | <filename>website/dbCache.py
from website import session
# Module-level staleness flag: True whenever the cached rows may be out of
# date and must be re-read from the database before the next read access.
needToRecompute = True
def recompute():
    """Reload the full user table into the module-level cache.

    Side effects: rebinds cachedDBData and nextUserId, prints every row
    (debug output kept from the original) and clears needToRecompute.
    """
    global cachedDBData, nextUserId, needToRecompute
    cachedDBData = list(
        session.execute(
            "SELECT id, username, password, misc FROM keyspace1.data;"
        )
    )
    for row in cachedDBData:
        print(row)
    if cachedDBData:
        nextUserId = 1 + max(row.id for row in cachedDBData)
    else:
        nextUserId = 1
    needToRecompute = False
def getDBData():
    """Return the cached rows, refreshing the cache first when it is stale."""
    global cachedDBData, nextUserId, needToRecompute
    if needToRecompute:
        recompute()
    return cachedDBData
def addUser(username, password):
    """Insert a new user row with the next free id and an empty misc JSON.

    Marks the cache stale so the new row becomes visible on the next read.
    """
    global needToRecompute
    row_values = (nextUserId, username, password, "{}")
    session.execute(
        """
        INSERT INTO keyspace1.data (id, username, password, misc)
        VALUES (%s, %s, %s, %s)
        """,
        row_values,
    )
    needToRecompute = True
def updateMisc(user_id, misc):
    """Overwrite the misc column for the given user id and mark the cache stale."""
    global needToRecompute
    session.execute(
        "UPDATE keyspace1.data SET misc = %s WHERE id = %s",
        (misc, user_id),
    )
    needToRecompute = True
| StarcoderdataPython |
8017995 | import py
from pypy import conftest
from pypy.translator.translator import TranslationContext
from pypy.translator.llsupport.wrapper import new_wrapper
from pypy.rpython.rmodel import PyObjPtr
from pypy.rpython.llinterp import LLInterpreter
from pypy.rpython.lltypesystem import lltype
class TestMakeWrapper:
    """Tests for pypy's new_wrapper: every wrapper argument and its result
    must be a PyObjPtr, and the wrapped function must keep its semantics."""
    def getgraph(self, func, argtypes=None):
        # Translate *func* with the reference GC and return
        # (original graph, wrapper graph, translation context).
        from pypy.config.pypyoption import get_pypy_config
        config = get_pypy_config(translating=True)
        config.translation.gc = "ref"
        config.translation.simplifying = True
        t = TranslationContext(config=config)
        if argtypes is None:
            argtypes = []
        a = t.buildannotator()
        a.build_types(func, argtypes)
        a.simplify()
        t.buildrtyper().specialize()
        wrapperptr = new_wrapper(func, t)
        wrappergraph = wrapperptr._obj.graph
        F = lltype.typeOf(wrapperptr).TO
        # The wrapper must take and return python objects only.
        assert F.ARGS == (PyObjPtr,) * len(wrappergraph.getargs())
        assert F.RESULT == PyObjPtr
        for inputarg in wrappergraph.getargs():
            assert inputarg.concretetype == PyObjPtr
        assert wrappergraph.getreturnvar().concretetype == PyObjPtr
        return t.graphs[0], wrappergraph, t
    def interpret(self, t, graph, *args):
        # Run *graph* on the ll-interpreter, wrapping each Python-level
        # argument as a pyobject pointer, and unwrap the result.
        interp = LLInterpreter(t.rtyper)
        result = interp.eval_graph(graph, [lltype.pyobjectptr(arg)
                                           for arg in args])
        return result._obj.value
    def test_simple(self):
        def f(x):
            return x * 3
        graph, wrappergraph, t = self.getgraph(f, [int])
        res = self.interpret(t, wrappergraph, 3)
        assert res == 9
    def test_manyargs(self):
        def f(x, y, z):
            return x * y + z
        graph, wrappergraph, t = self.getgraph(f, [int, int, int])
        res = self.interpret(t, wrappergraph, 3, 4, 5)
        assert res == 3 * 4 + 5
    def test_returnnone(self):
        def f():
            pass
        graph, wrappergraph, t = self.getgraph(f)
        res = self.interpret(t, wrappergraph)
        assert res is None
| StarcoderdataPython |
12862220 | <filename>dmm/dmm_data/__init__.py
# NOTE(review): this assigns to the name ``all``, shadowing the builtin
# all(); almost certainly ``__all__ = ['load']`` was intended so that
# ``from dmm.dmm_data import *`` exports only ``load`` — confirm and fix.
all=['load']
| StarcoderdataPython |
3252975 | <reponame>guaix-ucm/azotea
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright (c) 2020
#
# See the LICENSE file for details
# see the AUTHORS file for authors
# ----------------------------------------------------------------------
#--------------------
# System wide imports
# -------------------
import logging
import requests
import argparse
import os.path
import pprint
import json
import sqlite3
#--------------
# local imports
# -------------
from .packer import make_new_release
from . import SANDBOX_DOI_PREFIX, SANDBOX_URL_PREFIX, PRODUCTION_URL_PREFIX, PRODUCTION_DOI_PREFIX, DEF_DBASE
# -----------------------
# Module global variables
# -----------------------
# Logger shared by every function in this module.
log = logging.getLogger("azotenodo")
# -----------------------
# Module global functions
# -----------------------
def setup_context(options, file_options):
    """Build the runtime context namespace from CLI and config-file options.

    Selects the Zenodo sandbox or production endpoint (URL prefix, DOI
    prefix and API key) depending on the --test flag.
    """
    ctx = argparse.Namespace()
    ctx.dbase = options.dbase
    if options.test:
        endpoint = (SANDBOX_URL_PREFIX, SANDBOX_DOI_PREFIX, file_options.api_key_sandbox)
    else:
        endpoint = (PRODUCTION_URL_PREFIX, PRODUCTION_DOI_PREFIX, file_options.api_key_production)
    ctx.url_prefix, ctx.doi_prefix, ctx.access_token = endpoint
    return ctx
def open_database(dbase_path):
    """Open (creating the file first if necessary) the SQLite database.

    :param dbase_path: path of the SQLite database file
    :return: an open sqlite3 connection
    """
    if not os.path.exists(dbase_path):
        # Touch the file so its creation can be logged explicitly.
        with open(dbase_path, 'w'):
            pass
        log.info("Created database file {0}".format(dbase_path))
    return sqlite3.connect(dbase_path)
def select_contributors(connection):
    """Return a cursor over the distinct observers that contributed images.

    Bug fix: get_contributors() unpacks three values per row
    (surname, family_name, organization), but this query only selected
    two columns, so every call raised ValueError.  o.organization is now
    selected as well (assumes observer_t has an ``organization`` column —
    confirm against the schema).

    :param connection: an open DB-API connection to the azotea database
    :return: cursor yielding (surname, family_name, organization) rows
    """
    cursor = connection.cursor()
    cursor.execute(
        '''
        SELECT DISTINCT o.surname, o.family_name, o.organization
        FROM image_t AS i
        JOIN observer_t AS o USING(observer_id)
        ORDER BY o.surname ASC
        ''')
    return cursor
def get_contributors(connection):
    """Build Zenodo 'contributors' records for every distinct observer.

    :param connection: an open DB-API connection to the azotea database
    :return: list of dicts with 'name', 'type' and optional 'affiliation'
    """
    records = []
    for surname, family_name, organization in select_contributors(connection):
        entry = {
            'name': "{0}, {1}".format(surname, family_name),
            'type': 'DataCollector',
        }
        if organization is not None:
            entry['affiliation'] = organization
        records.append(entry)
    return records
# ------------
# Real actions
# ------------
def do_zenodo_licenses(context):
    """GET the Zenodo licenses list and return the decoded JSON body.

    :raises Exception: carrying the decoded body on any 4xx/5xx status
    """
    headers = {"Content-Type": "application/json"}
    params = {'access_token': context.access_token,}
    url = "{0}/licenses/".format(context.url_prefix)
    log.debug("Licenses List Request to {0} ".format(url))
    http_response = requests.get(url, params=params, headers=headers)
    log.info("Licenses List Status Code {0} ".format(http_response.status_code))
    response = http_response.json()
    if context.verbose:
        print("="*80)
        context.pprinter.pprint(response)
        print("="*80)
    if 400 <= http_response.status_code <= 599:
        raise Exception(response)
    return response
def do_zenodo_list(context, title, published):
    """List depositions whose title matches *title*.

    :param context: runtime context (access token, URL prefix, verbosity)
    :param title: title string used in the Zenodo search query
    :param published: when True list published depositions, else drafts
    :return: decoded JSON list of depositions
    :raises Exception: carrying the decoded body on any 4xx/5xx status
    """
    headers = {"Content-Type": "application/json"}
    status = 'published' if published else 'draft'
    # Cleanup: the original assigned ``query`` twice, the first value
    # ('type:dataset AND title:...') being dead code immediately
    # overwritten; only the effective query is kept.
    query = 'title:{0}'.format(title)
    params = {'access_token': context.access_token, 'status': status, 'sort': 'mostrecent', 'q': query}
    url = "{0}/deposit/depositions".format(context.url_prefix)
    log.debug("Deposition List Request to {0} ".format(url))
    r = requests.get(url, params=params, headers=headers)
    log.info("Deposition List Status Code {0} ".format(r.status_code))
    response = r.json()
    if context.verbose:
        print("=============== BEGIN DEPOSIT LISTING RESPONSE ===============")
        context.pprinter.pprint(response)
        print("=============== END DEPOSIT LISTING RESPONSE ===============")
    if 400 <= r.status_code <= 599:
        raise Exception(response)
    return response
def do_zenodo_search(context, title, published):
    """Return the most recent published deposition whose title equals *title*."""
    headers = {"Content-Type": "application/json"}
    # NOTE(review): the *published* argument is ignored; the status is always
    # 'published' here — confirm whether drafts should be searchable too.
    status = 'published'
    params = {'access_token': context.access_token, 'status':status, 'sort': 'mostrecent',}
    url = "{0}/deposit/depositions".format(context.url_prefix)
    log.info("Searching dataset with title {0}".format(title))
    log.debug("Deposition List Request to {0} ".format(url))
    r = requests.get(url, params=params, headers=headers)
    response = r.json()
    # Exact-title filtering is done client-side on the listing response.
    response = list(filter(lambda item: item['title'] == title, response))
    log.info("Deposition search OK, HTTP status code {0} ".format(r.status_code))
    if context.verbose:
        print("=============== BEGIN DEPOSITION SEARCH BY TITLE RESPONSE ===============")
        context.pprinter.pprint(response)
        print("=============== END DEPOSITION SEARCH BY TITLE RESPONSE ===============")
    if 400 <= r.status_code <= 599:
        raise Exception(response)
    # NOTE(review): raises IndexError when no deposition matches the title.
    return response[0]
def do_zenodo_delete(context, identifier):
    """DELETE the deposition with the given id and return the decoded body.

    :raises Exception: carrying the decoded body on any 4xx/5xx status
    """
    headers = {"Content-Type": "application/json"}
    params = {'access_token': context.access_token}
    url = "{0}/deposit/depositions/{1}".format(context.url_prefix, identifier)
    log.debug("Deposition Delete Request to {0} ".format(url))
    r = requests.delete(url, params=params, headers=headers)
    log.info("Deposition Delete Status Code {0} ".format(r.status_code))
    # NOTE(review): a successful delete may return an empty body, in which
    # case r.json() would raise — confirm against the Zenodo API.
    response = r.json()
    if context.verbose:
        print("=============== BEGIN DEPOSIT DELETION RESPONSE ===============")
        context.pprinter.pprint(response)
        print("=============== END DEPOSIT DELETETION RESPONSE ===============")
    if 400 <= r.status_code <= 599:
        raise Exception(response)
    return response
def do_zenodo_deposit(context):
    """Create a brand-new, empty deposition on Zenodo.

    The title/version in the log line come from the context; the deposition
    itself is created empty (metadata is added later by do_zenodo_metadata).

    :return: decoded JSON of the new deposition (contains its 'id')
    :raises Exception: carrying the decoded body on any 4xx/5xx status
    """
    headers = {"Content-Type": "application/json"}
    params = {'access_token': context.access_token}
    log.info("Creating new Deposition for title {0}, version {1} ".format(context.title, context.version))
    url = "{0}/deposit/depositions".format(context.url_prefix)
    log.debug("Deposition Request to {0} ".format(url))
    r = requests.post(url, params=params, headers=headers, json={})
    response = r.json()
    if context.verbose:
        print("=============== BEGIN DEPOSIT CREATION RESPONSE ===============")
        context.pprinter.pprint(response)
        print("=============== END DEPOSIT CREATION RESPONSE ===============")
    if 400 <= r.status_code <= 599:
        raise Exception(response)
    log.info("Deposition created with id {0}, HTTP status code {1}".format(response['id'], r.status_code))
    return response
def do_zenodo_metadata(context, identifier):
    """PUT the dataset metadata onto the deposition with the given id.

    The contributor list is built from the observers recorded in the local
    database (context.dbase).

    :return: decoded JSON of the updated deposition (contains links.bucket)
    :raises Exception: carrying the decoded body on any 4xx/5xx status
    """
    log.info("Deposit Metadata for id {0} to Zenodo".format(identifier))
    connection = open_database(context.dbase)
    contributors = get_contributors(connection)
    headers = {"Content-Type": "application/json"}
    params = {'access_token': context.access_token}
    # NOTE(review): creators are hard-coded below; consider moving them to
    # configuration if the author list ever changes.
    metadata = {
        'title' : context.title,
        'upload_type': 'dataset',
        'version' : context.version,
        'communities': [ {'identifier': context.community} ],
        'creators' : [
            {'name': '<NAME>', 'affiliation': 'UCM', 'orcid': 'https://orcid.org/0000-0002-8993-5894'},
            {'name': '<NAME>','affiliation': 'UCM', 'orcid': 'https://orcid.org/0000-0002-3725-0586'}
        ],
        'contributors': contributors,
        'description': 'Latest monthly AZOTEA reduced CSV files',
        'access_right': 'open',
    }
    url = "{0}/deposit/depositions/{1}".format(context.url_prefix, identifier)
    log.debug("Deposition Metadata Request to {0} ".format(url))
    r = requests.put(url, params=params, headers=headers, json={'metadata':metadata})
    response = r.json()
    if context.verbose:
        print("=============== BEGIN METADATA RESPONSE ===============")
        context.pprinter.pprint(response)
        print("=============== END METADATA RESPONSE ===============")
    if 400 <= r.status_code <= 599:
        raise Exception(response)
    log.info("Metadata updated for id {0}, HTTP status code {1}".format(identifier, r.status_code))
    return response
def do_zenodo_upload(context, zip_file, bucket_url):
    """PUT *zip_file* into the deposition's file bucket.

    :param zip_file: local path of the ZIP archive to upload
    :param bucket_url: 'links.bucket' URL taken from the deposition JSON
    :return: decoded JSON describing the uploaded file
    :raises Exception: carrying the decoded body on any 4xx/5xx status
    """
    # NOTE(review): ``headers`` is built here but never passed to the
    # requests.put call below — either pass it or drop it.
    headers = {"Content-Type": "application/json"}
    params = {'access_token': context.access_token}
    filename = os.path.basename(zip_file)
    url = "{0}/{1}".format(bucket_url, filename)
    with open(zip_file, "rb") as fp:
        log.debug("Deposition File Upload Request to {0} ".format(url))
        # The file object is streamed as the raw request body.
        r = requests.put(url, data=fp, params=params)
    response = r.json()
    if context.verbose:
        print("=============== BEGIN FILE UPLOAD RESPONSE ===============")
        context.pprinter.pprint(response)
        print("=============== END FILE UPLOAD RESPONSE ===============")
    if 400 <= r.status_code <= 599:
        raise Exception(response)
    log.info("Deposition File Upload succesful for {0}, HTTP status code {1} ".format(zip_file, r.status_code))
    return response
def do_zenodo_publish(context, identifier):
    """Publish the draft deposition with the given id.

    Bug fix: every other do_zenodo_* helper returns the decoded JSON
    response, but this one returned *context*, making its result (e.g. the
    assigned DOI) unusable by callers.  It now returns the response; the
    existing callers in this module assign but never use the return value,
    so the change is backward-compatible.

    :return: decoded JSON of the published deposition (contains 'doi')
    :raises Exception: carrying the decoded body on any 4xx/5xx status
    """
    headers = {"Content-Type": "application/json"}
    params = {'access_token': context.access_token}
    url = "{0}/deposit/depositions/{1}/actions/publish".format(context.url_prefix, identifier)
    log.info("Publish new dataset for {0}".format(identifier))
    log.debug("Deposition Publish Request to {0} ".format(url))
    r = requests.post(url, params=params, headers=headers, json={})
    response = r.json()
    if context.verbose:
        print("=============== BEGIN PUBLISH RESPONSE ===============")
        context.pprinter.pprint(response)
        print("=============== END PUBLISH RESPONSE ===============")
    if 400 <= r.status_code <= 599:
        raise Exception(response)
    log.info("Publication succesful doi = {0}, HTTP status code {1} ".format(response['doi'], r.status_code))
    return response
def do_zenodo_newversion(context, latest_id):
    """Create a new draft version of an already-published deposition.

    :param latest_id: id of the latest published deposition
    :return: decoded JSON; 'links.latest_draft' holds the new draft's URL
    :raises Exception: carrying the decoded body on any 4xx/5xx status
    """
    headers = {"Content-Type": "application/json"}
    params = {'access_token': context.access_token,}
    url = "{0}/deposit/depositions/{1}/actions/newversion".format(context.url_prefix, latest_id)
    log.info("Creating New Version deposition from {0}".format(latest_id))
    log.debug("Deposition New Version of {1} Request to {0} ".format(url, latest_id))
    r = requests.post(url, params=params, headers=headers, json={})
    response = r.json()
    if context.verbose:
        print("=============== BEGIN DEPOSITION NEW VERSION RESPONSE ===============")
        context.pprinter.pprint(response)
        print("=============== END DEPOSITION NEW VERSION RESPONSE ===============")
    if 400 <= r.status_code <= 599:
        raise Exception(response)
    # The new draft's id is the last path component of the latest_draft link.
    new_id = os.path.basename(response['links']['latest_draft'])
    log.info("Deposition New Version succesful, new id = {0}, HTTP status code {1} ".format(new_id, r.status_code))
    return response
# ========
# COMMANDS
# ========
def zenodo_licenses(options, file_options):
    """Command handler: fetch and (optionally) pretty-print the Zenodo licenses list."""
    context = setup_context(options, file_options)
    context.verbose = options.verbose
    context.pprinter = pprint.PrettyPrinter(indent=2)
    do_zenodo_licenses(context)
def zenodo_list(options, file_options):
    """Command handler: list depositions matching the (possibly multi-word) title.

    Bug fix: the original called ``do_zenodo_list2``, which is not defined
    anywhere in this module, so the command always raised NameError; the
    actual helper is ``do_zenodo_list``.
    """
    context = setup_context(options, file_options)
    context.verbose = options.verbose
    context.pprinter = pprint.PrettyPrinter(indent=2)
    context.title = ' '.join(options.title)  # allows multi-word titles
    do_zenodo_list(context, context.title, options.published)
def zenodo_delete(options, file_options):
    """Command handler: delete the deposition whose id was given on the CLI."""
    context = setup_context(options, file_options)
    context.verbose = options.verbose
    context.pprinter = pprint.PrettyPrinter(indent=2)
    do_zenodo_delete(context, options.id)
def zenodo_pipeline(options, file_options):
    """Full publish pipeline: pack a new release and push it to Zenodo.

    Skips the upload entirely when the packer reports no content change.
    On the first ever release a brand-new deposition is created; on later
    releases a new version of the latest published deposition is drafted.
    """
    first_time, changed, version = make_new_release(options)
    if not changed:
        log.info("No need to upload new version to Zendodo")
        return
    if options.zip_only:
        log.info("Generated ZIP file only. Exiting")
        return
    context = setup_context(options, file_options)
    context.verbose = options.verbose
    context.pprinter = pprint.PrettyPrinter(indent=2)
    context.title = ' '.join(options.title) # allows Multiword titles
    context.community = options.community
    # An explicit --version option overrides the packer-derived version.
    context.version = version if options.version is None else options.version
    zip_file = options.zip_file
    if first_time:
        # First release: create, describe, upload and publish a deposition.
        response = do_zenodo_deposit(context)
        new_id = response['id']
        response = do_zenodo_metadata(context, new_id)
        bucket_url = response["links"]["bucket"]
        response = do_zenodo_upload(context, zip_file, bucket_url)
        response = do_zenodo_publish(context, new_id)
    else:
        # Subsequent releases: draft a new version of the latest deposition,
        # then describe, upload and publish it.
        response = do_zenodo_search(context, context.title, True)
        latest_id = response['id']
        response = do_zenodo_newversion(context, latest_id)
        new_id = os.path.basename(response['links']['latest_draft'])
        response = do_zenodo_metadata(context, new_id)
        bucket_url = response["links"]["bucket"]
        response = do_zenodo_upload(context, zip_file, bucket_url)
        response = do_zenodo_publish(context, new_id)
| StarcoderdataPython |
3330308 | <gh_stars>0
"""Combined single command for bundling and deploying the selected targets."""
import argparse
import typing
from reviser import interactivity
from ..commands import bundler
from ..commands import deployer
def get_completions(
    completer: "interactivity.ShellCompleter",
) -> typing.List[str]:
    """Shell auto-completes for this command: bundle completions followed by deploy ones."""
    bundle_options = bundler.get_completions(completer)
    deploy_options = deployer.get_completions(completer)
    return bundle_options + deploy_options
def populate_subparser(parser: argparse.ArgumentParser):
    """Attach both the bundle and the deploy CLI arguments to *parser*."""
    bundler.populate_subparser(parser)
    deployer.populate_subparser(parser)
def run(ex: "interactivity.Execution") -> "interactivity.Execution":
    """Bundle the selected function/layer targets, then deploy them on success."""
    bundled = bundler.run(ex)
    if bundled.result.status == "BUNDLED":
        return deployer.run(ex)
    # Bundling failed or was skipped: stop here and report the bundle result.
    return bundled
| StarcoderdataPython |
11321232 | <reponame>Tawkat/Autonomous-Code-Review-Usefulness-Measurement
class QuestionMark:
    """Count question-mark characters in a review text."""
    def __init__(self, review):
        self.review = review
        # Kept for API compatibility; getQuestionMark does not update it.
        self.count = 0
    def getQuestionMark(self):
        """Return the number of '?' characters in the review."""
        text = self.review.lower()
        return text.count('?')
'''
if __name__=='__main__':
q=QuestionMark("asd")
print(q.getQuestionMark())
'''
| StarcoderdataPython |
11253066 | <gh_stars>0
# vim: sw=4:ts=4:et
import datetime
import json
import logging
import os.path
import shutil
import uuid
import requests
# the expected format of the event_time of an alert
event_time_format = '%Y-%m-%d %H:%M:%S'
# current protocol version
# update this protocol number when you update the protocol
# this is used by the server.py code in saq to select which functions to handle the request
PROTOCOL_VERSION = "1.5"
class AlertSubmitException(Exception):
pass
# utility class to translate custom objects into JSON
class _JSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.strftime(event_time_format)
elif isinstance(obj, set):
return list(obj)
elif isinstance(obj, Attachment):
return obj.relative_storage_path
else:
return super(_JSONEncoder, self).default(obj)
class Attachment(object):
def __init__(self, source_path, relative_storage_path):
self.source_path = source_path
self.relative_storage_path = relative_storage_path
def __str__(self):
return "Attachment(from {} to {})".format(self.source_path, self.relative_storage_path)
class Alert(object):
KEY_ID = 'id'
KEY_UUID = 'uuid'
KEY_TOOL = 'tool'
KEY_TOOL_INSTANCE = 'tool_instance'
KEY_TYPE = 'type'
KEY_DESCRIPTION = 'description'
KEY_EVENT_TIME = 'event_time'
KEY_DETAILS = 'details'
KEY_OBSERVABLES = 'observables'
KEY_TAGS = 'tags'
KEY_ATTACHMENTS = 'attachments'
KEY_NAME = 'name'
KEY_COMPANY_NAME = 'company_name'
KEY_COMPANY_ID = 'company_id'
def __init__(self,
tool=None,
tool_instance=None,
alert_type=None,
desc=None,
event_time=None,
details=None,
name=None,
company_name=None,
company_id=None):
self._event_time = None
self.uuid = str(uuid.uuid4())
self.tool = tool
self.tool_instance = tool_instance
self.alert_type = alert_type
self.description = desc
self.event_time = event_time
self.details = details
self.attachments = []
self.observables = {}
self.tags = set()
self.name = name
self.company_name = company_name
self.company_id = company_id
def __str__(self):
return "Alert({})".format(self.uuid)
@property
def network_json(self):
return {
Alert.KEY_UUID: self.uuid,
Alert.KEY_TOOL: self.tool,
Alert.KEY_TOOL_INSTANCE: self.tool_instance,
Alert.KEY_TYPE: self.alert_type,
Alert.KEY_DESCRIPTION: self.description,
Alert.KEY_EVENT_TIME: self.event_time,
Alert.KEY_DETAILS: self.details,
Alert.KEY_OBSERVABLES: self.observables,
Alert.KEY_TAGS: self.tags,
Alert.KEY_ATTACHMENTS: self.attachments,
Alert.KEY_NAME: self.name,
Alert.KEY_COMPANY_NAME: self.company_name,
Alert.KEY_COMPANY_ID: self.company_id,
}
@network_json.setter
def network_json(self, alert_json):
self.uuid = alert_json[Alert.KEY_UUID]
self.tool = alert_json[Alert.KEY_TOOL]
self.tool_instance = alert_json[Alert.KEY_TOOL_INSTANCE]
self.alert_type = alert_json[Alert.KEY_TYPE]
self.description = alert_json[Alert.KEY_DESCRIPTION]
self.event_time = alert_json[Alert.KEY_EVENT_TIME]
self.details = alert_json[Alert.KEY_DETAILS]
self.observables = alert_json[Alert.KEY_OBSERVABLES]
self.tags = alert_json[Alert.KEY_TAGS]
self.attachments = alert_json[Alert.KEY_ATTACHMENTS]
if Alert.KEY_NAME in alert_json:
self.name = alert_json[Alert.KEY_NAME]
if Alert.KEY_COMPANY_NAME in alert_json:
self.company_name = alert_json[Alert.KEY_COMPANY_NAME]
if Alert.KEY_COMPANY_ID in alert_json:
self.company_id = alert_json[Alert.KEY_COMPANY_ID]
@property
def event_time(self):
#"""YYYY-MM-DD HH:MM:SS UTC <-- the time the event occurred, NOT when SAQ received it."""
return self._event_time
@event_time.setter
def event_time(self, value):
if value is None:
self._event_time = None
elif isinstance(value, datetime.datetime):
self._event_time = value.strftime(event_time_format)
elif isinstance(value, str):
self._event_time = value
else:
raise ValueError("event_time must be a datetime.datetime object or a string in the format %Y-%m-%d %H:%M:%S you passed {}".format(type(value).__name__))
@property
def event_time_datetime(self):
"""Return a datetime.datetime representation of self.event_time."""
if self._event_time is None:
return None
return datetime.datetime.strptime(self._event_time, event_time_format)
# (this is a drop-in replacement function)
def add_attachment_link(self, source_path, relative_storage_path):
self.attachments.append(Attachment(source_path, relative_storage_path))
# (this is a drop-in replacement function)
def add_observable(self, o_type, o_value, o_time=None, is_suspect=False, directives=[]):
if o_type not in self.observables:
self.observables[o_type] = []
self.observables[o_type].append((o_value, o_time, is_suspect, directives))
logging.debug("added observable type {} value {} time {} suspect {} directives {} to {}".format(
o_type, o_value, o_time, is_suspect, directives, self))
# (this is a drop-in replacement function)
def add_tag(self, tag):
self.tags.add(tag)
logging.debug("added tag {} to {}".format(tag, self))
def load_saved_alert(self, path):
"""Loads an alert that was saved when a call to submit() failed. Returns a tuple of (uri, key) which was used to submit the alert when it failed."""
saved_json = {}
with open(path, 'r') as fp:
saved_json = json.load(fp)
# this will rebuild the Alert object
self.network_json = saved_json
# replace the paths with Attachment objects that contain the source path (full path to the file) and the relative path
self.attachments = [Attachment(os.path.join(os.path.dirname(path), x), x) for x in self.attachments]
# extract the uri and key that was used last time to submit the alert
uri = saved_json['uri']
key = saved_json['key']
return (uri, key)
    # (this is a drop-in replacement function)
    def submit(self, uri, key, fail_dir=".saq_alerts", save_on_fail=True):
        """Submits this Alert to ACE for analysis to the given URI with the given key. Returns tuple(http_status_code, http_text).

        On any failure the alert (JSON plus attachments, and the uri/key needed
        to retry) is saved under fail_dir/<uuid> when save_on_fail is True, and
        the original error is re-raised."""
        assert isinstance(uri, str)
        assert len(uri) > 0
        # make sure we're not using the proxy
        if 'http_proxy' in os.environ:
            logging.warning("removing proxy setting http_proxy from environment variables")
            del os.environ['http_proxy']
        try:
            # append the attachments to the POST
            _file_info = []
            for attachment in self.attachments:
                # when the alert is created new it will have these Attachment objects in here
                if isinstance(attachment, Attachment):
                    # NOTE(review): these file handles are never explicitly closed; they
                    # are released only on garbage collection — confirm this is intended
                    _file_info.append(('data', (attachment.relative_storage_path, open(attachment.source_path, 'rb'), 'application/octet-stream')))
            logging.info("submitting alert {} to {}".format(self, uri))
            r = requests.post(
                uri,
                data = {
                    'alert': json.dumps(self.network_json, cls=_JSONEncoder, sort_keys=True),
                    'key': key,
                    'protocol_version': PROTOCOL_VERSION },
                files = _file_info)
            # anything other than HTTP 200 counts as a failed submission
            if r.status_code != 200:
                logging.error("alert submission failed: {} ({})".format(
                    r.status_code, r.reason))
                raise AlertSubmitException()
            return (r.status_code, r.text)
        except Exception as submission_error:
            logging.warning("unable to submit alert {}: {} (attempting to save alert to {})".format(
                self, str(submission_error), fail_dir))
            # when saving is disabled or impossible, just propagate the original error
            if not save_on_fail:
                raise submission_error
            if fail_dir is None:
                logging.error("fail_dir is set to None")
                raise submission_error
            # everything needed to retry later is saved under fail_dir/<uuid>
            dest_dir = os.path.join(fail_dir, self.uuid)
            if not os.path.isdir(dest_dir):
                try:
                    os.makedirs(dest_dir)
                except Exception as e:
                    logging.error("unable to create directory {} to save alert {}: {}".format(
                        dest_dir, self, str(e)))
                    raise e
            # save the attachments
            for attachment in self.attachments:
                src = None
                dst = dest_dir
                try:
                    # create the containing directory of the attachment if it does not already exist
                    src = attachment.source_path
                    dst = os.path.dirname(os.path.join(dest_dir, attachment.relative_storage_path))
                    if not os.path.isdir(dst):
                        os.makedirs(dst)
                except Exception as e:
                    # best-effort: the copy below is still attempted against dst
                    logging.error("unable to create storage directory {} for alert {}: {}".format(dst, self, str(e)))
                # destination path of the file
                dst_path = os.path.join(dst, os.path.basename(src))
                try:
                    shutil.copy2(src, dst_path)
                except Exception as e:
                    # a failed copy skips only this attachment; the rest are still saved
                    logging.error("unable to copy attachment from {} to {}: {}".format(src, dst_path, str(e)))
                    continue
            # get the json
            alert_json = self.network_json
            # we also include the url and submission key in the failed alert so that we can submit them later
            alert_json['uri'] = uri
            alert_json['key'] = key
            # to write it out to the filesystem
            with open(os.path.join(dest_dir, 'data.json'), 'w') as fp:
                json.dump(alert_json, fp, cls=_JSONEncoder, sort_keys=True)
            logging.debug("saved alert {} to {}".format(self, dest_dir))
            # the original submission error is always re-raised after a successful save
            raise submission_error
        # NOTE(review): unreachable — every path in the except block above re-raises
        return (500, "")
| StarcoderdataPython |
3352703 | from aiogoogle import Aiogoogle
from aiogoogle.auth.creds import ServiceAccountCreds
from django.conf import settings
# OAuth scope requested for the service account (full Drive access)
scopes = [
    'https://www.googleapis.com/auth/drive',
]
class GoogleManager:
    """Async wrapper around the Google Drive/Sheets APIs used to manage puzzle spreadsheets."""
    # process-wide singleton, created lazily by instance()
    __instance = None
    @classmethod
    def instance(cls):
        '''
        Get a single instance per process.
        '''
        if cls.__instance is None:
            cls.__instance = cls()
        return cls.__instance
    def __init__(self):
        # service-account credentials and Drive ids all come from Django settings
        self.creds = ServiceAccountCreds(
            scopes=scopes,
            **settings.DRIVE_SETTINGS['credentials'],
        )
        self.template_id = settings.DRIVE_SETTINGS['template_id']
        self.puzzle_folder_id = settings.DRIVE_SETTINGS['puzzle_folder_id']
        self.owner_id = str(settings.DRIVE_SETTINGS['owner_id'])
        self.client = Aiogoogle(service_account_creds=self.creds)
        # API discovery objects are populated lazily in setup()
        self.drive = None
        self.sheets = None
    async def setup(self):
        """Discover the Drive and Sheets APIs once; safe to call repeatedly."""
        if self.drive is None:
            self.drive = await self.client.discover('drive', 'v3')
            self.sheets = await self.client.discover('sheets', 'v4')
            # NOTE(review): relies on a private aiogoogle method — may break on upgrade
            await self.client._ensure_session_set()
    async def create(self, name):
        """Copy the template spreadsheet into the puzzle folder; returns the new file id."""
        await self.setup()
        sheet_file = await self.client.as_service_account(
            self.drive.files.copy(
                fileId=self.template_id,
                json={
                    'name': name,
                    'parents': [self.puzzle_folder_id],
                },
            ),
        )
        sheet_id = sheet_file['id']
        # hand ownership of the copy over to the configured owner account
        await self.client.as_service_account(
            self.drive.permissions.update(
                fileId=sheet_id,
                permissionId=self.owner_id,
                transferOwnership=True,
                json={
                    'role': 'owner',
                },
            ),
        )
        return sheet_id
    async def add_links(self, sheet_id, checkmate_link=None, puzzle_link=None):
        """Write HYPERLINK formulas for the given links into cells A1:B1 of the sheet."""
        # NOTE(review): this guard skips the update unless BOTH links are present,
        # which makes the per-link "else None" fallbacks below unreachable dead
        # code — confirm whether "and" was the intended operator here
        if not checkmate_link or not puzzle_link:
            return
        await self.setup()
        await self.client.as_service_account(
            self.sheets.spreadsheets.values.update(
                spreadsheetId=sheet_id,
                range='A1:B1',
                valueInputOption='USER_ENTERED',
                json={
                    'values': [[
                        f'=HYPERLINK("{checkmate_link}", "Checkmate Link")' if checkmate_link else None,
                        f'=HYPERLINK("{puzzle_link}", "Puzzle Link")' if puzzle_link else None,
                    ]],
                },
            ),
        )
    async def rename(self, file_id, name):
        """Rename an existing Drive file."""
        await self.setup()
        await self.client.as_service_account(
            self.drive.files.update(
                fileId=file_id,
                json={
                    'name': name,
                },
            )
        )
| StarcoderdataPython |
def test_read(client, seeder, utils):
    """GET on a custom widget returns its stored settings."""
    _, admin_unit_id = seeder.setup_base()
    widget_id = seeder.insert_event_custom_widget(admin_unit_id)
    response = utils.get_json(utils.get_url("api_v1_custom_widget", id=widget_id))
    utils.assert_response_ok(response)
    assert response.json["settings"]["color"] == "black"
def test_put(client, seeder, utils, app):
    """PUT replaces the widget: both name and widget_type are updated."""
    _, admin_unit_id = seeder.setup_api_access()
    widget_id = seeder.insert_event_custom_widget(admin_unit_id)
    payload = {"widget_type": "search", "name": "<NAME>"}
    response = utils.put_json(utils.get_url("api_v1_custom_widget", id=widget_id), payload)
    utils.assert_response_no_content(response)
    with app.app_context():
        from project.models import CustomWidget
        stored = CustomWidget.query.get(widget_id)
        assert stored.name == "<NAME>"
        assert stored.widget_type == "search"
def test_patch(client, seeder, utils, app):
    """PATCH updates only the supplied name field."""
    _, admin_unit_id = seeder.setup_api_access()
    widget_id = seeder.insert_event_custom_widget(admin_unit_id)
    response = utils.patch_json(
        utils.get_url("api_v1_custom_widget", id=widget_id), {"name": "<NAME>"}
    )
    utils.assert_response_no_content(response)
    with app.app_context():
        from project.models import CustomWidget
        stored = CustomWidget.query.get(widget_id)
        assert stored.name == "<NAME>"
        assert stored.widget_type == "search"
def test_delete(client, seeder, utils, app):
    """DELETE removes the widget row from the database."""
    _, admin_unit_id = seeder.setup_api_access()
    widget_id = seeder.insert_event_custom_widget(admin_unit_id)
    response = utils.delete(utils.get_url("api_v1_custom_widget", id=widget_id))
    utils.assert_response_no_content(response)
    with app.app_context():
        from project.models import CustomWidget
        assert CustomWidget.query.get(widget_id) is None
| StarcoderdataPython |
11253868 | <gh_stars>1-10
# Generated by Django 3.0.6 on 2020-05-27 18:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for airport_app.

    Creates Company, Flight, Worker, TransitLanding, Plane, Crew, Challenger and
    ArrivalDeparture; all foreign keys cascade on delete.
    """
    initial = True
    dependencies = [
    ]
    # NOTE: generated migration — make schema changes via new migrations,
    # not by editing this file in place.
    operations = [
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                ('workers_amount', models.IntegerField()),
                ('sides_amount', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Flight',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('saled_tickets_amount', models.IntegerField()),
                ('distance', models.IntegerField()),
                ('arrival_point', models.CharField(max_length=250)),
                ('departure_point', models.CharField(max_length=250)),
                ('is_transit', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='Worker',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=250)),
                ('last_name', models.CharField(max_length=250)),
                ('patronymic', models.CharField(max_length=250)),
                ('age', models.IntegerField()),
                ('education', models.CharField(max_length=250)),
                ('work_experience', models.IntegerField()),
                ('position', models.CharField(choices=[('1', 'Captain'), ('2', 'Second pilot'), ('3', 'Navigator'), ('4', 'Steward'), ('5', 'Stewardess')], default='1', max_length=1)),
                ('is_allow', models.BooleanField(default=True)),
                ('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='airport_app.Company')),
            ],
        ),
        migrations.CreateModel(
            name='TransitLanding',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('arrival_date', models.DateTimeField()),
                ('departure_date', models.DateTimeField()),
                ('landing_point', models.CharField(max_length=250)),
                ('flight', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='airport_app.Flight')),
            ],
        ),
        migrations.CreateModel(
            name='Plane',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('plane_model', models.CharField(max_length=250)),
                ('seats_num', models.IntegerField()),
                ('plane_type', models.CharField(choices=[('1', 'Passenger'), ('2', 'Cargo')], default='1', max_length=1)),
                ('plane_speed', models.IntegerField()),
                ('is_repair', models.BooleanField(default=False)),
                ('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='airport_app.Company')),
            ],
        ),
        migrations.CreateModel(
            name='Crew',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='airport_app.Company')),
                ('flight', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='airport_app.Flight')),
            ],
        ),
        migrations.CreateModel(
            name='Challenger',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=250)),
                ('last_name', models.CharField(max_length=250)),
                ('patronymic', models.CharField(max_length=250)),
                ('age', models.IntegerField()),
                ('education', models.CharField(max_length=250)),
                ('work_experience', models.IntegerField()),
                ('position', models.CharField(choices=[('1', 'Captain'), ('2', 'Second pilot'), ('3', 'Navigator'), ('4', 'Steward'), ('5', 'Stewardess')], default='1', max_length=1)),
                ('passport', models.IntegerField()),
                ('is_hired', models.BooleanField(default=True)),
                ('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='airport_app.Company')),
            ],
        ),
        migrations.CreateModel(
            name='ArrivalDeparture',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('arrival_date', models.DateTimeField()),
                ('departure_date', models.DateTimeField()),
                ('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='airport_app.Company')),
                ('flight', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='airport_app.Flight')),
            ],
        ),
    ]
| StarcoderdataPython |
6450132 | import json
import pytest
from datetime import datetime
from django.db import connection
from model_mommy import mommy
from rest_framework import status
from usaspending_api.search.tests.test_mock_data_search import non_legacy_filters, legacy_filters
from usaspending_api.awards.v2.lookups.lookups import all_award_types_mappings
from usaspending_api.awards.models import Award
from usaspending_api.references.models.ref_program_activity import RefProgramActivity
@pytest.fixture
def spending_by_award_test_data():
    """Seed three contract awards (ids 1-3), their recipients, transactions,
    subawards and supporting reference rows for spending_by_award tests."""
    # legal entities referenced by the three awards below
    mommy.make("references.LegalEntity", legal_entity_id=1001)
    mommy.make("references.LegalEntity", legal_entity_id=1002)
    mommy.make("references.LegalEntity", legal_entity_id=1003)
    # recipient lookup rows: one hash/DUNS pair per award recipient
    mommy.make(
        "recipient.RecipientLookup",
        id=1001,
        recipient_hash="bb7d6b0b-f890-4cec-a8ae-f777c8f5c3a9",
        legal_business_name="recipient_name_for_award_1001",
        duns="duns_1001",
    )
    mommy.make(
        "recipient.RecipientLookup",
        id=1002,
        recipient_hash="180bddfc-67f0-42d6-8279-a014d1062d65",
        legal_business_name="recipient_name_for_award_1002",
        duns="duns_1002",
    )
    mommy.make(
        "recipient.RecipientLookup",
        id=1003,
        recipient_hash="28aae030-b4b4-4494-8a75-3356208469cf",
        legal_business_name="recipient_name_for_award_1003",
        duns="duns_1003",
    )
    # recipient profiles at recipient_level "R", matching the lookup hashes above
    mommy.make(
        "recipient.RecipientProfile",
        id=2001,
        recipient_hash="bb7d6b0b-f890-4cec-a8ae-f777c8f5c3a9",
        recipient_level="R",
        recipient_name="recipient_name_1001",
        recipient_unique_id="duns_1001",
    )
    mommy.make(
        "recipient.RecipientProfile",
        id=2002,
        recipient_hash="180bddfc-67f0-42d6-8279-a014d1062d65",
        recipient_level="R",
        recipient_name="recipient_name_1002",
        recipient_unique_id="duns_1002",
    )
    mommy.make(
        "recipient.RecipientProfile",
        id=2003,
        recipient_hash="28aae030-b4b4-4494-8a75-3356208469cf",
        recipient_level="R",
        recipient_name="recipient_name_1003",
        recipient_unique_id="duns_1003",
    )
    # three type-A contract awards, one per recipient
    mommy.make(
        "awards.Award",
        id=1,
        type="A",
        category="contract",
        piid="abc111",
        recipient_id=1001,
        latest_transaction_id=1,
        generated_unique_award_id="CONT_AWD_TESTING_1",
    )
    mommy.make(
        "awards.Award",
        id=2,
        type="A",
        category="contract",
        piid="abc222",
        recipient_id=1002,
        latest_transaction_id=2,
        generated_unique_award_id="CONT_AWD_TESTING_2",
    )
    mommy.make(
        "awards.Award",
        id=3,
        type="A",
        category="contract",
        piid="abc333",
        recipient_id=1003,
        latest_transaction_id=6,
        generated_unique_award_id="CONT_AWD_TESTING_3",
    )
    # one FPDS transaction per action date, spread across the three awards
    mommy.make("awards.TransactionNormalized", id=1, award_id=1, action_date="2014-01-01", is_fpds=True)
    mommy.make("awards.TransactionNormalized", id=2, award_id=1, action_date="2015-01-01", is_fpds=True)
    mommy.make("awards.TransactionNormalized", id=3, award_id=2, action_date="2016-01-01", is_fpds=True)
    mommy.make("awards.TransactionNormalized", id=4, award_id=3, action_date="2017-01-01", is_fpds=True)
    mommy.make("awards.TransactionNormalized", id=5, award_id=3, action_date="2018-01-01", is_fpds=True)
    mommy.make("awards.TransactionNormalized", id=6, award_id=3, action_date="2019-01-01", is_fpds=True)
    mommy.make("awards.TransactionFPDS", transaction_id=1)
    mommy.make("awards.TransactionFPDS", transaction_id=2)
    mommy.make("awards.TransactionFPDS", transaction_id=3)
    mommy.make("awards.TransactionFPDS", transaction_id=4)
    mommy.make("awards.TransactionFPDS", transaction_id=5)
    mommy.make("awards.TransactionFPDS", transaction_id=6)
    # broker subaward rows keyed by subaward_number
    mommy.make("awards.BrokerSubaward", id=1, award_id=1, subaward_number=11111, awardee_or_recipient_uniqu="duns_1001")
    mommy.make("awards.BrokerSubaward", id=2, award_id=2, subaward_number=22222, awardee_or_recipient_uniqu="duns_1002")
    mommy.make("awards.BrokerSubaward", id=3, award_id=2, subaward_number=33333, awardee_or_recipient_uniqu="duns_1002")
    mommy.make("awards.BrokerSubaward", id=4, award_id=3, subaward_number=44444, awardee_or_recipient_uniqu="duns_1003")
    mommy.make("awards.BrokerSubaward", id=6, award_id=3, subaward_number=66666, awardee_or_recipient_uniqu="duns_1003")
    # subaward rows matching the broker subawards above (no row for 44444)
    mommy.make(
        "awards.Subaward",
        id=1,
        award_id=1,
        latest_transaction_id=1,
        subaward_number=11111,
        prime_award_type="A",
        award_type="procurement",
        action_date="2014-01-01",
        amount=10000,
        prime_recipient_name="recipient_name_for_award_1001",
        recipient_unique_id="duns_1001",
        piid="PIID1001",
        awarding_toptier_agency_name="awarding toptier 8001",
        awarding_subtier_agency_name="awarding subtier 8001",
    )
    mommy.make(
        "awards.Subaward",
        id=2,
        award_id=1,
        latest_transaction_id=2,
        subaward_number=22222,
        prime_award_type="A",
        award_type="procurement",
        action_date="2015-01-01",
        amount=20000,
        prime_recipient_name="recipient_name_for_award_1001",
        recipient_unique_id="duns_1001",
        piid="PIID2001",
        awarding_toptier_agency_name="awarding toptier 8002",
        awarding_subtier_agency_name="awarding subtier 8002",
    )
    mommy.make(
        "awards.Subaward",
        id=3,
        award_id=2,
        latest_transaction_id=3,
        subaward_number=33333,
        prime_award_type="A",
        award_type="procurement",
        action_date="2016-01-01",
        amount=30000,
        prime_recipient_name="recipient_name_for_award_1002",
        recipient_unique_id="duns_1002",
        piid="PIID3002",
        awarding_toptier_agency_name="awarding toptier 8003",
        awarding_subtier_agency_name="awarding subtier 8003",
    )
    mommy.make(
        "awards.Subaward",
        id=6,
        award_id=3,
        latest_transaction_id=6,
        subaward_number=66666,
        prime_award_type="A",
        award_type="procurement",
        action_date="2019-01-01",
        amount=60000,
        prime_recipient_name="recipient_name_for_award_1003",
        recipient_unique_id="duns_1003",
        piid="PIID6003",
        awarding_toptier_agency_name="awarding toptier 8006",
        awarding_subtier_agency_name="awarding subtier 8006",
    )
    # Ref Program Activity
    ref_program_activity_1 = {"id": 1}
    mommy.make("references.RefProgramActivity", **ref_program_activity_1)
    # Ref Object Class
    ref_object_class_1 = {"id": 1, "object_class": "111"}
    mommy.make("references.ObjectClass", **ref_object_class_1)
    # Financial Accounts by Awards
    financial_accounts_by_awards_1 = {
        "award": Award.objects.get(pk=1),
        "program_activity": RefProgramActivity.objects.get(pk=1),
    }
    mommy.make("awards.FinancialAccountsByAwards", **financial_accounts_by_awards_1)
@pytest.mark.django_db
def test_spending_by_award_subaward_success(client, spending_by_award_test_data, refresh_matviews):
    """Subaward search accepts the full filter set and returns exactly shaped rows."""
    # Testing all filters
    resp = client.post(
        "/api/v2/search/spending_by_award",
        content_type="application/json",
        data=json.dumps(
            {"subawards": True, "fields": ["Sub-Award ID"], "sort": "Sub-Award ID", "filters": non_legacy_filters()}
        ),
    )
    assert resp.status_code == status.HTTP_200_OK
    # Testing contents of what is returned
    resp = client.post(
        "/api/v2/search/spending_by_award",
        content_type="application/json",
        data=json.dumps(
            {
                "subawards": True,
                "fields": [
                    "Sub-Award ID",
                    "Sub-Awardee Name",
                    "Sub-Award Date",
                    "Sub-Award Amount",
                    "Awarding Agency",
                    "Awarding Sub Agency",
                    "Prime Award ID",
                    "Prime Recipient Name",
                    "recipient_id",
                    "prime_award_recipient_id",
                ],
                "sort": "Sub-Award ID",
                "filters": {"award_type_codes": ["A"]},
                "limit": 2,
                "page": 1,
            }
        ),
    )
    assert resp.status_code == status.HTTP_200_OK
    # limit=2 with more seeded subawards available → hasNext is set
    assert resp.json()["page_metadata"]["page"] == 1
    assert resp.json()["page_metadata"]["hasNext"]
    assert resp.json()["limit"] == 2
    assert len(resp.json()["results"]) == 2
    # rows correspond to subaward_numbers 66666 and 33333 from the fixture
    assert resp.json()["results"][0] == {
        "Awarding Agency": "awarding toptier 8006",
        "Awarding Sub Agency": "awarding subtier 8006",
        "Prime Award ID": "PIID6003",
        "Prime Recipient Name": "recipient_name_for_award_1003",
        "Sub-Award Amount": 60000.0,
        "Sub-Award Date": "2019-01-01",
        "Sub-Award ID": "66666",
        "Sub-Awardee Name": "RECIPIENT_NAME_FOR_AWARD_1003",
        "prime_award_internal_id": 3,
        "internal_id": "66666",
        "prime_award_recipient_id": "28aae030-b4b4-4494-8a75-3356208469cf-R",
        "recipient_id": None,
        "prime_award_generated_internal_id": "CONT_AWD_TESTING_3",
    }
    assert resp.json()["results"][1] == {
        "Awarding Agency": "awarding toptier 8003",
        "Awarding Sub Agency": "awarding subtier 8003",
        "Prime Award ID": "PIID3002",
        "Prime Recipient Name": "recipient_name_for_award_1002",
        "Sub-Award Amount": 30000.0,
        "Sub-Award Date": "2016-01-01",
        "Sub-Award ID": "33333",
        "Sub-Awardee Name": "RECIPIENT_NAME_FOR_AWARD_1002",
        "prime_award_internal_id": 2,
        "internal_id": "33333",
        "prime_award_recipient_id": "180bddfc-67f0-42d6-8279-a014d1062d65-R",
        "recipient_id": None,
        "prime_award_generated_internal_id": "CONT_AWD_TESTING_2",
    }
@pytest.mark.django_db
def test_spending_by_award_success(client, refresh_matviews):
    """The endpoint accepts the full modern (non-legacy) filter set."""
    request_body = {
        "subawards": False,
        "fields": ["Award ID"],
        "sort": "Award ID",
        "filters": non_legacy_filters(),
    }
    response = client.post(
        "/api/v2/search/spending_by_award",
        content_type="application/json",
        data=json.dumps(request_body),
    )
    assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_spending_by_award_legacy_filters(client, refresh_matviews):
    """The endpoint still accepts the legacy filter object format."""
    body = json.dumps(
        {"subawards": False, "fields": ["Award ID"], "sort": "Award ID", "filters": legacy_filters()}
    )
    response = client.post("/api/v2/search/spending_by_award", content_type="application/json", data=body)
    assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_no_intersection(client):
    """An unknown award type code in the filter simply matches nothing instead of erroring."""
    mommy.make("references.LegalEntity", legal_entity_id=1)
    mommy.make("awards.Award", id=1, type="A", recipient_id=1, latest_transaction_id=1)
    mommy.make("awards.TransactionNormalized", id=1, action_date="2010-10-01", award_id=1, is_fpds=True)
    mommy.make("awards.TransactionFPDS", transaction_id=1)
    with connection.cursor() as cursor:
        cursor.execute("refresh materialized view concurrently mv_contract_award_search")
    payload = {
        "subawards": False,
        "fields": ["Award ID"],
        "sort": "Award ID",
        "filters": {"award_type_codes": ["A", "B", "C", "D"]},
    }
    response = client.post(
        "/api/v2/search/spending_by_award", content_type="application/json", data=json.dumps(payload)
    )
    assert response.status_code == status.HTTP_200_OK
    assert len(response.data["results"]) == 1
    # widening the filter with a nonsense code must not change the valid matches
    payload["filters"]["award_type_codes"].append("no intersection")
    response = client.post(
        "/api/v2/search/spending_by_award", content_type="application/json", data=json.dumps(payload)
    )
    assert response.status_code == status.HTTP_200_OK
    assert len(response.data["results"]) == 0, "Results returned, there should be 0"
@pytest.fixture
def awards_over_different_date_ranges():
    """Seed 16 awards per category whose signed/action dates straddle the searched ranges."""
    award_category_list = ["contracts", "direct_payments", "grants", "idvs", "loans", "other_financial_assistance"]
    # The date ranges for the different awards are setup to cover possible intersection points by the
    # different date ranges being searched. The comments on each line specify where the date ranges are
    # suppose to overlap the searched for date ranges. The search for date ranges are:
    # - {"start_date": "2015-01-01", "end_date": "2015-12-31"}
    # - {"start_date": "2017-02-01", "end_date": "2017-11-30"}
    date_range_list = [
        # Intersect only one of the date ranges searched for
        {"date_signed": datetime(2014, 1, 1), "action_date": datetime(2014, 5, 1)},  # Before both
        {"date_signed": datetime(2014, 3, 1), "action_date": datetime(2015, 4, 15)},  # Beginning of first
        {"date_signed": datetime(2015, 2, 1), "action_date": datetime(2015, 7, 1)},  # Middle of first
        {"date_signed": datetime(2015, 2, 1), "action_date": datetime(2015, 4, 17)},
        {"date_signed": datetime(2014, 12, 1), "action_date": datetime(2016, 1, 1)},  # All of first
        {"date_signed": datetime(2015, 11, 1), "action_date": datetime(2016, 3, 1)},  # End of first
        {"date_signed": datetime(2016, 2, 23), "action_date": datetime(2016, 7, 19)},  # Between both
        {"date_signed": datetime(2016, 11, 26), "action_date": datetime(2017, 3, 1)},  # Beginning of second
        {"date_signed": datetime(2017, 5, 1), "action_date": datetime(2017, 7, 1)},  # Middle of second
        {"date_signed": datetime(2017, 1, 1), "action_date": datetime(2017, 12, 1)},  # All of second
        {"date_signed": datetime(2017, 9, 1), "action_date": datetime(2017, 12, 17)},  # End of second
        {"date_signed": datetime(2018, 2, 1), "action_date": datetime(2018, 7, 1)},  # After both
        # Intersect both date ranges searched for
        {"date_signed": datetime(2014, 12, 1), "action_date": datetime(2017, 12, 5)},  # Completely both
        {"date_signed": datetime(2015, 7, 1), "action_date": datetime(2017, 5, 1)},  # Partially both
        {"date_signed": datetime(2014, 10, 3), "action_date": datetime(2017, 4, 8)},  # All first; partial second
        {"date_signed": datetime(2015, 8, 1), "action_date": datetime(2018, 1, 2)},  # Partial first; all second
    ]
    next_id = 0
    for category in award_category_list:
        # type list is the same for every date range, so hoist it out of the inner loop
        type_choices = all_award_types_mappings[category]
        for period in date_range_list:
            next_id += 1
            recipient = mommy.make("references.LegalEntity", legal_entity_id=2000 + next_id)
            award = mommy.make(
                "awards.Award",
                id=next_id,
                generated_unique_award_id="AWARD_{}".format(next_id),
                type=type_choices[next_id % len(type_choices)],
                category=category,
                latest_transaction_id=1000 + next_id,
                date_signed=period["date_signed"],
                recipient=recipient,
                piid="abcdefg{}".format(next_id),
                fain="xyz{}".format(next_id),
                uri="abcxyx{}".format(next_id),
            )
            mommy.make(
                "awards.TransactionNormalized", id=1000 + next_id, award=award, action_date=period["action_date"]
            )
@pytest.mark.django_db
def test_date_range_search_with_one_range(client, awards_over_different_date_ranges, refresh_matviews):
    """A single time_period filter returns only the awards whose dates intersect it."""
    contract_type_list = all_award_types_mappings["contracts"]
    grants_type_list = all_award_types_mappings["grants"]
    # Test with contracts
    request_with_contracts = {
        "subawards": False,
        "fields": ["Award ID"],
        "sort": "Award ID",
        "limit": 50,
        "page": 1,
        "filters": {
            "time_period": [{"start_date": "2015-01-01", "end_date": "2015-12-31"}],
            "award_type_codes": contract_type_list,
        },
    }
    resp = client.post(
        "/api/v2/search/spending_by_award/", content_type="application/json", data=json.dumps(request_with_contracts)
    )
    assert resp.status_code == status.HTTP_200_OK
    # 9 of the 16 seeded date ranges intersect 2015 (see fixture comments)
    assert len(resp.data["results"]) == 9
    # Test with grants
    request_with_grants = {
        "subawards": False,
        "fields": ["Award ID"],
        "sort": "Award ID",
        "limit": 50,
        "page": 1,
        "filters": {
            "time_period": [{"start_date": "2017-02-01", "end_date": "2017-11-30"}],
            "award_type_codes": grants_type_list,
        },
    }
    resp = client.post(
        "/api/v2/search/spending_by_award/", content_type="application/json", data=json.dumps(request_with_grants)
    )
    assert resp.status_code == status.HTTP_200_OK
    assert len(resp.data["results"]) == 8
    # Test with only one specific award showing
    request_for_one_award = {
        "subawards": False,
        "fields": ["Award ID"],
        "sort": "Award ID",
        "limit": 50,
        "page": 1,
        "filters": {
            "time_period": [{"start_date": "2014-01-03", "end_date": "2014-01-08"}],
            "award_type_codes": contract_type_list,
        },
    }
    resp = client.post(
        "/api/v2/search/spending_by_award/", content_type="application/json", data=json.dumps(request_for_one_award)
    )
    assert resp.status_code == status.HTTP_200_OK
    assert len(resp.data["results"]) == 1
    assert resp.data["results"] == [{"Award ID": "abcdefg1", "internal_id": 1, "generated_internal_id": "AWARD_1"}]
    # Test with no award showing
    request_for_no_awards = {
        "subawards": False,
        "fields": ["Award ID"],
        "sort": "Award ID",
        "limit": 50,
        "page": 1,
        "filters": {
            "time_period": [{"start_date": "2013-01-03", "end_date": "2013-01-08"}],
            "award_type_codes": grants_type_list,
        },
    }
    resp = client.post(
        "/api/v2/search/spending_by_award/", content_type="application/json", data=json.dumps(request_for_no_awards)
    )
    assert resp.status_code == status.HTTP_200_OK
    assert len(resp.data["results"]) == 0
@pytest.mark.django_db
def test_date_range_search_with_two_ranges(client, awards_over_different_date_ranges, refresh_matviews):
    """Two time_period filters combine as a union: an award matching either range is returned."""
    contract_type_list = all_award_types_mappings["contracts"]
    grants_type_list = all_award_types_mappings["grants"]
    # Test with contracts
    request_with_contracts = {
        "subawards": False,
        "fields": ["Award ID"],
        "sort": "Award ID",
        "limit": 50,
        "page": 1,
        "filters": {
            "time_period": [
                {"start_date": "2015-01-01", "end_date": "2015-12-31"},
                {"start_date": "2017-02-01", "end_date": "2017-11-30"},
            ],
            "award_type_codes": contract_type_list,
        },
    }
    resp = client.post(
        "/api/v2/search/spending_by_award/", content_type="application/json", data=json.dumps(request_with_contracts)
    )
    assert resp.status_code == status.HTTP_200_OK
    # union of the two ranges covers 13 of the 16 seeded date ranges
    assert len(resp.data["results"]) == 13
    # Test with grants
    request_with_grants = {
        "subawards": False,
        "fields": ["Award ID"],
        "sort": "Award ID",
        "limit": 50,
        "page": 1,
        "filters": {
            "time_period": [
                {"start_date": "2015-01-01", "end_date": "2015-12-31"},
                {"start_date": "2017-02-01", "end_date": "2017-11-30"},
            ],
            "award_type_codes": grants_type_list,
        },
    }
    resp = client.post(
        "/api/v2/search/spending_by_award/", content_type="application/json", data=json.dumps(request_with_grants)
    )
    assert resp.status_code == status.HTTP_200_OK
    assert len(resp.data["results"]) == 13
    # Test with two specific awards showing
    request_for_two_awards = {
        "subawards": False,
        "fields": ["Award ID"],
        "sort": "Award ID",
        "limit": 50,
        "page": 1,
        "filters": {
            "time_period": [
                {"start_date": "2014-01-03", "end_date": "2014-01-08"},
                {"start_date": "2018-06-01", "end_date": "2018-06-23"},
            ],
            "award_type_codes": grants_type_list,
        },
    }
    resp = client.post(
        "/api/v2/search/spending_by_award/", content_type="application/json", data=json.dumps(request_for_two_awards)
    )
    assert resp.status_code == status.HTTP_200_OK
    assert len(resp.data["results"]) == 2
    assert resp.data["results"] == [
        {"Award ID": "xyz44", "internal_id": 44, "generated_internal_id": "AWARD_44"},
        {"Award ID": "xyz33", "internal_id": 33, "generated_internal_id": "AWARD_33"},
    ]
    # Test with no award showing
    request_for_no_awards = {
        "subawards": False,
        "fields": ["Award ID"],
        "sort": "Award ID",
        "limit": 50,
        "page": 1,
        "filters": {
            "time_period": [
                {"start_date": "2013-01-03", "end_date": "2013-01-08"},
                {"start_date": "2019-06-01", "end_date": "2019-06-23"},
            ],
            "award_type_codes": grants_type_list,
        },
    }
    resp = client.post(
        "/api/v2/search/spending_by_award/", content_type="application/json", data=json.dumps(request_for_no_awards)
    )
    assert resp.status_code == status.HTTP_200_OK
    assert len(resp.data["results"]) == 0
| StarcoderdataPython |
12824987 | import json
import os
import re
import shutil
import sys
import time
# Feature toggles consumed by the template-rewriting code below; several are
# also read by code past the end of this section.
winrm = True
ssh = False
keep_input_artifact = True
vmx_data_post = False
compression_level = 0
chocolatey = False
add_debugging = True
set_packer_debug = False
add_debug_log = True
add_unzip_vbs = False
add_shell_command = False
add_ssh_uninstaller = False
tools_upload_flavor = False
default_cm = 'nocm'
attach_provisions_iso = False
attach_windows_iso = True
attach_vboxguestadditions_iso = True
attach_shared_folder = False
# the SSH-uninstaller variant forces the debug/logging extras off
if add_ssh_uninstaller:
    add_debugging = False
    add_debug_log = False
    vmx_data_post = False
def touch(filename, mtime):
with open(filename, 'a+'):
pass
os.utime(filename, (mtime, mtime))
return 0
def touch_by_file(filename, touch_filename):
    """Give filename the same modification time as touch_filename."""
    reference_mtime = os.path.getmtime(touch_filename)
    touch(filename, reference_mtime)
# require at least the JSON template path on the command line
if len(sys.argv) < 2:
    sys.exit('Usage: ' + sys.argv[0] + ' filename.json')
# any extra argument switches on winrm mode plus vmx_data post-processing
if len(sys.argv) >= 3:
    winrm = True
    vmx_data_post = True
json_file_path = sys.argv[1]
orig = json_file_path + '.orig'
print('Updating ' + json_file_path)
# keep a pristine .orig copy (preserving the original mtime) on first run
if not os.path.isfile(orig):
    mtime = os.path.getmtime(json_file_path)
    shutil.copyfile(json_file_path, orig)
    touch(orig, mtime)
# always re-parse from the pristine copy so repeated runs start from the same input
# NOTE(review): json_file is never closed — harmless in a short script, but worth confirming
json_file = open(orig, 'rb')
json_data = json.load(json_file)
# helper artifacts that may be added to the builders' floppy_files below
debug_cmd = 'floppy/zzz-debug-log.cmd'
save_logs_cmd = 'script/save-logs.cmd'
unzip_vbs = 'floppy/unzip.vbs'
wget_exe = '.windows/wget.exe'
download_cmd = 'floppy/_download.cmd'
packer_config_cmd = 'floppy/_packer_config.cmd'
packer_config_local_cmd = 'floppy/_packer_config_local.cmd'
# kept as strings because they are spliced into packer template fields
shutdown_seconds = '10'
timeout_seconds = '10000'
if winrm:
    winrm_suffix = '_winrm'
else:
    winrm_suffix = ''
shutdown_comment = 'Packer Shutdown'
shutdown_command = 'shutdown /s /t %s /f /d p:4:1 /c "%s"' % (shutdown_seconds, shutdown_comment)
cwd = os.getcwd()
# ISO locations referenced when attaching media to the VM builders
provisions_iso = cwd + '/.windows/provisions/provisions.iso'
windows_iso = 'C:/Program Files (x86)/VMware/VMware Workstation/windows.iso'
vboxguestadditions_iso = "C:/Progra~1/Oracle/VirtualBox/VBoxGuestAdditions.iso"
# --- builders: tweak each vmware/virtualbox builder ------------------------
# NOTE(review): '\-' in the regex string literals below is a deprecated
# escape sequence; prefer raw strings (r'^(vmware|virtualbox)-') if touched.
for i, a in enumerate(json_data['builders']):
    if re.search('^(vmware|virtualbox)\-', a['type']):
        del a['keep_failed_build']
        #a['output_directory'] = 'output-%s_%s%s' % (a['type'], a['vm_name'], winrm_suffix)
        #a['ssh_wait_timeout'] = timeout_seconds + 's'
        #a['shutdown_timeout'] = timeout_seconds + 's'
        #a['shutdown_command'] = shutdown_command
        if add_ssh_uninstaller:
            del a['shutdown_timeout']
            #del a['shutdown_command']
            #a['shutdown_command'] = 'choice /C Y /N /T %s /D Y /M "Waiting %s seconds"' % (timeout_seconds, timeout_seconds)
        #a['http_directory'] = 'floppy'
        # De-duplicate the floppy file list via a dict keyed on path.
        floppy_files = dict.fromkeys(a['floppy_files'], True)
        if add_debug_log:
            if os.path.exists(debug_cmd):
                floppy_files[debug_cmd] = True
        if os.path.exists(download_cmd):
            floppy_files[download_cmd] = True
        if os.path.exists(packer_config_cmd):
            floppy_files[packer_config_cmd] = True
        if os.path.exists(packer_config_local_cmd):
            floppy_files[packer_config_local_cmd] = True
        if os.path.exists(wget_exe):
            floppy_files[wget_exe] = True
        if add_unzip_vbs:
            if os.path.exists(unzip_vbs):
                floppy_files[unzip_vbs] = True
        if not ssh:
            # Without ssh the Cygwin/OpenSSH bootstrap floppies are not needed.
            if 'floppy/cygwin.bat' in floppy_files:
                del floppy_files['floppy/cygwin.bat']
            if 'floppy/openssh.bat' in floppy_files:
                del floppy_files['floppy/openssh.bat']
        a['floppy_files'] = sorted(floppy_files)
    if re.search('^vmware\-', a['type']):
        # to turn off to see if Cygwin is failing because of this
        if winrm or add_ssh_uninstaller:
            # buggy with winrm
            a['tools_upload_flavor'] = ''
        # a['disk_type_id'] = "0"
        # a['skip_compaction'] = compression_level == 0
        if winrm:
            a['communicator'] = 'winrm'
            a['winrm_username'] = 'vagrant'
            a['winrm_password'] = '<PASSWORD>'
            a['winrm_timeout'] = timeout_seconds + 's'
        if not tools_upload_flavor:
            a['tools_upload_flavor'] = ''
        if not 'vmx_data' in a:
            a['vmx_data'] = {}
        if attach_shared_folder:
            # Expose the host C: drive as shared folder 0 inside the guest.
            a['vmx_data']['sharedFolder.maxNum'] = '1'
            a['vmx_data']['sharedFolder0.enabled'] = 'TRUE'
            a['vmx_data']['sharedFolder0.expiration'] = 'never'
            a['vmx_data']['sharedFolder0.guestName'] = 'C'
            a['vmx_data']['sharedFolder0.hostPath'] = 'C:\\'
            a['vmx_data']['sharedFolder0.present'] = 'TRUE'
            a['vmx_data']['sharedFolder0.readAccess'] = 'TRUE'
            a['vmx_data']['sharedFolder0.writeAccess'] = 'TRUE'
            a['vmx_data']['hgfs.maprootshare'] = 'TRUE'
            a['vmx_data']['sound.autodetect'] = 'TRUE'
            a['vmx_data']['sound.filename'] = '-1'
            #a['vmx_data']['sound.pciSlotNumber'] = '32'
            a['vmx_data']['sound.present'] = 'TRUE'
            a['vmx_data']['sound.startconnected'] = 'TRUE'
            a['vmx_data']['sound.virtualdev'] = 'hdaudio'
            # a['vmx_data']['virtualhw.version'] = '10'
        if attach_provisions_iso:
            if os.path.exists(provisions_iso):
                # Attach the provisions ISO as a cdrom image on ide1:1.
                a['vmx_data']['ide1:1.deviceType'] = 'cdrom-image'
                a['vmx_data']['ide1:1.fileName'] = provisions_iso
                a['vmx_data']['ide1:1.present'] = 'TRUE'
                a['vmx_data']['ide1:1.startConnected'] = 'TRUE'
        if attach_windows_iso:
            if os.path.exists(windows_iso):
                # The filename is a packer user-variable placeholder.
                a['vmx_data']['scsi0:1.present'] = 'TRUE'
                a['vmx_data']['scsi0:1.deviceType'] = 'cdrom-image'
                a['vmx_data']['scsi0:1.fileName'] = '{{ user `vmware_windows_iso` }}'
        if vmx_data_post:
            if not 'vmx_data_post' in a:
                a['vmx_data_post'] = {}
            a['vmx_data_post']['ethernet0.virtualDev'] = 'vmxnet3'
            a['vmx_data_post']['RemoteDisplay.vnc.enabled'] = 'false'
            a['vmx_data_post']['RemoteDisplay.vnc.port'] = '5900'
            a['vmx_data_post']['scsi0.virtualDev'] = 'lsilogic'
    if re.search('^virtualbox\-', a['type']):
        if not 'vboxmanage' in a:
            a['vboxmanage'] = []
        if attach_provisions_iso:
            if os.path.exists(provisions_iso):
                # Attach the provisions ISO to IDE port 1 / device 1.
                a['vboxmanage'].append([
                    "storageattach",
                    "{{.Name}}",
                    "--storagectl",
                    "IDE Controller",
                    "--port",
                    "1",
                    "--device",
                    "1",
                    "--type",
                    "dvddrive",
                    "--medium",
                    provisions_iso
                ])
        if attach_vboxguestadditions_iso:
            if os.path.exists(vboxguestadditions_iso):
                # a['guest_additions_url'] = vboxguestadditions_iso
                # Attach the guest additions ISO to SATA port 1 / device 0.
                a['vboxmanage'].append([
                    "storageattach",
                    "{{.Name}}",
                    "--storagectl",
                    "SATA",
                    "--port",
                    "1",
                    "--device",
                    "0",
                    "--type",
                    "dvddrive",
                    "--medium",
                    vboxguestadditions_iso
                ])
    # builders: modify iso properties
    a['iso_checksum'] = '{{ user `iso_checksum` }}'
    a['iso_checksum_type'] = '{{ user `iso_checksum_type` }}'
    a['iso_url'] = '{{ user `iso_url` }}/{{ user `iso_name` }}'
# --- post-processors: vagrant box output settings --------------------------
for i in json_data['post-processors']:
    if i['type'] == 'vagrant':
        i['keep_input_artifact'] = keep_input_artifact
        i['compression_level'] = compression_level
        #if winrm:
        #    i['output'] = 'winrm-' + i['output']
        #if compression_level == 0:
        #    i['only'] = 'force-vagrant'
        #else:
        # NOTE(review): this 'del' runs unconditionally (its guard is
        # commented out) and raises KeyError if 'only' is absent — confirm
        # every template defines it.
        del i['only']
# Environment variable that turns on verbose logging in the guest scripts.
packer_debug_env = 'PACKER_DEBUG=1'
if add_shell_command:
    env_vars = [
        "CM={{user `cm`}}",
        "CM_VERSION={{user `cm_version`}}",
    ]
    if set_packer_debug:
        env_vars.append(packer_debug_env)
    # Prepend a shell provisioner that runs the debug-log script first.
    debug_step = {
        "environment_vars": env_vars,
        "script": debug_cmd,
        "type": "shell",
    }
    json_data['provisioners'].insert(0, debug_step)
# --- provisioners: adjust each windows-shell script step -------------------
for i, a in enumerate(json_data['provisioners']):
    if a['type'] != 'windows-shell':
        continue
    if winrm:
        # use winrm defaults
        if 'remote_path' in a:
            del a['remote_path']
        if 'execute_command' in a:
            del a['execute_command']
    #a['guest_os_type'] = 'windows'
    if 'inline' in a:
        if winrm or add_ssh_uninstaller:
            # Drop inline 'rm ...' cleanup steps (they assume an ssh shell).
            # NOTE(review): deleting from the list being enumerated skips the
            # next element — confirm at most one such step exists.
            if re.search('^rm ', a['inline'][0]):
                del json_data['provisioners'][i]
                continue
    #if winrm:
    #a['binary'] = 'true'
    if 'script' in a:
        continue
    if not 'scripts' in a:
        continue
    #if 'execute_command' in a:
    #    a['execute_command'] = re.sub(' /c ', ' /q /c ', a['execute_command'])
    if set_packer_debug:
        # Ensure PACKER_DEBUG=1 appears exactly once in the step's env vars.
        if 'environment_vars' in a:
            packer_debug = False
            for j in a['environment_vars']:
                if j == packer_debug_env:
                    packer_debug = True
                    break
            if not packer_debug:
                a['environment_vars'].append(packer_debug_env)
    # Rebuild the script list: optionally insert log/debug helpers around
    # 'script/clean.bat' and skip cleanup scripts when compression is off.
    scripts = []
    if add_debugging:
        if os.path.exists('script/dump-logs.cmd'):
            scripts.append('script/dump-logs.cmd')
        # don't need any more:
        #scripts.append('script/01-install-handle.cmd')
    for j in a['scripts']:
        if j == 'script/clean.bat':
            if add_debugging:
                scripts.append('script/save-logs.cmd')
                scripts.append('script/save-temp-dirs.cmd')
            if chocolatey:
                scripts.append('script/nuget.cmd')
                #scripts.append('script/reboot.cmd')
                scripts.append('script/chocolatey.cmd')
        if compression_level == 0:
            # No compression: the defrag/cleanup passes are pointless.
            if j == 'script/clean.bat':
                continue
            if j == "script/ultradefrag.bat":
                continue
            if j == "script/uninstall-7zip.bat":
                continue
            if j == "script/sdelete.bat":
                continue
        #if not add_ssh_uninstaller:
        scripts.append(j)
    if add_debug_log:
        scripts.append(debug_cmd)
    if add_ssh_uninstaller:
        # Uninstall whichever ssh stack this template installed.
        if re.search('cygwin', json_file_path):
            scripts.append('script/uninstall-cygwin.cmd')
        else:
            scripts.append('script/uninstall-openssh.cmd')
    a['scripts'] = scripts
# --- user variables and write-back -----------------------------------------
if 'variables' in json_data:
    json_data['variables']['cm'] = default_cm
    json_data['variables']['shutdown_command'] = shutdown_command
    json_data['variables']['vmware_windows_iso'] = windows_iso
    #json_data['variables']['iso_checksum_type'] = 'sha1'
    #json_data['variables']['iso_name'] = json_data['variables']['iso_url']
    #json_data['variables']['iso_url'] = 'iso'
new_data = json_data
# Preserve the original mtime so rewriting the file does not trigger
# downstream rebuilds.
mtime = os.path.getmtime(json_file_path)
new_data = json.dumps(new_data, sort_keys=True, indent=2, separators=(',', ': '))
json_file.close()
json_file = open(json_file_path, 'w')
json_file.write(new_data)
json_file.close()
touch(json_file_path, mtime)
| StarcoderdataPython |
11381037 | """Tests for fooof.core.strings."""
from fooof.core.strings import *
from fooof.core.strings import _format, _no_model_str
###################################################################################################
###################################################################################################
def test_gen_width_warning_str():
    """A non-empty warning string is produced for equal widths."""
    warning = gen_width_warning_str(0.5, 0.5)
    assert warning
def test_gen_version_str():
    """The version string is non-empty."""
    version = gen_version_str()
    assert version
def test_gen_settings_str(tfm):
    """A settings description can be generated from a FOOOF object."""
    settings = gen_settings_str(tfm)
    assert settings
def test_gen_freq_range_str(tfm):
    """A frequency-range description can be generated from a FOOOF object."""
    freq_range = gen_freq_range_str(tfm)
    assert freq_range
def test_gen_methods_report_str():
    """The methods report string is non-empty."""
    report = gen_methods_report_str()
    assert report
def test_gen_methods_text_str(tfm):
    """Methods text can be generated with or without a FOOOF object."""
    without_model = gen_methods_text_str()
    with_model = gen_methods_text_str(tfm)
    assert without_model
    assert with_model
def test_gen_results_fm_str(tfm):
    """A results string can be generated from a FOOOF object."""
    results = gen_results_fm_str(tfm)
    assert results
def test_gen_results_fg_str(tfg):
    """A results string can be generated from a FOOOFGroup object."""
    results = gen_results_fg_str(tfg)
    assert results
def test_gen_issue_str():
    """The issue-reporting string is non-empty."""
    issue = gen_issue_str()
    assert issue
def test_no_model_str():
    """The 'no model' placeholder string is non-empty."""
    placeholder = _no_model_str()
    assert placeholder
def test_format():
    """_format joins the pieces, collapsing newlines when concatenating."""
    pieces = ['=', '', 'a', '', 'b', '', '=']
    plain = _format(pieces, False)
    assert plain
    # Seven pieces joined without concatenation -> six separators.
    assert plain.count('\n') == 6
    concatenated = _format(pieces, True)
    assert concatenated
    # Concatenation halves the number of line breaks.
    assert concatenated.count('\n') == 3
| StarcoderdataPython |
3367913 | <filename>donation/migrations/0003_auto_20161021_0002.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename the Drupal-import staging model to a source-agnostic name."""
    dependencies = [
        ('donation', '0002_transitionaldonationsfilefromdrupal'),
    ]
    operations = [
        # Pure model rename; the underlying rows are preserved.
        migrations.RenameModel(
            old_name='TransitionalDonationsFileFromDrupal',
            new_name='TransitionalDonationsFile',
        ),
    ]
| StarcoderdataPython |
4908573 | # coding:utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.auth.signers.signer import Signer
class AccessKeySigner(Signer):
    """Signer backed by a static AccessKey id/secret credential pair."""

    def __init__(self, access_key_credential):
        self._credential = access_key_credential

    def sign(self, region_id, request):
        """Return the signed header and URL for *request* in *region_id*."""
        credential = self._credential
        key_id = credential.access_key_id
        secret = credential.access_key_secret
        signed_header = request.get_signed_header(region_id, key_id, secret)
        signed_url = request.get_url(region_id, key_id, secret)
        return signed_header, signed_url
| StarcoderdataPython |
11226647 | # -*- coding: utf-8 -*-
"""
randomforest_classifier.py: implements the random forest classifier, it's
fitting and prediction
@author: <NAME>
"""
# IMPORTS
from sklearn import ensemble as ensemble
# FUNCTIONS
def RandomForestClassifier(max_depth, random_state, n_estimators):
    """
    Build and return scikit-learn's Random Forest Classifier

    Parameters
    ----------
    max_depth: int
        maximal depth of each tree in the forest
    random_state: int
        seed that makes the model reproducible
    n_estimators: int
        number of trees in the forest

    Examples
    --------
    >>> base_model = RandomForestClassifier(2, 245, 100)

    Returns
    -------
    sklearn.RandomForestClassifier object
        unfitted Random Forest model
    """
    return ensemble.RandomForestClassifier(
        max_depth=max_depth,
        random_state=random_state,
        n_estimators=n_estimators,
    )
def fitModel(model, x_train, y_train):
    """
    Fit a Random Forest Classifier in place via scikit-learn's fit()

    Parameters
    ----------
    model: sklearn.RandomForestClassifier object
        Random Forest model to be fitted
    x_train: list
        training feature values
    y_train: list
        training labels

    Examples
    --------
    >>> model = RandomForestClassifier(2, 245, 50)
    >>> fitModel(model, x_train, y_train)

    Returns
    -------
    Nothing
    """
    # Mutates *model*; nothing is returned, matching sklearn conventions.
    model.fit(x_train, y_train)
def predictModel(model, x_test):
    """
    Predict labels with a fitted Random Forest Classifier

    Parameters
    ----------
    model: sklearn.RandomForestClassifier object
        fitted Random Forest model
    x_test: list
        test feature values

    Examples
    --------
    >>> pred = predictModel(base_model, x_test)

    Returns
    -------
    list
        predicted labels for *x_test*
    """
    predictions = model.predict(x_test)
    return predictions
| StarcoderdataPython |
4900088 | # -*- coding: utf-8 -*-
from torch import nn
from torchvision import models
class CNNNet(nn.Module):
    """ResNet18-based classifier with an *out_dim*-way linear head.

    In training mode ``forward`` returns raw logits (suitable for a
    logits-based loss); in eval mode a sigmoid is applied to the output.
    """

    def __init__(self, out_dim, **kwargs):
        super(CNNNet, self).__init__()
        backbone = models.resnet18(pretrained=False)
        # Replace the 512-feature fully-connected head with a fresh mapping.
        backbone.fc = nn.Sequential(nn.Linear(512, out_dim))
        self.model = backbone
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        logits = self.model(x)
        return logits if self.training else self.sigmoid(logits)
41011 | <filename>tests/test_nafigator.py<gh_stars>1-10
#!/usr/bin/env python
"""Tests for `nafigator` package."""
import unittest
# Disable alphabetical test sorting so the numbered tests run in definition
# order — later tests consume the NAF files written by earlier ones.
unittest.TestLoader.sortTestMethodsUsing = None
from deepdiff import DeepDiff
from click.testing import CliRunner
from nafigator import NafDocument, parse2naf
from os.path import join
class TestNafigator_pdf(unittest.TestCase):
"""Tests for `nafigator` package."""
def test_1_pdf_generate_naf(self):
""" """
tree = parse2naf.generate_naf(
input=join("tests", "tests", "example.pdf"),
engine="stanza",
language="en",
naf_version="v3.1",
dtd_validation=False,
params={},
nlp=None,
)
assert tree.write(join("tests", "tests", "example.naf.xml")) == None
def test_1_split_pre_linguistic(self):
""" """
# only save the preprocess steps
tree = parse2naf.generate_naf(
input=join("tests", "tests", "example.pdf"),
engine="stanza",
language="en",
naf_version="v3.1",
dtd_validation=False,
params={'linguistic_layers': []},
nlp=None,
)
tree.write(join("tests", "tests", "example_preprocess.naf.xml")) == None
# start with saved document and process linguistic steps
naf = NafDocument().open(join("tests", "tests", "example_preprocess.naf.xml"))
tree = parse2naf.generate_naf(
input=naf,
engine="stanza",
language="en",
naf_version="v3.1",
params = {'preprocess_layers': []}
)
doc = NafDocument().open(join("tests", "tests", "example.naf.xml"))
assert tree.raw == doc.raw
def test_2_pdf_header_filedesc(self):
""" """
naf = NafDocument().open(join("tests", "tests", "example.naf.xml"))
actual = naf.header["fileDesc"]
expected = {
"filename": "tests\\tests\\example.pdf",
"filetype": "application/pdf",
}
assert actual["filename"] == expected["filename"]
assert actual["filetype"] == expected["filetype"]
def test_3_pdf_header_public(self):
""" """
naf = NafDocument().open(join("tests", "tests", "example.naf.xml"))
actual = naf.header["public"]
expected = {
"{http://purl.org/dc/elements/1.1/}uri": "tests\\tests\\example.pdf",
"{http://purl.org/dc/elements/1.1/}format": "application/pdf",
}
assert actual == expected, (
"expected: " + str(expected) + ", actual: " + str(actual)
)
# def test_4_pdf_header_linguistic_processors(self):
# naf = NafDocument().open(join("tests", "tests", "example.naf.xml"))
# actual = naf.header['linguisticProcessors']
# expected = [{'layer': 'pdftoxml', 'lps':
# [{'name': 'pdfminer-pdf2xml',
# 'version': 'pdfminer_version-20200124',
# 'beginTimestamp': '2021-05-05T13:25:16UTC',
# 'endTimestamp': '2021-05-05T13:25:16UTC'}]},
# {'layer': 'pdftotext', 'lps':
# [{'name': 'pdfminer-pdf2text',
# 'version': 'pdfminer_version-20200124',
# 'beginTimestamp': '2021-05-05T13:25:16UTC',
# 'endTimestamp': '2021-05-05T13:25:16UTC'}]},
# {'layer': 'formats', 'lps':
# [{'name': 'stanza-model_en',
# 'version': 'stanza_version-1.2',
# 'beginTimestamp': '2021-05-05T13:25:18UTC',
# 'endTimestamp': '2021-05-05T13:25:18UTC'}]},
# {'layer': 'entities', 'lps':
# [{'name': 'stanza-model_en',
# 'version': 'stanza_version-1.2',
# 'beginTimestamp': '2021-05-05T13:25:18UTC',
# 'endTimestamp': '2021-05-05T13:25:18UTC'}]},
# {'layer': 'text', 'lps':
# [{'name': 'stanza-model_en',
# 'version': 'stanza_version-1.2',
# 'beginTimestamp': '2021-05-05T13:25:18UTC',
# 'endTimestamp': '2021-05-05T13:25:18UTC'}]},
# {'layer': 'terms', 'lps':
# [{'name': 'stanza-model_en',
# 'version': 'stanza_version-1.2',
# 'beginTimestamp': '2021-05-05T13:25:18UTC',
# 'endTimestamp': '2021-05-05T13:25:18UTC'}]},
# {'layer': 'deps', 'lps':
# [{'name': 'stanza-model_en',
# 'version': 'stanza_version-1.2',
# 'beginTimestamp': '2021-05-05T13:25:18UTC',
# 'endTimestamp': '2021-05-05T13:25:18UTC'}]},
# {'layer': 'multiwords', 'lps':
# [{'name': 'stanza-model_en',
# 'version': 'stanza_version-1.2',
# 'beginTimestamp': '2021-05-05T13:25:18UTC',
# 'endTimestamp': '2021-05-05T13:25:18UTC'}]},
# {'layer': 'raw', 'lps':
# [{'name': 'stanza-model_en',
# 'version': 'stanza_version-1.2',
# 'beginTimestamp': '2021-05-05T13:25:18UTC',
# 'endTimestamp': '2021-05-05T13:25:18UTC'}]}]
# assert actual == expected, "expected: "+str(expected)+", actual: "+str(actual)
    def test_5_pdf_formats(self):
        """The formats layer mirrors the pdf's textbox/textline structure."""
        naf = NafDocument().open(join("tests", "tests", "example.naf.xml"))
        actual = naf.formats
        # One page element; two textboxes (one per paragraph), each with two
        # textlines carrying the font, size and character offsets.
        expected = [
            {
                "length": "268",
                "offset": "0",
                "textboxes": [
                    {
                        "textlines": [
                            {
                                "texts": [
                                    {
                                        "font": "CIDFont+F1",
                                        "size": "12.000",
                                        "length": "87",
                                        "offset": "0",
                                        "text": "The Nafigator package allows you to store NLP output from custom made spaCy and stanza ",
                                    }
                                ]
                            },
                            {
                                "texts": [
                                    {
                                        "font": "CIDFont+F1",
                                        "size": "12.000",
                                        "length": "77",
                                        "offset": "88",
                                        "text": "pipelines with (intermediate) results and all processing steps in one format.",
                                    }
                                ]
                            },
                        ]
                    },
                    {
                        "textlines": [
                            {
                                "texts": [
                                    {
                                        "font": "CIDFont+F1",
                                        "size": "12.000",
                                        "length": "86",
                                        "offset": "167",
                                        "text": "Multiwords like in “we have set that out below” are recognized (depending on your NLP ",
                                    }
                                ]
                            },
                            {
                                "texts": [
                                    {
                                        "font": "CIDFont+F1",
                                        "size": "12.000",
                                        "length": "11",
                                        "offset": "254",
                                        "text": "processor).",
                                    }
                                ]
                            },
                        ]
                    },
                ],
                "figures": [],
                "headers": [],
            }
        ]
        assert actual == expected, (
            "expected: " + str(expected) + ", actual: " + str(actual)
        )
def test_6_pdf_entities(self):
naf = NafDocument().open(join("tests", "tests", "example.naf.xml"))
actual = naf.entities
expected = [
{
"id": "e1",
"type": "PRODUCT",
"text": "Nafigator",
"span": [{"id": "t2"}],
},
{"id": "e2", "type": "CARDINAL", "text": "one", "span": [{"id": "t28"}]},
]
assert actual == expected, (
"expected: " + str(expected) + ", actual: " + str(actual)
)
def test_7_pdf_text(self):
naf = NafDocument().open(join("tests", "tests", "example.naf.xml"))
actual = naf.text
expected = [
{
"text": "The",
"page": "1",
"para": "1",
"sent": "1",
"id": "w1",
"length": "3",
"offset": "0",
},
{
"text": "Nafigator",
"page": "1",
"para": "1",
"sent": "1",
"id": "w2",
"length": "9",
"offset": "4",
},
{
"text": "package",
"page": "1",
"para": "1",
"sent": "1",
"id": "w3",
"length": "7",
"offset": "14",
},
{
"text": "allows",
"page": "1",
"para": "1",
"sent": "1",
"id": "w4",
"length": "6",
"offset": "22",
},
{
"text": "you",
"page": "1",
"para": "1",
"sent": "1",
"id": "w5",
"length": "3",
"offset": "29",
},
{
"text": "to",
"page": "1",
"para": "1",
"sent": "1",
"id": "w6",
"length": "2",
"offset": "33",
},
{
"text": "store",
"page": "1",
"para": "1",
"sent": "1",
"id": "w7",
"length": "5",
"offset": "36",
},
{
"text": "NLP",
"page": "1",
"para": "1",
"sent": "1",
"id": "w8",
"length": "3",
"offset": "42",
},
{
"text": "output",
"page": "1",
"para": "1",
"sent": "1",
"id": "w9",
"length": "6",
"offset": "46",
},
{
"text": "from",
"page": "1",
"para": "1",
"sent": "1",
"id": "w10",
"length": "4",
"offset": "53",
},
{
"text": "custom",
"page": "1",
"para": "1",
"sent": "1",
"id": "w11",
"length": "6",
"offset": "58",
},
{
"text": "made",
"page": "1",
"para": "1",
"sent": "1",
"id": "w12",
"length": "4",
"offset": "65",
},
{
"text": "spa",
"page": "1",
"para": "1",
"sent": "1",
"id": "w13",
"length": "3",
"offset": "70",
},
{
"text": "Cy",
"page": "1",
"para": "1",
"sent": "2",
"id": "w14",
"length": "2",
"offset": "73",
},
{
"text": "and",
"page": "1",
"para": "1",
"sent": "2",
"id": "w15",
"length": "3",
"offset": "76",
},
{
"text": "stanza",
"page": "1",
"para": "1",
"sent": "2",
"id": "w16",
"length": "6",
"offset": "80",
},
{
"text": "pipelines",
"page": "1",
"para": "1",
"sent": "2",
"id": "w17",
"length": "9",
"offset": "88",
},
{
"text": "with",
"page": "1",
"para": "1",
"sent": "2",
"id": "w18",
"length": "4",
"offset": "98",
},
{
"text": "(",
"page": "1",
"para": "1",
"sent": "2",
"id": "w19",
"length": "1",
"offset": "103",
},
{
"text": "intermediate",
"page": "1",
"para": "1",
"sent": "2",
"id": "w20",
"length": "12",
"offset": "104",
},
{
"text": ")",
"page": "1",
"para": "1",
"sent": "2",
"id": "w21",
"length": "1",
"offset": "116",
},
{
"text": "results",
"page": "1",
"para": "1",
"sent": "2",
"id": "w22",
"length": "7",
"offset": "118",
},
{
"text": "and",
"page": "1",
"para": "1",
"sent": "2",
"id": "w23",
"length": "3",
"offset": "126",
},
{
"text": "all",
"page": "1",
"para": "1",
"sent": "2",
"id": "w24",
"length": "3",
"offset": "130",
},
{
"text": "processing",
"page": "1",
"para": "1",
"sent": "2",
"id": "w25",
"length": "10",
"offset": "134",
},
{
"text": "steps",
"page": "1",
"para": "1",
"sent": "2",
"id": "w26",
"length": "5",
"offset": "145",
},
{
"text": "in",
"page": "1",
"para": "1",
"sent": "2",
"id": "w27",
"length": "2",
"offset": "151",
},
{
"text": "one",
"page": "1",
"para": "1",
"sent": "2",
"id": "w28",
"length": "3",
"offset": "154",
},
{
"text": "format",
"page": "1",
"para": "1",
"sent": "2",
"id": "w29",
"length": "6",
"offset": "158",
},
{
"text": ".",
"page": "1",
"para": "1",
"sent": "2",
"id": "w30",
"length": "1",
"offset": "164",
},
{
"text": "Multiwords",
"page": "1",
"para": "2",
"sent": "3",
"id": "w31",
"length": "10",
"offset": "167",
},
{
"text": "like",
"page": "1",
"para": "2",
"sent": "3",
"id": "w32",
"length": "4",
"offset": "178",
},
{
"text": "in",
"page": "1",
"para": "2",
"sent": "3",
"id": "w33",
"length": "2",
"offset": "183",
},
{
"text": "“",
"page": "1",
"para": "2",
"sent": "3",
"id": "w34",
"length": "1",
"offset": "186",
},
{
"text": "we",
"page": "1",
"para": "2",
"sent": "3",
"id": "w35",
"length": "2",
"offset": "187",
},
{
"text": "have",
"page": "1",
"para": "2",
"sent": "3",
"id": "w36",
"length": "4",
"offset": "190",
},
{
"text": "set",
"page": "1",
"para": "2",
"sent": "3",
"id": "w37",
"length": "3",
"offset": "195",
},
{
"text": "that",
"page": "1",
"para": "2",
"sent": "3",
"id": "w38",
"length": "4",
"offset": "199",
},
{
"text": "out",
"page": "1",
"para": "2",
"sent": "3",
"id": "w39",
"length": "3",
"offset": "204",
},
{
"text": "below",
"page": "1",
"para": "2",
"sent": "3",
"id": "w40",
"length": "5",
"offset": "208",
},
{
"text": "”",
"page": "1",
"para": "2",
"sent": "3",
"id": "w41",
"length": "1",
"offset": "213",
},
{
"text": "are",
"page": "1",
"para": "2",
"sent": "3",
"id": "w42",
"length": "3",
"offset": "215",
},
{
"text": "recognized",
"page": "1",
"para": "2",
"sent": "3",
"id": "w43",
"length": "10",
"offset": "219",
},
{
"text": "(",
"page": "1",
"para": "2",
"sent": "3",
"id": "w44",
"length": "1",
"offset": "230",
},
{
"text": "depending",
"page": "1",
"para": "2",
"sent": "3",
"id": "w45",
"length": "9",
"offset": "231",
},
{
"text": "on",
"page": "1",
"para": "2",
"sent": "3",
"id": "w46",
"length": "2",
"offset": "241",
},
{
"text": "your",
"page": "1",
"para": "2",
"sent": "3",
"id": "w47",
"length": "4",
"offset": "244",
},
{
"text": "NLP",
"page": "1",
"para": "2",
"sent": "3",
"id": "w48",
"length": "3",
"offset": "249",
},
{
"text": "processor",
"page": "1",
"para": "2",
"sent": "3",
"id": "w49",
"length": "9",
"offset": "254",
},
{
"text": ")",
"page": "1",
"para": "2",
"sent": "3",
"id": "w50",
"length": "1",
"offset": "263",
},
{
"text": ".",
"page": "1",
"para": "2",
"sent": "3",
"id": "w51",
"length": "1",
"offset": "264",
},
]
diff = DeepDiff(actual, expected)
assert diff == dict(), diff
def test_8_pdf_terms(self):
naf = NafDocument().open(join("tests", "tests", "example.naf.xml"))
actual = naf.terms
expected = [
{
"id": "t1",
"lemma": "the",
"pos": "DET",
"type": "open",
"morphofeat": "Definite=Def|PronType=Art",
"span": [{"id": "w1"}],
},
{
"id": "t2",
"lemma": "Nafigator",
"pos": "PROPN",
"type": "open",
"morphofeat": "Number=Sing",
"span": [{"id": "w2"}],
},
{
"id": "t3",
"lemma": "package",
"pos": "NOUN",
"type": "open",
"morphofeat": "Number=Sing",
"span": [{"id": "w3"}],
},
{
"id": "t4",
"lemma": "allow",
"pos": "VERB",
"type": "open",
"morphofeat": "Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin",
"span": [{"id": "w4"}],
},
{
"id": "t5",
"lemma": "you",
"pos": "PRON",
"type": "open",
"morphofeat": "Case=Acc|Person=2|PronType=Prs",
"span": [{"id": "w5"}],
},
{
"id": "t6",
"lemma": "to",
"pos": "PART",
"type": "open",
"span": [{"id": "w6"}],
},
{
"id": "t7",
"lemma": "store",
"pos": "VERB",
"type": "open",
"morphofeat": "VerbForm=Inf",
"span": [{"id": "w7"}],
},
{
"id": "t8",
"lemma": "nlp",
"pos": "NOUN",
"type": "open",
"morphofeat": "Number=Sing",
"span": [{"id": "w8"}],
},
{
"id": "t9",
"lemma": "output",
"pos": "NOUN",
"type": "open",
"morphofeat": "Number=Sing",
"span": [{"id": "w9"}],
},
{
"id": "t10",
"lemma": "from",
"pos": "ADP",
"type": "open",
"span": [{"id": "w10"}],
},
{
"id": "t11",
"lemma": "custom",
"pos": "ADJ",
"type": "open",
"morphofeat": "Degree=Pos",
"span": [{"id": "w11"}],
},
{
"id": "t12",
"lemma": "make",
"pos": "VERB",
"type": "open",
"morphofeat": "Tense=Past|VerbForm=Part",
"span": [{"id": "w12"}],
},
{
"id": "t13",
"lemma": "spa",
"pos": "NOUN",
"type": "open",
"morphofeat": "Number=Sing",
"span": [{"id": "w13"}],
},
{
"id": "t14",
"lemma": "cy",
"pos": "NOUN",
"type": "open",
"morphofeat": "Number=Sing",
"span": [{"id": "w14"}],
},
{
"id": "t15",
"lemma": "and",
"pos": "CCONJ",
"type": "open",
"span": [{"id": "w15"}],
},
{
"id": "t16",
"lemma": "stanza",
"pos": "NOUN",
"type": "open",
"morphofeat": "Number=Sing",
"span": [{"id": "w16"}],
},
{
"id": "t17",
"lemma": "pipeline",
"pos": "NOUN",
"type": "open",
"morphofeat": "Number=Plur",
"span": [{"id": "w17"}],
},
{
"id": "t18",
"lemma": "with",
"pos": "ADP",
"type": "open",
"span": [{"id": "w18"}],
},
{
"id": "t19",
"lemma": "(",
"pos": "PUNCT",
"type": "open",
"span": [{"id": "w19"}],
},
{
"id": "t20",
"lemma": "intermediate",
"pos": "ADJ",
"type": "open",
"morphofeat": "Degree=Pos",
"span": [{"id": "w20"}],
},
{
"id": "t21",
"lemma": ")",
"pos": "PUNCT",
"type": "open",
"span": [{"id": "w21"}],
},
{
"id": "t22",
"lemma": "result",
"pos": "NOUN",
"type": "open",
"morphofeat": "Number=Plur",
"span": [{"id": "w22"}],
},
{
"id": "t23",
"lemma": "and",
"pos": "CCONJ",
"type": "open",
"span": [{"id": "w23"}],
},
{
"id": "t24",
"lemma": "all",
"pos": "DET",
"type": "open",
"span": [{"id": "w24"}],
},
{
"id": "t25",
"lemma": "processing",
"pos": "NOUN",
"type": "open",
"morphofeat": "Number=Sing",
"span": [{"id": "w25"}],
},
{
"id": "t26",
"lemma": "step",
"pos": "NOUN",
"type": "open",
"morphofeat": "Number=Plur",
"span": [{"id": "w26"}],
},
{
"id": "t27",
"lemma": "in",
"pos": "ADP",
"type": "open",
"span": [{"id": "w27"}],
},
{
"id": "t28",
"lemma": "one",
"pos": "NUM",
"type": "open",
"morphofeat": "NumType=Card",
"span": [{"id": "w28"}],
},
{
"id": "t29",
"lemma": "format",
"pos": "NOUN",
"type": "open",
"morphofeat": "Number=Sing",
"span": [{"id": "w29"}],
},
{
"id": "t30",
"lemma": ".",
"pos": "PUNCT",
"type": "open",
"span": [{"id": "w30"}],
},
{
"id": "t31",
"lemma": "multiword",
"pos": "NOUN",
"type": "open",
"morphofeat": "Number=Plur",
"span": [{"id": "w31"}],
},
{
"id": "t32",
"lemma": "like",
"pos": "ADP",
"type": "open",
"span": [{"id": "w32"}],
},
{
"id": "t33",
"lemma": "in",
"pos": "ADP",
"type": "open",
"span": [{"id": "w33"}],
},
{
"id": "t34",
"lemma": '"',
"pos": "PUNCT",
"type": "open",
"span": [{"id": "w34"}],
},
{
"id": "t35",
"lemma": "we",
"pos": "PRON",
"type": "open",
"morphofeat": "Case=Nom|Number=Plur|Person=1|PronType=Prs",
"span": [{"id": "w35"}],
},
{
"id": "t36",
"lemma": "have",
"pos": "AUX",
"type": "open",
"morphofeat": "Mood=Ind|Tense=Pres|VerbForm=Fin",
"span": [{"id": "w36"}],
},
{
"id": "t37",
"lemma": "set",
"pos": "VERB",
"type": "open",
"morphofeat": "Tense=Past|VerbForm=Part",
"component_of": "mw1",
"span": [{"id": "w37"}],
},
{
"id": "t38",
"lemma": "that",
"pos": "SCONJ",
"type": "open",
"span": [{"id": "w38"}],
},
{
"id": "t39",
"lemma": "out",
"pos": "ADP",
"type": "open",
"component_of": "mw1",
"span": [{"id": "w39"}],
},
{
"id": "t40",
"lemma": "below",
"pos": "ADV",
"type": "open",
"span": [{"id": "w40"}],
},
{
"id": "t41",
"lemma": '"',
"pos": "PUNCT",
"type": "open",
"span": [{"id": "w41"}],
},
{
"id": "t42",
"lemma": "be",
"pos": "AUX",
"type": "open",
"morphofeat": "Mood=Ind|Tense=Pres|VerbForm=Fin",
"span": [{"id": "w42"}],
},
{
"id": "t43",
"lemma": "recognize",
"pos": "VERB",
"type": "open",
"morphofeat": "Tense=Past|VerbForm=Part|Voice=Pass",
"span": [{"id": "w43"}],
},
{
"id": "t44",
"lemma": "(",
"pos": "PUNCT",
"type": "open",
"span": [{"id": "w44"}],
},
{
"id": "t45",
"lemma": "depend",
"pos": "VERB",
"type": "open",
"morphofeat": "VerbForm=Ger",
"span": [{"id": "w45"}],
},
{
"id": "t46",
"lemma": "on",
"pos": "ADP",
"type": "open",
"span": [{"id": "w46"}],
},
{
"id": "t47",
"lemma": "you",
"pos": "PRON",
"type": "open",
"morphofeat": "Person=2|Poss=Yes|PronType=Prs",
"span": [{"id": "w47"}],
},
{
"id": "t48",
"lemma": "nlp",
"pos": "NOUN",
"type": "open",
"morphofeat": "Number=Sing",
"span": [{"id": "w48"}],
},
{
"id": "t49",
"lemma": "processor",
"pos": "NOUN",
"type": "open",
"morphofeat": "Number=Sing",
"span": [{"id": "w49"}],
},
{
"id": "t50",
"lemma": ")",
"pos": "PUNCT",
"type": "open",
"span": [{"id": "w50"}],
},
{
"id": "t51",
"lemma": ".",
"pos": "PUNCT",
"type": "open",
"span": [{"id": "w51"}],
},
]
assert actual == expected, (
"expected: " + str(expected) + ", actual: " + str(actual)
)
def test_9_pdf_dependencies(self):
naf = NafDocument().open(join("tests", "tests", "example.naf.xml"))
actual = naf.deps
expected = [
{"from_term": "t3", "to_term": "t1", "rfunc": "det"},
{"from_term": "t4", "to_term": "t3", "rfunc": "nsubj"},
{"from_term": "t3", "to_term": "t2", "rfunc": "compound"},
{"from_term": "t4", "to_term": "t5", "rfunc": "obj"},
{"from_term": "t7", "to_term": "t6", "rfunc": "mark"},
{"from_term": "t4", "to_term": "t7", "rfunc": "xcomp"},
{"from_term": "t9", "to_term": "t8", "rfunc": "compound"},
{"from_term": "t7", "to_term": "t9", "rfunc": "obj"},
{"from_term": "t13", "to_term": "t10", "rfunc": "case"},
{"from_term": "t7", "to_term": "t13", "rfunc": "obl"},
{"from_term": "t12", "to_term": "t11", "rfunc": "compound"},
{"from_term": "t13", "to_term": "t12", "rfunc": "amod"},
{"from_term": "t17", "to_term": "t14", "rfunc": "compound"},
{"from_term": "t16", "to_term": "t15", "rfunc": "cc"},
{"from_term": "t14", "to_term": "t16", "rfunc": "conj"},
{"from_term": "t22", "to_term": "t18", "rfunc": "case"},
{"from_term": "t17", "to_term": "t22", "rfunc": "nmod"},
{"from_term": "t22", "to_term": "t19", "rfunc": "punct"},
{"from_term": "t22", "to_term": "t20", "rfunc": "amod"},
{"from_term": "t22", "to_term": "t21", "rfunc": "punct"},
{"from_term": "t26", "to_term": "t23", "rfunc": "cc"},
{"from_term": "t22", "to_term": "t26", "rfunc": "conj"},
{"from_term": "t26", "to_term": "t24", "rfunc": "det"},
{"from_term": "t26", "to_term": "t25", "rfunc": "compound"},
{"from_term": "t29", "to_term": "t27", "rfunc": "case"},
{"from_term": "t26", "to_term": "t29", "rfunc": "nmod"},
{"from_term": "t29", "to_term": "t28", "rfunc": "nummod"},
{"from_term": "t17", "to_term": "t30", "rfunc": "punct"},
{"from_term": "t37", "to_term": "t32", "rfunc": "mark"},
{"from_term": "t31", "to_term": "t37", "rfunc": "acl"},
{"from_term": "t37", "to_term": "t33", "rfunc": "mark"},
{"from_term": "t37", "to_term": "t34", "rfunc": "punct"},
{"from_term": "t37", "to_term": "t35", "rfunc": "nsubj"},
{"from_term": "t37", "to_term": "t36", "rfunc": "aux"},
{"from_term": "t43", "to_term": "t38", "rfunc": "mark"},
{"from_term": "t37", "to_term": "t43", "rfunc": "ccomp"},
{"from_term": "t37", "to_term": "t39", "rfunc": "compound:prt"},
{"from_term": "t37", "to_term": "t40", "rfunc": "advmod"},
{"from_term": "t37", "to_term": "t41", "rfunc": "punct"},
{"from_term": "t43", "to_term": "t42", "rfunc": "aux:pass"},
{"from_term": "t49", "to_term": "t44", "rfunc": "punct"},
{"from_term": "t43", "to_term": "t49", "rfunc": "obl"},
{"from_term": "t49", "to_term": "t45", "rfunc": "case"},
{"from_term": "t49", "to_term": "t46", "rfunc": "case"},
{"from_term": "t49", "to_term": "t47", "rfunc": "nmod:poss"},
{"from_term": "t49", "to_term": "t48", "rfunc": "compound"},
{"from_term": "t49", "to_term": "t50", "rfunc": "punct"},
{"from_term": "t43", "to_term": "t51", "rfunc": "punct"},
]
assert actual == expected, (
"expected: " + str(expected) + ", actual: " + str(actual)
)
def test_10_pdf_multiwords(self):
naf = NafDocument().open(join("tests", "tests", "example.naf.xml"))
actual = naf.multiwords
expected = [
{
"id": "mw1",
"lemma": "set_out",
"pos": "VERB",
"type": "phrasal",
"components": [
{"id": "mw1.c1", "span": [{"id": "t37"}]},
{"id": "mw1.c2", "span": [{"id": "t39"}]},
],
}
]
assert actual == expected, (
"expected: " + str(expected) + ", actual: " + str(actual)
)
def test_11_raw(self):
naf = NafDocument().open(join("tests", "tests", "example.naf.xml"))
actual = naf.raw
expected = "The Nafigator package allows you to store NLP output from custom made spaCy and stanza pipelines with (intermediate) results and all processing steps in one format. Multiwords like in “we have set that out below” are recognized (depending on your NLP processor)."
assert actual == expected, (
"expected: " + str(expected) + ", actual: " + str(actual)
)
# def test_command_line_interface(self):
# """Test the CLI."""
# runner = CliRunner()
# result = runner.invoke(cli.main)
# assert result.exit_code == 0
# # assert 'nafigator.cli.main' in result.output
# help_result = runner.invoke(cli.main, ['--help'])
# assert help_result.exit_code == 0
# assert '--help Show this message and exit.' in help_result.output
class TestNafigator_docx(unittest.TestCase):
def test_1_docx_generate_naf(self):
""" """
tree = parse2naf.generate_naf(
input=join("tests", "tests", "example.docx"),
engine="stanza",
language="en",
naf_version="v3.1",
dtd_validation=False,
params={},
nlp=None,
)
assert tree.write(join("tests", "tests", "example.docx.naf.xml")) == None
def test_2_docx_header_filedesc(self):
""" """
naf = NafDocument().open(join("tests", "tests", "example.docx.naf.xml"))
actual = naf.header["fileDesc"]
expected = {
"filename": "tests\\tests\\example.docx",
"filetype": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
}
assert actual["filename"] == expected["filename"]
assert actual["filetype"] == expected["filetype"]
def test_3_docx_header_public(self):
""" """
naf = NafDocument().open(join("tests", "tests", "example.docx.naf.xml"))
actual = naf.header["public"]
expected = {
"{http://purl.org/dc/elements/1.1/}uri": "tests\\tests\\example.docx",
"{http://purl.org/dc/elements/1.1/}format": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
}
assert actual == expected, (
"expected: " + str(expected) + ", actual: " + str(actual)
)
# def test_5_formats(self):
# assert actual == expected
def test_6_docx_entities(self):
naf = NafDocument().open(join("tests", "tests", "example.docx.naf.xml"))
actual = naf.entities
expected = [
{
"id": "e1",
"type": "PRODUCT",
"text": "Nafigator",
"span": [{"id": "t2"}],
},
{"id": "e2", "type": "PRODUCT", "text": "Spacy", "span": [{"id": "t13"}]},
{"id": "e3", "type": "CARDINAL", "text": "one", "span": [{"id": "t27"}]},
]
assert actual == expected, (
"expected: " + str(expected) + ", actual: " + str(actual)
)
def test_7_docx_text(self):
naf = NafDocument().open(join("tests", "tests", "example.docx.naf.xml"))
actual = naf.text
expected = [
{
"text": "The",
"id": "w1",
"sent": "1",
"para": "1",
"page": "1",
"offset": "0",
"length": "3",
},
{
"text": "Nafigator",
"id": "w2",
"sent": "1",
"para": "1",
"page": "1",
"offset": "4",
"length": "9",
},
{
"text": "package",
"id": "w3",
"sent": "1",
"para": "1",
"page": "1",
"offset": "14",
"length": "7",
},
{
"text": "allows",
"id": "w4",
"sent": "1",
"para": "1",
"page": "1",
"offset": "22",
"length": "6",
},
{
"text": "you",
"id": "w5",
"sent": "1",
"para": "1",
"page": "1",
"offset": "29",
"length": "3",
},
{
"text": "to",
"id": "w6",
"sent": "1",
"para": "1",
"page": "1",
"offset": "33",
"length": "2",
},
{
"text": "store",
"id": "w7",
"sent": "1",
"para": "1",
"page": "1",
"offset": "36",
"length": "5",
},
{
"text": "NLP",
"id": "w8",
"sent": "1",
"para": "1",
"page": "1",
"offset": "42",
"length": "3",
},
{
"text": "output",
"id": "w9",
"sent": "1",
"para": "1",
"page": "1",
"offset": "46",
"length": "6",
},
{
"text": "from",
"id": "w10",
"sent": "1",
"para": "1",
"page": "1",
"offset": "53",
"length": "4",
},
{
"text": "custom",
"id": "w11",
"sent": "1",
"para": "1",
"page": "1",
"offset": "58",
"length": "6",
},
{
"text": "made",
"id": "w12",
"sent": "1",
"para": "1",
"page": "1",
"offset": "65",
"length": "4",
},
{
"text": "Spacy",
"id": "w13",
"sent": "1",
"para": "1",
"page": "1",
"offset": "70",
"length": "5",
},
{
"text": "and",
"id": "w14",
"sent": "1",
"para": "1",
"page": "1",
"offset": "76",
"length": "3",
},
{
"text": "stanza",
"id": "w15",
"sent": "1",
"para": "1",
"page": "1",
"offset": "80",
"length": "6",
},
{
"text": "pipelines",
"id": "w16",
"sent": "1",
"para": "1",
"page": "1",
"offset": "87",
"length": "9",
},
{
"text": "with",
"id": "w17",
"sent": "1",
"para": "1",
"page": "1",
"offset": "97",
"length": "4",
},
{
"text": "(",
"id": "w18",
"sent": "1",
"para": "1",
"page": "1",
"offset": "102",
"length": "1",
},
{
"text": "intermediate",
"id": "w19",
"sent": "1",
"para": "1",
"page": "1",
"offset": "103",
"length": "12",
},
{
"text": ")",
"id": "w20",
"sent": "1",
"para": "1",
"page": "1",
"offset": "115",
"length": "1",
},
{
"text": "results",
"id": "w21",
"sent": "1",
"para": "1",
"page": "1",
"offset": "117",
"length": "7",
},
{
"text": "and",
"id": "w22",
"sent": "1",
"para": "1",
"page": "1",
"offset": "125",
"length": "3",
},
{
"text": "all",
"id": "w23",
"sent": "1",
"para": "1",
"page": "1",
"offset": "129",
"length": "3",
},
{
"text": "processing",
"id": "w24",
"sent": "1",
"para": "1",
"page": "1",
"offset": "133",
"length": "10",
},
{
"text": "steps",
"id": "w25",
"sent": "1",
"para": "1",
"page": "1",
"offset": "144",
"length": "5",
},
{
"text": "in",
"id": "w26",
"sent": "1",
"para": "1",
"page": "1",
"offset": "150",
"length": "2",
},
{
"text": "one",
"id": "w27",
"sent": "1",
"para": "1",
"page": "1",
"offset": "153",
"length": "3",
},
{
"text": "format",
"id": "w28",
"sent": "1",
"para": "1",
"page": "1",
"offset": "157",
"length": "6",
},
{
"text": ".",
"id": "w29",
"sent": "1",
"para": "1",
"page": "1",
"offset": "163",
"length": "1",
},
{
"text": "Multiwords",
"id": "w30",
"sent": "2",
"para": "2",
"page": "1",
"offset": "166",
"length": "10",
},
{
"text": "like",
"id": "w31",
"sent": "2",
"para": "2",
"page": "1",
"offset": "177",
"length": "4",
},
{
"text": "in",
"id": "w32",
"sent": "2",
"para": "2",
"page": "1",
"offset": "182",
"length": "2",
},
{
"text": "“",
"id": "w33",
"sent": "2",
"para": "2",
"page": "1",
"offset": "185",
"length": "1",
},
{
"text": "we",
"id": "w34",
"sent": "2",
"para": "2",
"page": "1",
"offset": "186",
"length": "2",
},
{
"text": "have",
"id": "w35",
"sent": "2",
"para": "2",
"page": "1",
"offset": "189",
"length": "4",
},
{
"text": "set",
"id": "w36",
"sent": "2",
"para": "2",
"page": "1",
"offset": "194",
"length": "3",
},
{
"text": "that",
"id": "w37",
"sent": "2",
"para": "2",
"page": "1",
"offset": "198",
"length": "4",
},
{
"text": "out",
"id": "w38",
"sent": "2",
"para": "2",
"page": "1",
"offset": "203",
"length": "3",
},
{
"text": "below",
"id": "w39",
"sent": "2",
"para": "2",
"page": "1",
"offset": "207",
"length": "5",
},
{
"text": "”",
"id": "w40",
"sent": "2",
"para": "2",
"page": "1",
"offset": "212",
"length": "1",
},
{
"text": "are",
"id": "w41",
"sent": "2",
"para": "2",
"page": "1",
"offset": "214",
"length": "3",
},
{
"text": "recognized",
"id": "w42",
"sent": "2",
"para": "2",
"page": "1",
"offset": "218",
"length": "10",
},
{
"text": "(",
"id": "w43",
"sent": "2",
"para": "2",
"page": "1",
"offset": "229",
"length": "1",
},
{
"text": "depending",
"id": "w44",
"sent": "2",
"para": "2",
"page": "1",
"offset": "230",
"length": "9",
},
{
"text": "on",
"id": "w45",
"sent": "2",
"para": "2",
"page": "1",
"offset": "240",
"length": "2",
},
{
"text": "your",
"id": "w46",
"sent": "2",
"para": "2",
"page": "1",
"offset": "243",
"length": "4",
},
{
"text": "NLP",
"id": "w47",
"sent": "2",
"para": "2",
"page": "1",
"offset": "248",
"length": "3",
},
{
"text": "processor",
"id": "w48",
"sent": "2",
"para": "2",
"page": "1",
"offset": "252",
"length": "9",
},
{
"text": ")",
"id": "w49",
"sent": "2",
"para": "2",
"page": "1",
"offset": "261",
"length": "1",
},
{
"text": ".",
"id": "w50",
"sent": "2",
"para": "2",
"page": "1",
"offset": "262",
"length": "1",
},
]
diff = DeepDiff(actual, expected)
assert diff == dict(), diff
def test_8_docx_terms(self):
naf = NafDocument().open(join("tests", "tests", "example.docx.naf.xml"))
actual = naf.terms
expected = [
{
"id": "t1",
"type": "open",
"lemma": "the",
"pos": "DET",
"morphofeat": "Definite=Def|PronType=Art",
"span": [{"id": "w1"}],
},
{
"id": "t2",
"type": "open",
"lemma": "Nafigator",
"pos": "PROPN",
"morphofeat": "Number=Sing",
"span": [{"id": "w2"}],
},
{
"id": "t3",
"type": "open",
"lemma": "package",
"pos": "NOUN",
"morphofeat": "Number=Sing",
"span": [{"id": "w3"}],
},
{
"id": "t4",
"type": "open",
"lemma": "allow",
"pos": "VERB",
"morphofeat": "Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin",
"span": [{"id": "w4"}],
},
{
"id": "t5",
"type": "open",
"lemma": "you",
"pos": "PRON",
"morphofeat": "Case=Acc|Person=2|PronType=Prs",
"span": [{"id": "w5"}],
},
{
"id": "t6",
"type": "open",
"lemma": "to",
"pos": "PART",
"span": [{"id": "w6"}],
},
{
"id": "t7",
"type": "open",
"lemma": "store",
"pos": "VERB",
"morphofeat": "VerbForm=Inf",
"span": [{"id": "w7"}],
},
{
"id": "t8",
"type": "open",
"lemma": "nlp",
"pos": "NOUN",
"morphofeat": "Number=Sing",
"span": [{"id": "w8"}],
},
{
"id": "t9",
"type": "open",
"lemma": "output",
"pos": "NOUN",
"morphofeat": "Number=Sing",
"span": [{"id": "w9"}],
},
{
"id": "t10",
"type": "open",
"lemma": "from",
"pos": "ADP",
"span": [{"id": "w10"}],
},
{
"id": "t11",
"type": "open",
"lemma": "custom",
"pos": "NOUN",
"morphofeat": "Number=Sing",
"span": [{"id": "w11"}],
},
{
"id": "t12",
"type": "open",
"lemma": "make",
"pos": "VERB",
"morphofeat": "Tense=Past|VerbForm=Part",
"span": [{"id": "w12"}],
},
{
"id": "t13",
"type": "open",
"lemma": "spacy",
"pos": "NOUN",
"morphofeat": "Number=Sing",
"span": [{"id": "w13"}],
},
{
"id": "t14",
"type": "open",
"lemma": "and",
"pos": "CCONJ",
"span": [{"id": "w14"}],
},
{
"id": "t15",
"type": "open",
"lemma": "stanza",
"pos": "NOUN",
"morphofeat": "Number=Sing",
"span": [{"id": "w15"}],
},
{
"id": "t16",
"type": "open",
"lemma": "pipeline",
"pos": "NOUN",
"morphofeat": "Number=Plur",
"span": [{"id": "w16"}],
},
{
"id": "t17",
"type": "open",
"lemma": "with",
"pos": "ADP",
"span": [{"id": "w17"}],
},
{
"id": "t18",
"type": "open",
"lemma": "(",
"pos": "PUNCT",
"span": [{"id": "w18"}],
},
{
"id": "t19",
"type": "open",
"lemma": "intermediate",
"pos": "ADJ",
"morphofeat": "Degree=Pos",
"span": [{"id": "w19"}],
},
{
"id": "t20",
"type": "open",
"lemma": ")",
"pos": "PUNCT",
"span": [{"id": "w20"}],
},
{
"id": "t21",
"type": "open",
"lemma": "result",
"pos": "NOUN",
"morphofeat": "Number=Plur",
"span": [{"id": "w21"}],
},
{
"id": "t22",
"type": "open",
"lemma": "and",
"pos": "CCONJ",
"span": [{"id": "w22"}],
},
{
"id": "t23",
"type": "open",
"lemma": "all",
"pos": "DET",
"span": [{"id": "w23"}],
},
{
"id": "t24",
"type": "open",
"lemma": "processing",
"pos": "NOUN",
"morphofeat": "Number=Sing",
"span": [{"id": "w24"}],
},
{
"id": "t25",
"type": "open",
"lemma": "step",
"pos": "NOUN",
"morphofeat": "Number=Plur",
"span": [{"id": "w25"}],
},
{
"id": "t26",
"type": "open",
"lemma": "in",
"pos": "ADP",
"span": [{"id": "w26"}],
},
{
"id": "t27",
"type": "open",
"lemma": "one",
"pos": "NUM",
"morphofeat": "NumType=Card",
"span": [{"id": "w27"}],
},
{
"id": "t28",
"type": "open",
"lemma": "format",
"pos": "NOUN",
"morphofeat": "Number=Sing",
"span": [{"id": "w28"}],
},
{
"id": "t29",
"type": "open",
"lemma": ".",
"pos": "PUNCT",
"span": [{"id": "w29"}],
},
{
"id": "t30",
"type": "open",
"lemma": "multiword",
"pos": "NOUN",
"morphofeat": "Number=Plur",
"span": [{"id": "w30"}],
},
{
"id": "t31",
"type": "open",
"lemma": "like",
"pos": "ADP",
"span": [{"id": "w31"}],
},
{
"id": "t32",
"type": "open",
"lemma": "in",
"pos": "ADP",
"span": [{"id": "w32"}],
},
{
"id": "t33",
"type": "open",
"lemma": '"',
"pos": "PUNCT",
"span": [{"id": "w33"}],
},
{
"id": "t34",
"type": "open",
"lemma": "we",
"pos": "PRON",
"morphofeat": "Case=Nom|Number=Plur|Person=1|PronType=Prs",
"span": [{"id": "w34"}],
},
{
"id": "t35",
"type": "open",
"lemma": "have",
"pos": "AUX",
"morphofeat": "Mood=Ind|Tense=Pres|VerbForm=Fin",
"span": [{"id": "w35"}],
},
{
"id": "t36",
"type": "open",
"lemma": "set",
"pos": "VERB",
"morphofeat": "Tense=Past|VerbForm=Part",
"component_of": "mw1",
"span": [{"id": "w36"}],
},
{
"id": "t37",
"type": "open",
"lemma": "that",
"pos": "SCONJ",
"span": [{"id": "w37"}],
},
{
"id": "t38",
"type": "open",
"lemma": "out",
"pos": "ADP",
"component_of": "mw1",
"span": [{"id": "w38"}],
},
{
"id": "t39",
"type": "open",
"lemma": "below",
"pos": "ADV",
"span": [{"id": "w39"}],
},
{
"id": "t40",
"type": "open",
"lemma": '"',
"pos": "PUNCT",
"span": [{"id": "w40"}],
},
{
"id": "t41",
"type": "open",
"lemma": "be",
"pos": "AUX",
"morphofeat": "Mood=Ind|Tense=Pres|VerbForm=Fin",
"span": [{"id": "w41"}],
},
{
"id": "t42",
"type": "open",
"lemma": "recognize",
"pos": "VERB",
"morphofeat": "Tense=Past|VerbForm=Part|Voice=Pass",
"span": [{"id": "w42"}],
},
{
"id": "t43",
"type": "open",
"lemma": "(",
"pos": "PUNCT",
"span": [{"id": "w43"}],
},
{
"id": "t44",
"type": "open",
"lemma": "depend",
"pos": "VERB",
"morphofeat": "VerbForm=Ger",
"span": [{"id": "w44"}],
},
{
"id": "t45",
"type": "open",
"lemma": "on",
"pos": "ADP",
"span": [{"id": "w45"}],
},
{
"id": "t46",
"type": "open",
"lemma": "you",
"pos": "PRON",
"morphofeat": "Person=2|Poss=Yes|PronType=Prs",
"span": [{"id": "w46"}],
},
{
"id": "t47",
"type": "open",
"lemma": "nlp",
"pos": "NOUN",
"morphofeat": "Number=Sing",
"span": [{"id": "w47"}],
},
{
"id": "t48",
"type": "open",
"lemma": "processor",
"pos": "NOUN",
"morphofeat": "Number=Sing",
"span": [{"id": "w48"}],
},
{
"id": "t49",
"type": "open",
"lemma": ")",
"pos": "PUNCT",
"span": [{"id": "w49"}],
},
{
"id": "t50",
"type": "open",
"lemma": ".",
"pos": "PUNCT",
"span": [{"id": "w50"}],
},
]
assert actual == expected, (
"expected: " + str(expected) + ", actual: " + str(actual)
)
    def test_9_docx_dependencies(self):
        """Check the dependency layer of the docx NAF.

        Each entry encodes one Universal Dependencies relation (`rfunc`)
        from the head term (`from_term`) to the dependent term (`to_term`).
        """
        naf = NafDocument().open(join("tests", "tests", "example.docx.naf.xml"))
        actual = naf.deps
        # Expected relations for both sentences of the example document.
        expected = [
            {"from_term": "t3", "to_term": "t1", "rfunc": "det"},
            {"from_term": "t4", "to_term": "t3", "rfunc": "nsubj"},
            {"from_term": "t3", "to_term": "t2", "rfunc": "compound"},
            {"from_term": "t4", "to_term": "t5", "rfunc": "obj"},
            {"from_term": "t7", "to_term": "t6", "rfunc": "mark"},
            {"from_term": "t4", "to_term": "t7", "rfunc": "xcomp"},
            {"from_term": "t9", "to_term": "t8", "rfunc": "compound"},
            {"from_term": "t7", "to_term": "t9", "rfunc": "obj"},
            {"from_term": "t13", "to_term": "t10", "rfunc": "case"},
            {"from_term": "t9", "to_term": "t13", "rfunc": "nmod"},
            {"from_term": "t12", "to_term": "t11", "rfunc": "compound"},
            {"from_term": "t13", "to_term": "t12", "rfunc": "amod"},
            {"from_term": "t16", "to_term": "t14", "rfunc": "cc"},
            {"from_term": "t13", "to_term": "t16", "rfunc": "conj"},
            {"from_term": "t16", "to_term": "t15", "rfunc": "compound"},
            {"from_term": "t21", "to_term": "t17", "rfunc": "case"},
            {"from_term": "t7", "to_term": "t21", "rfunc": "obl"},
            {"from_term": "t21", "to_term": "t18", "rfunc": "punct"},
            {"from_term": "t21", "to_term": "t19", "rfunc": "amod"},
            {"from_term": "t21", "to_term": "t20", "rfunc": "punct"},
            {"from_term": "t25", "to_term": "t22", "rfunc": "cc"},
            {"from_term": "t13", "to_term": "t25", "rfunc": "conj"},
            {"from_term": "t25", "to_term": "t23", "rfunc": "det"},
            {"from_term": "t25", "to_term": "t24", "rfunc": "compound"},
            {"from_term": "t28", "to_term": "t26", "rfunc": "case"},
            {"from_term": "t25", "to_term": "t28", "rfunc": "nmod"},
            {"from_term": "t28", "to_term": "t27", "rfunc": "nummod"},
            {"from_term": "t4", "to_term": "t29", "rfunc": "punct"},
            {"from_term": "t36", "to_term": "t31", "rfunc": "mark"},
            {"from_term": "t30", "to_term": "t36", "rfunc": "acl"},
            {"from_term": "t36", "to_term": "t32", "rfunc": "mark"},
            {"from_term": "t36", "to_term": "t33", "rfunc": "punct"},
            {"from_term": "t36", "to_term": "t34", "rfunc": "nsubj"},
            {"from_term": "t36", "to_term": "t35", "rfunc": "aux"},
            {"from_term": "t42", "to_term": "t37", "rfunc": "mark"},
            {"from_term": "t36", "to_term": "t42", "rfunc": "ccomp"},
            {"from_term": "t36", "to_term": "t38", "rfunc": "compound:prt"},
            {"from_term": "t36", "to_term": "t39", "rfunc": "advmod"},
            {"from_term": "t36", "to_term": "t40", "rfunc": "punct"},
            {"from_term": "t42", "to_term": "t41", "rfunc": "aux:pass"},
            {"from_term": "t48", "to_term": "t43", "rfunc": "punct"},
            {"from_term": "t42", "to_term": "t48", "rfunc": "obl"},
            {"from_term": "t48", "to_term": "t44", "rfunc": "case"},
            {"from_term": "t48", "to_term": "t45", "rfunc": "case"},
            {"from_term": "t48", "to_term": "t46", "rfunc": "nmod:poss"},
            {"from_term": "t48", "to_term": "t47", "rfunc": "compound"},
            {"from_term": "t48", "to_term": "t49", "rfunc": "punct"},
            {"from_term": "t42", "to_term": "t50", "rfunc": "punct"},
        ]
        assert actual == expected, (
            "expected: " + str(expected) + ", actual: " + str(actual)
        )
def test_10_docx_multiwords(self):
naf = NafDocument().open(join("tests", "tests", "example.docx.naf.xml"))
actual = naf.multiwords
expected = [
{
"id": "mw1",
"lemma": "set_out",
"pos": "VERB",
"type": "phrasal",
"components": [
{"id": "mw1.c1", "span": [{"id": "t36"}]},
{"id": "mw1.c2", "span": [{"id": "t38"}]},
],
}
]
assert actual == expected, (
"expected: " + str(expected) + ", actual: " + str(actual)
)
def test_11_docx_raw(self):
naf = NafDocument().open(join("tests", "tests", "example.docx.naf.xml"))
actual = naf.raw
expected = "The Nafigator package allows you to store NLP output from custom made Spacy and stanza pipelines with (intermediate) results and all processing steps in one format. Multiwords like in “we have set that out below” are recognized (depending on your NLP processor)."
assert actual == expected, (
"expected: " + str(expected) + ", actual: " + str(actual)
)
| StarcoderdataPython |
import cv2
import numpy as np

# Detect moving pedestrians in a video via background subtraction.
vc = cv2.VideoCapture('./oriImgs/test.avi')
# Mixture-of-Gaussians model (MOG2) used for background modelling.
fgbg = cv2.createBackgroundSubtractorMOG2()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
while True:
    ret, frame = vc.read()
    if frame is None:
        break
    fgmask = fgbg.apply(frame)
    # Morphological opening removes small noise from the foreground mask.
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
    # Find moving contours in the mask.
    # NOTE(review): taking [0] assumes OpenCV 4.x, where findContours returns
    # (contours, hierarchy); OpenCV 3.x returns a 3-tuple -- confirm version.
    moveCnts = cv2.findContours(fgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
    # Keep only contours large enough to be pedestrians.
    for c in moveCnts:
        perimeter = cv2.arcLength(c, True)
        # The perimeter threshold is task-specific.
        if perimeter > 180:
            x, y, w, h = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('frame', frame)
    cv2.imshow('fgmask', fgmask)
    if cv2.waitKey(10) & 0xFF == 27:  # stop on ESC
        break
vc.release()
cv2.destroyAllWindows()
# applications/process_raw_data.py (from RaulRPrado/tev-binaries-model)
#!/usr/bin/python3
import matplotlib.pyplot as plt
import numpy as np
import math
import logging
import astropy.units as u
from astropy.io import ascii
logging.getLogger().setLevel(logging.DEBUG)
if __name__ == '__main__':
    # Energies in keV; integrated fluxes in erg cm-2 s-1.
    # Warning: the Nov/Dec 2017 NuSTAR files are mislabeled -
    # flux_error_hi is the model fit; only flux_error_lo is a real error.
    logging.warning(
        'Warning: NuSTAR files are mislabeled - flux_error_hi is the model fit. '
        'Use only flux_error_lo as flux_err.'
    )

    # Unit conversion factors.
    convFluxNuSTAR = u.keV.to(u.erg)   # NuSTAR fluxes: keV -> erg
    convEnergyVTS = u.TeV.to(u.keV)    # VERITAS energies: TeV -> keV
    convFluxVTS = u.TeV.to(u.erg)      # VERITAS fluxes: TeV -> erg

    def new_sed():
        """Return an empty SED container (energy in keV, fluxes in erg cm-2 s-1)."""
        return {'energy': list(), 'flux': list(), 'flux_err': list()}

    def add_nustar(sed, data, err_field='flux_error_lo'):
        """Append NuSTAR points to `sed`, converting keV fluxes to erg."""
        for d in data:
            sed['energy'].append(d['energy'])
            sed['flux'].append(d['flux'] * convFluxNuSTAR)
            sed['flux_err'].append(d[err_field] * convFluxNuSTAR)

    def add_vts_lge(sed, data):
        """Append VERITAS points stored as log10(energy/TeV) with asymmetric errors."""
        for d in data:
            en = 10**d['lge']  # TeV
            sed['energy'].append(en * convEnergyVTS)
            sed['flux'].append(d['dnde'] * (en**2) * convFluxVTS)
            dnde_err = (d['dnde_error_hi'] + d['dnde_error_lo']) / 2
            sed['flux_err'].append(dnde_err * (en**2) * convFluxVTS)

    def add_vts_gt(sed, data):
        """Append VERITAS (gamma-team format) points with e_ref and errn/errp errors."""
        for d in data:
            en = d['e_ref']  # TeV
            sed['energy'].append(en * convEnergyVTS)
            sed['flux'].append(d['dnde'] * (en**2) * convFluxVTS)
            dnde_err = (d['dnde_errn'] + d['dnde_errp']) / 2
            sed['flux_err'].append(dnde_err * (en**2) * convFluxVTS)

    def add_vts_sed(sed, data):
        """Append VERITAS points stored with linear energy and a symmetric error."""
        for d in data:
            sed['energy'].append(d['energy'] * convEnergyVTS)
            sed['flux'].append(d['dnde'] * (d['energy']**2) * convFluxVTS)
            sed['flux_err'].append(d['dnde_error'] * (d['energy']**2) * convFluxVTS)

    def write_sed(sed, out_file):
        """Write `sed` to `out_file` in basic ascii format, overwriting."""
        ascii.write(sed, out_file, format='basic', overwrite=True)

    # Period 0 - Nov 2017
    logging.info('Period 0 - Nov. 2017')
    dataNuSTAR = ascii.read('data/nov_joint_pow_3-30_keV_3sig_new.csv', format='basic')
    dataVTS = ascii.read('data/VTS_58073-58083.ecsv', format='basic')
    dataVTS_GT = ascii.read('data/HESSJ0632p057.VTS.nustar-58073-58083.spectrum.ecsv', format='basic')

    outData = new_sed()
    add_nustar(outData, dataNuSTAR)
    add_vts_lge(outData, dataVTS)
    write_sed(outData, 'data/HESS_J0632_0.csv')

    outData_GT = new_sed()
    add_nustar(outData_GT, dataNuSTAR)
    add_vts_gt(outData_GT, dataVTS_GT)
    write_sed(outData_GT, 'data/HESS_J0632_0_GT.csv')

    # Period 1 - Dec 2017
    logging.info('Period 1 - Dec. 2017')
    dataNuSTAR = ascii.read('data/dec_joint_pow_3-30_keV_3sig_new.csv', format='basic')
    dataVTS = ascii.read('data/VTS_58101-58103.ecsv', format='basic')
    dataVTS_GT = ascii.read('data/HESSJ0632p057.VTS.nustar-58101-58103.spectrum.ecsv', format='basic')

    outData = new_sed()
    add_nustar(outData, dataNuSTAR)
    add_vts_lge(outData, dataVTS)
    write_sed(outData, 'data/HESS_J0632_1.csv')

    outData_GT = new_sed()
    add_nustar(outData_GT, dataNuSTAR)
    add_vts_gt(outData_GT, dataVTS_GT)
    write_sed(outData_GT, 'data/HESS_J0632_1_GT.csv')

    # Period 2 - Dec 2019 (this NuSTAR file has a symmetric 'flux_error' column)
    logging.info('Period 2 - Dec. 2019')
    dataNuSTAR = ascii.read('data/nu30502017002_E3_20_SED_FPMA.csv', format='basic')
    dataVTS = ascii.read('data/1912_BDTmoderate2tel_SED.csv', format='basic')

    outData = new_sed()
    add_nustar(outData, dataNuSTAR, err_field='flux_error')
    add_vts_sed(outData, dataVTS)
    write_sed(outData, 'data/HESS_J0632_2.csv')

    # Period 3 - Jan 2020 (VERITAS only, no NuSTAR observation)
    logging.info('Period 3 - Jan. 2020')
    dataVTS = ascii.read('data/2001_BDTmoderate2tel_SED.csv', format='basic')

    outData = new_sed()
    add_vts_sed(outData, dataVTS)
    write_sed(outData, 'data/HESS_J0632_3.csv')

    # Period 4 - Feb 2020
    logging.info('Period 4 - Feb. 2020')
    dataNuSTAR = ascii.read('data/nu30502017004_E3_20_SED_FPMA.csv', format='basic')
    dataVTS = ascii.read('data/2002_BDTmoderate2tel_SED.csv', format='basic')

    outData = new_sed()
    add_nustar(outData, dataNuSTAR, err_field='flux_error')
    add_vts_sed(outData, dataVTS)
    write_sed(outData, 'data/HESS_J0632_4.csv')
| StarcoderdataPython |
import os
import numpy as np
from typing import List, Optional, Callable
from .. import backend as F
from ..convert import heterograph as dgl_heterograph
from ..base import dgl_warning, DGLError
import ast
import pydantic as dt
import pandas as pd
import yaml
class MetaNode(dt.BaseModel):
    """ Class of node_data in YAML. Internal use only. """
    # CSV file that holds the node rows (may be relative to a base dir).
    file_name: str
    # Node type name; defaults to the canonical homogeneous type '_V'.
    ntype: Optional[str] = '_V'
    # Column holding the graph ID each node belongs to.
    graph_id_field: Optional[str] = 'graph_id'
    # Column holding the (possibly arbitrary) node ID.
    node_id_field: Optional[str] = 'node_id'
class MetaEdge(dt.BaseModel):
    """ Class of edge_data in YAML. Internal use only. """
    # CSV file that holds the edge rows (may be relative to a base dir).
    file_name: str
    # Canonical edge type triple (src ntype, etype, dst ntype).
    # NOTE: a mutable default is safe here because pydantic copies
    # field defaults per instance.
    etype: Optional[List[str]] = ['_V', '_E', '_V']
    # Column holding the graph ID each edge belongs to.
    graph_id_field: Optional[str] = 'graph_id'
    # Columns holding source and destination node IDs.
    src_id_field: Optional[str] = 'src_id'
    dst_id_field: Optional[str] = 'dst_id'
class MetaGraph(dt.BaseModel):
    """ Class of graph_data in YAML. Internal use only. """
    # CSV file that holds graph-level rows (one per graph).
    file_name: str
    # Column holding the graph ID.
    graph_id_field: Optional[str] = 'graph_id'
class MetaYaml(dt.BaseModel):
    """ Class of YAML. Internal use only. """
    # Layout version; only '1.0.0' is accepted by load_yaml_with_sanity_check.
    version: Optional[str] = '1.0.0'
    # Human-readable dataset name.
    dataset_name: str
    # Field separator used in all CSV files.
    separator: Optional[str] = ','
    # One entry per node CSV file.
    node_data: List[MetaNode]
    # One entry per edge CSV file.
    edge_data: List[MetaEdge]
    # Optional graph-level CSV (used by multi-graph datasets).
    graph_data: Optional[MetaGraph] = None
def load_yaml_with_sanity_check(yaml_file):
    """ Load yaml and do sanity check. Internal use only.

    Parses ``yaml_file``, validates it against the ``MetaYaml`` schema,
    checks the layout version and the uniqueness of node/edge type names,
    and returns the validated ``MetaYaml`` object.

    Raises ``DGLError`` on any validation failure.
    """
    with open(yaml_file) as f:
        yaml_data = yaml.load(f, Loader=yaml.loader.SafeLoader)
    try:
        # Validate the raw YAML dict against the MetaYaml schema.
        meta_yaml = MetaYaml(**yaml_data)
    except dt.ValidationError as e:
        print(
            "Details of pydantic.ValidationError:\n{}".format(e.json()))
        raise DGLError(
            "Validation Error for YAML fields. Details are shown above.")
    # Only version '1.0.0' of the CSVDataset layout is supported.
    if meta_yaml.version != '1.0.0':
        raise DGLError("Invalid CSVDataset version {}. Supported versions: '1.0.0'".format(
            meta_yaml.version))
    # Node type names must be unique across node CSV files.
    ntypes = [meta.ntype for meta in meta_yaml.node_data]
    if len(ntypes) > len(set(ntypes)):
        raise DGLError(
            "Each node CSV file must have a unique node type name, but found duplicate node type: {}.".format(ntypes))
    # Canonical edge type triples must be unique across edge CSV files.
    etypes = [tuple(meta.etype) for meta in meta_yaml.edge_data]
    if len(etypes) > len(set(etypes)):
        raise DGLError(
            "Each edge CSV file must have a unique edge type name, but found duplicate edge type: {}.".format(etypes))
    return meta_yaml
def _validate_data_length(data_dict):
    """Check that every value in ``data_dict`` has the same length.

    Parameters
    ----------
    data_dict : dict
        Maps a field name to a sized container (list/array/...).

    Raises
    ------
    DGLError
        If two fields have different lengths.
    """
    len_dict = {k: len(v) for k, v in data_dict.items()}
    # A set of the lengths has at most one element when they all agree.
    # (The original indexed lst[0], which raised IndexError on an empty dict.)
    if len(set(len_dict.values())) > 1:
        raise DGLError(
            "All data are required to have same length while some of them does not. Length of data={}".format(str(len_dict)))
class BaseData:
    """ Class of base data which is inherited by Node/Edge/GraphData. Internal use only. """

    @staticmethod
    def read_csv(file_name, base_dir, separator):
        # Resolve the CSV path against base_dir when one is provided.
        csv_path = file_name if base_dir is None else os.path.join(base_dir, file_name)
        return pd.read_csv(csv_path, sep=separator)

    @staticmethod
    def pop_from_dataframe(df: pd.DataFrame, item: str):
        # Remove column `item` from `df` and return it as a squeezed
        # numpy array; return None when the column does not exist.
        if item not in df:
            return None
        return df.pop(item).to_numpy().squeeze()
class NodeData(BaseData):
    """ Class of node data which is used for DGLGraph construction. Internal use only. """

    def __init__(self, node_id, data, type=None, graph_id=None):
        # Raw node IDs; they are remapped to contiguous IDs in to_dict().
        self.id = np.array(node_id, dtype=np.int64)
        self.data = data
        self.type = type if type is not None else '_V'
        # BUGFIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # use np.int64 explicitly, consistent with self.id above.
        self.graph_id = np.array(graph_id, dtype=np.int64) if graph_id is not None else np.full(
            len(node_id), 0)
        _validate_data_length({**{'id': self.id, 'graph_id': self.graph_id}, **self.data})

    @staticmethod
    def load_from_csv(meta: MetaNode, data_parser: Callable, base_dir=None, separator=','):
        """Load node data from the CSV file described by ``meta``.

        Raises DGLError when the node-ID column is missing. All remaining
        columns are turned into node features by ``data_parser``.
        """
        df = BaseData.read_csv(meta.file_name, base_dir, separator)
        node_ids = BaseData.pop_from_dataframe(df, meta.node_id_field)
        graph_ids = BaseData.pop_from_dataframe(df, meta.graph_id_field)
        if node_ids is None:
            raise DGLError("Missing node id field [{}] in file [{}].".format(
                meta.node_id_field, meta.file_name))
        ntype = meta.ntype
        ndata = data_parser(df)
        return NodeData(node_ids, ndata, type=ntype, graph_id=graph_ids)

    @staticmethod
    def to_dict(node_data: List['NodeData']) -> dict:
        """Group nodes by graph ID and node type.

        Returns ``{graph_id: {ntype: {'mapping': raw_id -> contiguous_id,
        'data': {feat_name: tensor}}}}``.
        """
        # node_ids could be arbitrary numeric values, namely non-sorted, duplicated, not labeled from 0 to num_nodes-1
        node_dict = {}
        for n_data in node_data:
            graph_ids = np.unique(n_data.graph_id)
            for graph_id in graph_ids:
                idx = n_data.graph_id == graph_id
                ids = n_data.id[idx]
                # Duplicated raw IDs are dropped; only the first occurrence is kept.
                u_ids, u_indices = np.unique(ids, return_index=True)
                if len(ids) > len(u_ids):
                    dgl_warning(
                        "There exist duplicated ids and only the first ones are kept.")
                if graph_id not in node_dict:
                    node_dict[graph_id] = {}
                node_dict[graph_id][n_data.type] = {'mapping': {index: i for i,
                                                                index in enumerate(ids[u_indices])},
                                                    'data': {k: F.tensor(v[idx][u_indices])
                                                             for k, v in n_data.data.items()}}
        return node_dict
class EdgeData(BaseData):
    """ Class of edge data which is used for DGLGraph construction. Internal use only. """

    def __init__(self, src_id, dst_id, data, type=None, graph_id=None):
        self.src = np.array(src_id, dtype=np.int64)
        self.dst = np.array(dst_id, dtype=np.int64)
        self.data = data
        self.type = type if type is not None else ('_V', '_E', '_V')
        # BUGFIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # use np.int64 explicitly, consistent with self.src/self.dst above.
        self.graph_id = np.array(graph_id, dtype=np.int64) if graph_id is not None else np.full(
            len(src_id), 0)
        _validate_data_length({**{'src': self.src, 'dst': self.dst, 'graph_id': self.graph_id}, **self.data})

    @staticmethod
    def load_from_csv(meta: MetaEdge, data_parser: Callable, base_dir=None, separator=','):
        """Load edge data from the CSV file described by ``meta``.

        Raises DGLError when the src or dst ID column is missing. All
        remaining columns are turned into edge features by ``data_parser``.
        """
        df = BaseData.read_csv(meta.file_name, base_dir, separator)
        src_ids = BaseData.pop_from_dataframe(df, meta.src_id_field)
        if src_ids is None:
            raise DGLError("Missing src id field [{}] in file [{}].".format(
                meta.src_id_field, meta.file_name))
        dst_ids = BaseData.pop_from_dataframe(df, meta.dst_id_field)
        if dst_ids is None:
            raise DGLError("Missing dst id field [{}] in file [{}].".format(
                meta.dst_id_field, meta.file_name))
        graph_ids = BaseData.pop_from_dataframe(df, meta.graph_id_field)
        etype = tuple(meta.etype)
        edata = data_parser(df)
        return EdgeData(src_ids, dst_ids, edata, type=etype, graph_id=graph_ids)

    @staticmethod
    def to_dict(edge_data: List['EdgeData'], node_dict: dict) -> dict:
        """Group edges by graph ID and canonical edge type.

        Raw endpoint IDs are remapped through the per-type node mappings
        produced by ``NodeData.to_dict``. Raises DGLError when the same
        edge type appears twice for one graph.
        """
        edge_dict = {}
        for e_data in edge_data:
            (src_type, e_type, dst_type) = e_data.type
            graph_ids = np.unique(e_data.graph_id)
            for graph_id in graph_ids:
                if graph_id in edge_dict and e_data.type in edge_dict[graph_id]:
                    raise DGLError(f"Duplicate edge type[{e_data.type}] for same graph[{graph_id}], please place the same edge_type for same graph into single EdgeData.")
                idx = e_data.graph_id == graph_id
                # Remap raw node IDs to the contiguous IDs assigned in NodeData.to_dict().
                src_mapping = node_dict[graph_id][src_type]['mapping']
                dst_mapping = node_dict[graph_id][dst_type]['mapping']
                src_ids = [src_mapping[index] for index in e_data.src[idx]]
                dst_ids = [dst_mapping[index] for index in e_data.dst[idx]]
                if graph_id not in edge_dict:
                    edge_dict[graph_id] = {}
                edge_dict[graph_id][e_data.type] = {'edges': (F.tensor(src_ids), F.tensor(dst_ids)),
                                                    'data': {k: F.tensor(v[idx])
                                                             for k, v in e_data.data.items()}}
        return edge_dict
class GraphData(BaseData):
    """ Class of graph data which is used for DGLGraph construction. Internal use only. """

    def __init__(self, graph_id, data):
        # Graph-level IDs and their associated data columns (e.g. labels).
        self.graph_id = np.array(graph_id, dtype=np.int64)
        self.data = data
        _validate_data_length({**{'graph_id': self.graph_id}, **self.data})

    @staticmethod
    def load_from_csv(meta: MetaGraph, data_parser: Callable, base_dir=None, separator=','):
        """Load graph-level data from the CSV file described by ``meta``.

        Raises DGLError when the graph-ID column is missing; all remaining
        columns are parsed into graph-level data by ``data_parser``.
        """
        df = BaseData.read_csv(meta.file_name, base_dir, separator)
        graph_ids = BaseData.pop_from_dataframe(df, meta.graph_id_field)
        if graph_ids is None:
            raise DGLError("Missing graph id field [{}] in file [{}].".format(
                meta.graph_id_field, meta.file_name))
        gdata = data_parser(df)
        return GraphData(graph_ids, gdata)

    @staticmethod
    def to_dict(graph_data: 'GraphData', graphs_dict: dict) -> dict:
        # NOTE(review): despite the name and the `-> dict` annotation, this
        # returns a (list of graphs, dict of graph-level data) tuple, and it
        # mutates `graphs_dict` by inserting empty placeholder graphs for IDs
        # that appear only in the graph CSV.
        missing_ids = np.setdiff1d(
            np.array(list(graphs_dict.keys())), graph_data.graph_id)
        if len(missing_ids) > 0:
            # Every graph built from node/edge CSVs must also be listed in the graph CSV.
            raise DGLError(
                "Found following graph ids in node/edge CSVs but not in graph CSV: {}.".format(missing_ids))
        graph_ids = graph_data.graph_id
        graphs = []
        for graph_id in graph_ids:
            # Graphs with no node/edge rows become empty homogeneous graphs.
            if graph_id not in graphs_dict:
                graphs_dict[graph_id] = dgl_heterograph(
                    {('_V', '_E', '_V'): ([], [])})
        for graph_id in graph_ids:
            graphs.append(graphs_dict[graph_id])
        data = {k: F.tensor(v) for k, v in graph_data.data.items()}
        return graphs, data
class DGLGraphConstructor:
    """ Class of constructing DGLGraph from Node/Edge/Graph data. Internal use only. """
    @staticmethod
    def construct_graphs(node_data, edge_data, graph_data=None):
        """Assemble heterographs from parsed CSV data.

        node_data/edge_data may be single objects or lists.  Returns the
        (graphs, data) pair produced by GraphData.to_dict.
        """
        if not isinstance(node_data, list):
            node_data = [node_data]
        if not isinstance(edge_data, list):
            edge_data = [edge_data]
        node_dict = NodeData.to_dict(node_data)
        edge_dict = EdgeData.to_dict(edge_data, node_dict)
        graph_dict = DGLGraphConstructor._construct_graphs(
            node_dict, edge_dict)
        if graph_data is None:
            # No graph-level CSV supplied: assume a single graph with id 0.
            graph_data = GraphData(np.full(1, 0), {})
        graphs, data = GraphData.to_dict(
            graph_data, graph_dict)
        return graphs, data

    @staticmethod
    def _construct_graphs(node_dict, edge_dict):
        """Create one heterograph per graph id and attach node/edge features."""
        graph_dict = {}
        for graph_id in node_dict:
            if graph_id not in edge_dict:
                # BUG FIX: the original did `edge_dict[graph_id][...] = ...`
                # before the inner dict existed (KeyError), and its placeholder
                # lacked the 'data' entry that the assignment loop below reads.
                edge_dict[graph_id] = {
                    ('_V', '_E', '_V'): {'edges': ([], []), 'data': {}}}
            graph = dgl_heterograph({etype: edata['edges']
                                     for etype, edata in edge_dict[graph_id].items()},
                                    num_nodes_dict={ntype: len(ndata['mapping'])
                                                    for ntype, ndata in node_dict[graph_id].items()})

            def assign_data(kind, src_data, dst_data):
                # Copy each feature array onto the graph's node/edge frame.
                # (`kind` renamed from `type`, which shadowed the builtin.)
                for key, value in src_data.items():
                    dst_data[kind].data[key] = value

            for ntype, ndata in node_dict[graph_id].items():
                assign_data(ntype, ndata['data'], graph.nodes)
            for etype, edata in edge_dict[graph_id].items():
                assign_data(etype, edata['data'], graph.edges)
            graph_dict[graph_id] = graph
        return graph_dict
class DefaultDataParser:
    """ Default data parser for DGLCSVDataset. It
    1. ignores any columns which does not have a header.
    2. tries to convert to list of numeric values(generated by
        np.array().tolist()) if cell data is a str separated by ','.
    3. read data and infer data type directly, otherwise.
    """
    def __call__(self, df: pd.DataFrame):
        """Return {column_name: ndarray} for every named column of ``df``."""
        data = {}
        for header in df:
            if 'Unnamed' in header:
                # pandas names headerless columns 'Unnamed: N'; skip them.
                # (Typo "Unamed" in the warning text fixed.)
                dgl_warning("Unnamed column is found. Ignored...")
                continue
            dt = df[header].to_numpy().squeeze()
            # BUG FIX: squeeze() turns a single-row column into a 0-d array,
            # which neither len() nor dt[0] accept; restore one dimension.
            if dt.ndim == 0:
                dt = np.atleast_1d(dt)
            if len(dt) > 0 and isinstance(dt[0], str):
                # probably consists of list of numeric values
                dt = np.array([ast.literal_eval(row) for row in dt])
            data[header] = dt
        return data
| StarcoderdataPython |
4892573 | <filename>engine/keyandstate.py<gh_stars>0
# ibus-byrninpikak - byrninpikak IME
#
# Copyright (c) 2022 Harsiharsi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gi import require_version
require_version('IBus', '1.0')
from gi.repository import IBus
class KeyAndState:
    """Value object pairing an IBus keyval with its modifier-state bitmask."""

    def __init__(self, keyval, state):
        self.keyval = keyval  # key value (keysym)
        self.state = state    # modifier state bitmask

    def __eq__(self, ks):
        # Equal only to another KeyAndState with identical attributes.
        if isinstance(ks, KeyAndState):
            return self.__dict__ == ks.__dict__
        return False

    def __hash__(self):
        # BUG FIX: defining __eq__ alone set __hash__ to None, making
        # instances unusable in sets/dicts; keep hash consistent with ==.
        return hash((self.keyval, self.state))

    def __repr__(self):
        return 'KeyAndState({!r}, {!r})'.format(self.keyval, self.state)
| StarcoderdataPython |
11266886 | from opfu.stock import Stock
if __name__ == '__main__':
    # Demo script: build a short Stock position and inspect its payoff.
    # (Positional args presumably price and quantity -- TODO confirm against
    # opfu.stock.Stock's signature.)
    stock_1 = Stock(10, 3, is_short=True)
    print(stock_1.payoff(20))
    stock_1.graph_payoff()
    print(stock_1.find_break_even())
| StarcoderdataPython |
3266544 | <reponame>jmossberg/ParseBankStatement
# coding=utf-8
# see: https://www.python.org/dev/peps/pep-0263/
import argparse
import re
import time
import os.path
class ErrorInputLineEndsWithCsv(Exception):
    """Raised when the input statement file already has a .csv extension."""

    def __init__(self, message):
        # Forward to Exception so str(exc) and exc.args carry the message.
        super().__init__(message)
        self.message = message
class ErrorOutputFileAlreadyExists(Exception):
    """Raised when the chosen output file already exists (never overwrite)."""

    def __init__(self, message):
        # Forward to Exception so str(exc) and exc.args carry the message.
        super().__init__(message)
        self.message = message
class FileReader:
    """Line-oriented reader that iterates over a text file until EOF.

    (Instance methods previously misnamed ``self`` as ``cls``.)
    """

    def __init__(self, file_name):
        self.f_input = open(file_name, 'r')

    def __del__(self):
        # Guard: open() may have raised in __init__, leaving no attribute.
        if hasattr(self, 'f_input'):
            self.f_input.close()

    def read_line(self):
        """Return the next line (newline included) or None at end of file."""
        line = self.f_input.readline()
        if line == "":
            return None
        return line

    def __iter__(self):
        return self

    def __next__(self):
        line = self.read_line()
        if line is None:
            raise StopIteration
        return line
class FileWriter:
    """Writer that refuses to overwrite an existing output file.

    (Instance methods previously misnamed ``self`` as ``cls``.)
    """

    def __init__(self, file_name):
        if os.path.isfile(file_name):
            raise ErrorOutputFileAlreadyExists("Output file name already exists")
        self.f_output = open(file_name, 'w')

    def __del__(self):
        # Guard: the existence check may have raised before the file opened.
        if hasattr(self, 'f_output'):
            self.f_output.close()

    def write_line(self, line):
        """Write ``line`` verbatim (caller supplies any newline)."""
        self.f_output.write(line)
class OutputFileName:
    """Derives the .csv output file name from the input file name."""

    ERROR_MSG_INPUT_FILE_ENDS_WITH_CSV = "Input file must not end with .csv"

    def create_output_file_name(self, input_file):
        """Return ``input_file`` with any trailing .txt replaced by .csv.

        Raises ErrorInputLineEndsWithCsv when the input already ends in
        .csv, since the derived output name would clobber it.
        """
        if re.search(r"\.csv$", input_file):
            raise ErrorInputLineEndsWithCsv(self.ERROR_MSG_INPUT_FILE_ENDS_WITH_CSV)
        without_postfix = re.split(r"\.txt$", input_file)[0]
        return without_postfix + ".csv"
class StatementConverter:
    """Streams a bank statement through a line converter into YNAB CSV.

    (Instance methods previously misnamed ``self`` as ``cls``.)
    """

    def __init__(self, statement_line_converter, file_reader, file_writer):
        self.statement_line_converter = statement_line_converter  # has convert_line(str) -> str
        self.file_reader = file_reader    # iterable of input lines
        self.file_writer = file_writer    # has write_line(str)

    def add_csv_header(self, file_writer):
        """Write the fixed YNAB CSV column header."""
        out_line = "Date,Payee,Category,Memo,Outflow,Inflow\n"
        file_writer.write_line(out_line)

    def convert(self):
        """Convert every input line; empty conversions are skipped."""
        self.add_csv_header(self.file_writer)
        for line in self.file_reader:
            converted_line = self.statement_line_converter.convert_line(line)
            if len(converted_line) > 0:
                self.file_writer.write_line(converted_line)
class GeneralLineConverter:
    """Parses one tab-separated bank-statement line into YNAB CSV fields.

    Supported banks: santander, skandia, ica, ica2.  Profiles differ in which
    column holds the amount and payee, the date format, whether a currency
    suffix must be stripped, and an optional line to ignore entirely.
    """

    REGEXP_YEAR_MONTH_DAY = r"\d\d\d\d-\d\d-\d\d"
    REGEXP_DAY_MONTHSTRING_YEAR = r"\d\d [a-ö]{3,3} \d\d\d\d"
    FORMAT_YEAR_MONTH_DAY = "%Y-%m-%d"
    FORMAT_DAY_MONTH_YEAR = "%d/%m/%Y"
    FORMAT_DAY_MONTH_YEAR_SPACES = "%d %m %Y"
    YEAR_MONTH_DAY_LENGTH = 11

    # Swedish three-letter month abbreviation -> month number.
    # (Replaces a 24-line if-chain.)
    MONTH_NUMBER_BY_NAME = {
        'jan': '01', 'feb': '02', 'mar': '03', 'apr': '04',
        'maj': '05', 'jun': '06', 'jul': '07', 'aug': '08',
        'sep': '09', 'okt': '10', 'nov': '11', 'dec': '12',
    }

    def __init__(self, bank):
        self.bank = bank
        self.ignore_line = ""
        self.regexp_date = self.REGEXP_YEAR_MONTH_DAY
        self.format_date = self.FORMAT_YEAR_MONTH_DAY
        self.convert_date_with_month_string = False
        if "santander" == self.bank:
            self.transaction_position = 4
            self.transaction_includes_currency = 'kr'
            self.payee_position = 2
            self.use_second_data = False
            self.ignore_line = "Transaktioner ovan har du ännu inte fått på ditt kontoutdrag."
        elif "skandia" == self.bank:
            self.transaction_position = 2
            self.transaction_includes_currency = ''
            self.payee_position = 1
            self.use_second_data = True
        elif "ica" == self.bank:
            self.transaction_position = 4
            self.transaction_includes_currency = 'kr'
            self.use_second_data = False
            self.payee_position = 1
        elif "ica2" == self.bank:
            self.transaction_position = 4
            self.transaction_includes_currency = 'kr'
            self.use_second_data = False
            self.payee_position = 1
            # ica2 dates look like "02 maj 2020".
            self.regexp_date = self.REGEXP_DAY_MONTHSTRING_YEAR
            self.convert_date_with_month_string = True
            self.format_date = self.FORMAT_DAY_MONTH_YEAR_SPACES
        else:
            raise Exception("Invalid bank" + self.bank)

    def parse_outflow(self, line):
        """Return the outflow amount without its sign, or "" for inflows."""
        amount = self.parse_transaction(line)
        # startswith also guards the empty string, which previously raised
        # IndexError on amount[0].
        if amount.startswith('-'):
            return amount[1:]
        return ""

    def parse_inflow(self, line):
        """Return the inflow amount, or "" for outflows/empty amounts."""
        amount = self.parse_transaction(line)
        if amount.startswith('-'):
            return ""
        return amount

    def parse_transaction(self, line):
        """Extract the amount column, normalized to 'nnn.nn' form."""
        statement_items = line.split('\t')
        outflow = statement_items[self.transaction_position]
        outflow = outflow.replace(',', '.')   # decimal comma -> dot
        outflow = outflow.replace(' ', '')    # thousands separators
        outflow = outflow.replace(self.transaction_includes_currency, '')
        outflow = outflow.strip()
        return outflow

    def remove_date_from_payee(self, line):
        """Strip a leading 'YYYY-MM-DD ' prefix from a payee string, if any."""
        regexp = re.compile(self.REGEXP_YEAR_MONTH_DAY)
        matches = regexp.findall(line)
        if len(matches) > 0:
            return line[self.YEAR_MONTH_DAY_LENGTH:]  # Remove date at the beginning of Payee
        return line

    def parse_payee(self, line):
        """Extract and sanitize the payee column."""
        statement_items = line.split('\t')
        payee = statement_items[self.payee_position]  # date is stored in index 0
        payee = payee.replace(',', '.')       # commas would break the CSV
        payee = payee.replace('\\\\', ' ')
        payee = payee.replace('\\', '')
        payee = payee.strip()
        payee = self.remove_date_from_payee(payee)
        return payee

    def parse_date(self, line):
        """Return the statement date reformatted as DD/MM/YYYY."""
        date_year_month_day = self._parse_year_month_day(line)
        return self._convert_date_string(date_year_month_day)

    def _parse_year_month_day(self, line):
        """Find the relevant date in the line (some banks print two)."""
        regexp = re.compile(self.regexp_date)
        matches = regexp.findall(line)
        if len(matches) == 1:
            return matches[0]
        if len(matches) == 2:
            return matches[1] if self.use_second_data else matches[0]
        raise Exception("Invalid number of dates found in line: " + line)

    def _convert_date_with_month_string(self, extracted_date_as_string):
        """Replace the month abbreviation with its two-digit number."""
        month_string = self._extract_month_string(extracted_date_as_string)
        month_number = self._convert_month_string_to_month_number(month_string)
        return extracted_date_as_string.replace(month_string, month_number)

    def _convert_month_string_to_month_number(self, month_string):
        month_number = self.MONTH_NUMBER_BY_NAME.get(month_string)
        if month_number is None:
            raise Exception("Cannot convert month string to month number: " + month_string)
        return month_number

    def _extract_month_string(self, extracted_date_as_string):
        regexp = re.compile("[a-ö]{3,3}")
        matches = regexp.findall(extracted_date_as_string)
        return matches[0]

    def _convert_date_string(self, extracted_date_as_string):
        """Parse self.format_date and reformat as DD/MM/YYYY."""
        if self.convert_date_with_month_string:
            extracted_date_as_string = self._convert_date_with_month_string(extracted_date_as_string)
        extracted_date = time.strptime(extracted_date_as_string, self.format_date)
        return time.strftime(self.FORMAT_DAY_MONTH_YEAR, extracted_date)

    def convert_line(self, line):
        """Return a 'Date,Payee,Category,Memo,Outflow,Inflow\\n' CSV row,
        or "" for a line that should be ignored entirely."""
        if ((len(self.ignore_line) > 0) and (self.ignore_line in line)):
            return ""
        # Date,Payee,Category,Memo,Outflow,Inflow
        out_line = ""
        out_line += self.parse_date(line) + ","
        out_line += self.parse_payee(line) + ","
        out_line += ","  # Category
        out_line += ","  # Memo
        out_line += self.parse_outflow(line) + ","
        out_line += self.parse_inflow(line) + "\n"
        return out_line
class IcaLineConverter:
    """Parses one ICA bank-statement line (semicolon separated) into YNAB CSV.

    Near-duplicate of GeneralLineConverter except that fields are separated
    by ';' instead of tabs and only the "ica2" bank profile is supported.
    """

    REGEXP_YEAR_MONTH_DAY = r"\d\d\d\d-\d\d-\d\d"
    REGEXP_DAY_MONTHSTRING_YEAR = r"\d\d [a-ö]{3,3} \d\d\d\d"
    FORMAT_YEAR_MONTH_DAY = "%Y-%m-%d"
    FORMAT_DAY_MONTH_YEAR = "%d/%m/%Y"
    FORMAT_DAY_MONTH_YEAR_SPACES = "%d %m %Y"
    YEAR_MONTH_DAY_LENGTH = 11

    # Swedish three-letter month abbreviation -> month number.
    # (Replaces a 24-line if-chain.)
    MONTH_NUMBER_BY_NAME = {
        'jan': '01', 'feb': '02', 'mar': '03', 'apr': '04',
        'maj': '05', 'jun': '06', 'jul': '07', 'aug': '08',
        'sep': '09', 'okt': '10', 'nov': '11', 'dec': '12',
    }

    def __init__(self, bank):
        self.bank = bank
        self.ignore_line = ""
        self.regexp_date = self.REGEXP_YEAR_MONTH_DAY
        self.format_date = self.FORMAT_YEAR_MONTH_DAY
        self.convert_date_with_month_string = False
        if "ica2" == self.bank:
            self.transaction_position = 4
            self.transaction_includes_currency = 'kr'
            self.use_second_data = False
            self.payee_position = 1
        else:
            raise Exception("Invalid bank" + self.bank)

    def parse_outflow(self, line):
        """Return the outflow amount without its sign, or "" for inflows."""
        amount = self.parse_transaction(line)
        # startswith also guards the empty string, which previously raised
        # IndexError on amount[0].
        if amount.startswith('-'):
            return amount[1:]
        return ""

    def parse_inflow(self, line):
        """Return the inflow amount, or "" for outflows/empty amounts."""
        amount = self.parse_transaction(line)
        if amount.startswith('-'):
            return ""
        return amount

    def parse_transaction(self, line):
        """Extract the amount column, normalized to 'nnn.nn' form."""
        statement_items = line.split(';')
        outflow = statement_items[self.transaction_position]
        outflow = outflow.replace(',', '.')   # decimal comma -> dot
        outflow = outflow.replace(' ', '')    # thousands separators
        outflow = outflow.replace(self.transaction_includes_currency, '')
        outflow = outflow.strip()
        return outflow

    def remove_date_from_payee(self, line):
        """Strip a leading 'YYYY-MM-DD ' prefix from a payee string, if any."""
        regexp = re.compile(self.REGEXP_YEAR_MONTH_DAY)
        matches = regexp.findall(line)
        if len(matches) > 0:
            return line[self.YEAR_MONTH_DAY_LENGTH:]  # Remove date at the beginning of Payee
        return line

    def parse_payee(self, line):
        """Extract and sanitize the payee column."""
        statement_items = line.split(';')
        payee = statement_items[self.payee_position]  # date is stored in index 0
        payee = payee.replace(',', '.')       # commas would break the CSV
        payee = payee.replace('\\\\', ' ')
        payee = payee.replace('\\', '')
        payee = payee.strip()
        payee = self.remove_date_from_payee(payee)
        return payee

    def parse_date(self, line):
        """Return the statement date reformatted as DD/MM/YYYY."""
        date_year_month_day = self._parse_year_month_day(line)
        return self._convert_date_string(date_year_month_day)

    def _parse_year_month_day(self, line):
        """Find the relevant date in the line (some statements print two)."""
        regexp = re.compile(self.regexp_date)
        matches = regexp.findall(line)
        if len(matches) == 1:
            return matches[0]
        if len(matches) == 2:
            return matches[1] if self.use_second_data else matches[0]
        raise Exception("Invalid number of dates found in line: " + line)

    def _convert_date_with_month_string(self, extracted_date_as_string):
        """Replace the month abbreviation with its two-digit number."""
        month_string = self._extract_month_string(extracted_date_as_string)
        month_number = self._convert_month_string_to_month_number(month_string)
        return extracted_date_as_string.replace(month_string, month_number)

    def _convert_month_string_to_month_number(self, month_string):
        month_number = self.MONTH_NUMBER_BY_NAME.get(month_string)
        if month_number is None:
            raise Exception("Cannot convert month string to month number: " + month_string)
        return month_number

    def _extract_month_string(self, extracted_date_as_string):
        regexp = re.compile("[a-ö]{3,3}")
        matches = regexp.findall(extracted_date_as_string)
        return matches[0]

    def _convert_date_string(self, extracted_date_as_string):
        """Parse self.format_date and reformat as DD/MM/YYYY."""
        if self.convert_date_with_month_string:
            extracted_date_as_string = self._convert_date_with_month_string(extracted_date_as_string)
        extracted_date = time.strptime(extracted_date_as_string, self.format_date)
        return time.strftime(self.FORMAT_DAY_MONTH_YEAR, extracted_date)

    def convert_line(self, line):
        """Return a 'Date,Payee,Category,Memo,Outflow,Inflow\\n' CSV row,
        or "" for a line that should be ignored entirely."""
        if ((len(self.ignore_line) > 0) and (self.ignore_line in line)):
            return ""
        # Date,Payee,Category,Memo,Outflow,Inflow
        out_line = ""
        out_line += self.parse_date(line) + ","
        out_line += self.parse_payee(line) + ","
        out_line += ","  # Category
        out_line += ","  # Memo
        out_line += self.parse_outflow(line) + ","
        out_line += self.parse_inflow(line) + "\n"
        return out_line
def parse_command_line_arguments():
    """Parse sys.argv and return the (input_file, output_file, bank) triple."""
    parser = argparse.ArgumentParser()
    parser.add_argument("bank", help="valid banks: santander, skandia, ica")
    parser.add_argument("input_file", help="text file with bank statement from the bank")
    parser.add_argument(
        "--output_file",
        help="csv file to be consumed by YNAB (default: same name as input file but with .csv postfix)",
        default=None)
    parsed = parser.parse_args()
    return parsed.input_file, parsed.output_file, parsed.bank
def main():
    """CLI entry point: convert the given bank statement file to YNAB CSV."""
    input_file, output_file, bank = parse_command_line_arguments()
    output_file_name = OutputFileName()
    # Default the output name to <input>.csv when --output_file was omitted.
    if None == output_file:
        output_file = output_file_name.create_output_file_name(input_file)
    print("Input file.: {}".format(input_file))
    print("Output file: {}".format(output_file))
    print("Bank.......: {}".format(bank))
    file_reader = FileReader(input_file)
    # FileWriter raises ErrorOutputFileAlreadyExists rather than overwrite.
    file_writer = FileWriter(output_file)
    statement_line_converter = GeneralLineConverter(bank)
    statement_converter = StatementConverter(statement_line_converter, file_reader, file_writer)
    statement_converter.convert()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4852773 | <reponame>Daniela-Sanchez/ClienteServidor<filename>cssenv/Scripts/csaplication/Administrador/serializers.py
# ----------------------------- Librerias -----------------------------
from rest_framework import routers, serializers, viewsets
# ----------------------------- Modelos -----------------------------
from Administrador.models import Administrador
class AdministradorSerializers(serializers.ModelSerializer):
    """DRF serializer exposing basic Administrador fields over the API."""
    class Meta:
        model = Administrador
        # Fields exposed to API clients ('ap_pat'/'ap_mat' presumably paternal
        # and maternal surnames -- TODO confirm against the model definition).
        fields = ('id', 'name', 'ap_pat', 'ap_mat', 'year')
6540199 | <filename>tests/flattened_schema.py
# Expected flattening of a nested JSON schema: each key is the tuple path to a
# subschema within the source document (() is the root), and internal "$ref"
# values are rewritten to those tuple paths.  (Interpretation inferred from
# the structure -- TODO confirm against the flattener under test.)
FLATTENED_SCHEMA = {
    ("definitions", "location"): {
        "type": "object",
        "properties": {
            "country": {"type": "string"},
            "stateNumber": {"type": "integer"},
        },
    },
    ("properties", "coordinate", "items"): {
        "type": "object",
        "properties": {"lat": {"type": "number"}, "long": {"type": "number"}},
    },
    (): {
        "type": "object",
        "properties": {
            "state": {"$ref": ("definitions", "location")},
            "coordinates": {
                "type": "array",
                "items": {"$ref": ("properties", "coordinate", "items")},
            },
            "surroundingStates": {
                "type": "object",
                "patternProperties": {
                    "[A-Za-z0-9]{1,64}": {"$ref": ("definitions", "location")}
                },
            },
        },
    },
}
| StarcoderdataPython |
3513400 | <reponame>kev0960/HwaTu
import unittest
from game import HwaTu, Player, Card
class HwaTuTest(unittest.TestCase):
    """Tests for HwaTu.play_card: which cards are earned vs. left face-up.
    (Per-case notes below describe only the asserted outcomes; the underlying
    matching rules live in game.py.)
    """
    def setUp(self):
        # Fresh game state for every test.
        self.game = HwaTu()
    def test_play_card_simple(self):
        # Case 1: pair (3, 2) is earned; pile card 5 joins opened card 1.
        self.game.cards_on_pile = [Card(5)]
        self.game.opened_cards = [Card(1), Card(2)]
        earned_card = self.game.play_card((Card(3), Card(2)))
        self.assertEqual(set(earned_card), {Card(2), Card(3)})
        self.assertEqual(set(self.game.opened_cards), {Card(5), Card(1)})
        # Case 2: everything on the table is earned; nothing stays face-up.
        self.game.cards_on_pile = [Card(3)]
        self.game.opened_cards = [Card(0), Card(1)]
        earned_card = self.game.play_card((Card(2), Card(0)))
        self.assertEqual(set(earned_card), {Card(0), Card(1), Card(2), Card(3)})
        self.assertEqual(set(self.game.opened_cards), set())
        # Case 3: pair (1, 0) plus pile/opened pair (5, 6) are all earned.
        self.game.cards_on_pile = [Card(5)]
        self.game.opened_cards = [Card(0), Card(6)]
        earned_card = self.game.play_card((Card(1), Card(0)))
        self.assertEqual(set(earned_card), {Card(0), Card(1), Card(5), Card(6)})
        self.assertEqual(set(self.game.opened_cards), set())
        # Case 4: no table card chosen; only (5, 6) is earned, played 10 stays.
        self.game.cards_on_pile = [Card(5)]
        self.game.opened_cards = [Card(0), Card(6)]
        earned_card = self.game.play_card((Card(10), None))
        self.assertEqual(set(earned_card), {Card(5), Card(6)})
        self.assertEqual(set(self.game.opened_cards), {Card(0), Card(10)})
        # Case 5: nothing earned; played and flipped cards join the table.
        self.game.cards_on_pile = [Card(5)]
        self.game.opened_cards = [Card(0), Card(1)]
        earned_card = self.game.play_card((Card(10), None))
        self.assertEqual(set(earned_card), set())
        self.assertEqual(set(self.game.opened_cards), {Card(0), Card(1), Card(5), Card(10)})
        # Case 6 - Bbuck: nothing earned; all four cards remain face-up.
        self.game.cards_on_pile = [Card(2)]
        self.game.opened_cards = [Card(0), Card(5)]
        earned_card = self.game.play_card((Card(1), None))
        self.assertEqual(set(earned_card), set())
        self.assertEqual(set(self.game.opened_cards), {Card(0), Card(1), Card(2), Card(5)})
        # Case 7: no matches at all; the table simply grows by two cards.
        self.game.cards_on_pile = [Card(5)]
        self.game.opened_cards = [Card(0), Card(3)]
        earned_card = self.game.play_card((Card(10), None))
        self.assertEqual(set(earned_card), set())
        self.assertEqual(set(self.game.opened_cards), {Card(0), Card(3), Card(5), Card(10)})
class PlayerTest(unittest.TestCase):
    """Fixture for Player; no test methods exist yet."""
    def setUp(self):
        # A fresh game and a player with index 0 for each (future) test.
        self.game = HwaTu()
        self.player = Player(self.game, 0)
unittest.main() | StarcoderdataPython |
5107822 | #!/usr/bin/env python3
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test sync
#
from test_framework import BitcoinTestFramework
from authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
import random
from threading import Thread
from queue import Queue
def mineSingleBlock(miningQueue):
    """Worker: drain (node, block_count) tasks, mining one block at a time.

    A setgenerate call that raises is retried until it succeeds, so a task
    is only marked done after all of its blocks were actually mined.
    Returns True when the queue has been drained.
    """
    while not miningQueue.empty():
        nodeToWorkOn, remainingBlocks = miningQueue.get()
        while remainingBlocks > 0:
            try:
                nodeToWorkOn.setgenerate(True, 1)
                remainingBlocks -= 1
            except Exception:
                # This exception is due to a failure to mine this specific
                # block; retry it.  (The original stringified the exception
                # into an unused variable and looped on a completion flag.)
                continue
        miningQueue.task_done()
    return True
class SyncTest(BitcoinTestFramework):
    """Integration test: mine blocks concurrently on all nodes, then verify
    the chain tips agree after syncing."""

    def run_test(self):
        # Mine 51 blocks, each generated by a randomly chosen node.
        nodeIdsToGenerateNextBlock = [random.randrange(len(self.nodes)) for j in range(51)]
        numberOfBlocksPerNode = {i: nodeIdsToGenerateNextBlock.count(i) for i in nodeIdsToGenerateNextBlock}
        # BUG FIX: [Queue()] * n created n references to ONE shared queue, so
        # every worker drained the same queue; build a distinct queue per node.
        nodeMiningQueues = [Queue() for _ in self.nodes]
        for nodeId in range(len(self.nodes)):
            # BUG FIX: a node drawn zero times is absent from the dict; the
            # original numberOfBlocksPerNode[nodeId] raised KeyError then.
            nodeMiningQueues[nodeId].put(
                (self.nodes[nodeId], numberOfBlocksPerNode.get(nodeId, 0)))
        for nodeThreadIndex in range(len(self.nodes)):
            worker = Thread(target=mineSingleBlock, args=[nodeMiningQueues[nodeThreadIndex]])
            worker.setDaemon(True)
            worker.start()
        # Block until every per-node queue reports all tasks done.
        for qObj in nodeMiningQueues:
            qObj.join()
        sync_blocks(self.nodes)
        self.nodes[1].setgenerate(True, 50)
        sync_blocks(self.nodes)
        bestBlockHash = self.nodes[0].getbestblockhash()
        print("Block count totals {}".format(self.nodes[0].getblockcount()))
        # NOTE(review): [:1] compares only node 0 against its own tip, which
        # is vacuous; the intent was probably self.nodes[1:] -- TODO confirm.
        for node in self.nodes[:1]:
            assert_equal(node.getbestblockhash(), bestBlockHash)
if __name__ == '__main__':
    # Run the test directly; the framework's main() handles setup/teardown.
    SyncTest().main()
| StarcoderdataPython |
109189 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the stats_contributions
    table; prefer adding new migrations over hand-editing this one."""
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Contribution',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('amount', models.DecimalField(null=True, max_digits=9, decimal_places=2, blank=True)),
                ('currency', models.CharField(default=b'USD', max_length=3, choices=[(b'AUD', 'Australia Dollar'), (b'BRL', 'Brazil Real'), (b'CAD', 'Canada Dollar'), (b'CHF', 'Switzerland Franc'), (b'CZK', 'Czech Republic Koruna'), (b'DKK', 'Denmark Krone'), (b'EUR', 'Euro Member Countries'), (b'GBP', 'United Kingdom Pound'), (b'HKD', 'Hong Kong Dollar'), (b'HUF', 'Hungary Forint'), (b'ILS', 'Israel Shekel'), (b'JPY', 'Japan Yen'), (b'MXN', 'Mexico Peso'), (b'MYR', 'Malaysia Ringgit'), (b'NOK', 'Norway Krone'), (b'NZD', 'New Zealand Dollar'), (b'PHP', 'Philippines Peso'), (b'PLN', 'Poland Zloty'), (b'SEK', 'Sweden Krona'), (b'SGD', 'Singapore Dollar'), (b'THB', 'Thailand Baht'), (b'TWD', 'Taiwan New Dollar'), (b'USD', 'United States Dollar')])),
                ('source', models.CharField(max_length=255, null=True)),
                ('source_locale', models.CharField(max_length=10, null=True)),
                ('uuid', models.CharField(max_length=255, null=True, db_index=True)),
                ('comment', models.CharField(max_length=255)),
                ('transaction_id', models.CharField(max_length=255, null=True, db_index=True)),
                ('paykey', models.CharField(max_length=255, null=True)),
                ('type', models.PositiveIntegerField(default=0, db_index=True, choices=[(0, 'Voluntary'), (1, 'Purchase'), (2, 'Refund'), (3, 'Chargeback'), (99, 'Other')])),
            ],
            options={
                'db_table': 'stats_contributions',
            },
            bases=(models.Model,),
        ),
    ]
| StarcoderdataPython |
3500478 | """
"""
from typing import List, Callable, NamedTuple
import argparse
import json
import logging
import os
from string import Template
import sys
import hashlib
import torch
from flask import Flask, request, Response, jsonify, render_template, send_from_directory
from flask_cors import CORS
from gevent.pywsgi import WSGIServer
from lm_explorer import LanguageModel
from lm_explorer.util.sampling import random_sample
logging.basicConfig(level=logging.INFO)
# One beam-search hypothesis: its accumulated score plus the generated text
# split into the previous prefix and the newest token's text.
BeamElement = NamedTuple(
    'BeamElement', [('score', float), ('prev_str', str), ('next_str', str)])
class ServerError(Exception):
    """JSON-serializable API error with an HTTP status code and payload."""

    status_code = 400  # class-level default; overridable per instance

    def __init__(self, message, status_code=None, payload=None):
        # BUG FIX: Exception.__init__(self) dropped the message, so str(exc)
        # was empty; pass it through.
        super().__init__(message)
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Return the payload merged with the message, ready for jsonify()."""
        error_dict = dict(self.payload or ())
        error_dict['message'] = self.message
        return error_dict
def make_app(google_analytics_ua: str) -> Flask:
    """Build the Flask app serving the LM explorer UI and the /predict API."""
    app = Flask(__name__)  # pylint: disable=invalid-name

    # Hash app.js and embed the digest in the page as a cache breaker.
    # BUG FIX: the file handle was never closed; use a context manager.
    hasher = hashlib.md5()
    with open("static/app.js") as app_js:
        hasher.update(app_js.read().encode())
    js_hash = hasher.hexdigest()

    @app.errorhandler(ServerError)
    def handle_invalid_usage(error: ServerError) -> Response:  # pylint: disable=unused-variable
        # Translate ServerError into a JSON response with its status code.
        response = jsonify(error.to_dict())
        response.status_code = error.status_code
        return response

    @app.route('/')
    def index() -> Response:  # pylint: disable=unused-variable
        return render_template(
            'app.html',
            google_analytics_ua=google_analytics_ua,
            js_hash=js_hash
        )

    @app.route('/static/<path:path>')
    def static_proxy(path: str) -> Response:  # pylint: disable=unused-variable
        return send_from_directory('static', path)

    @app.route('/predict', methods=['POST', 'OPTIONS'])
    def predict() -> Response:  # pylint: disable=unused-variable
        # OPTIONS is answered empty to satisfy CORS preflight.
        if request.method == "OPTIONS":
            return Response(response="", status=200)

        data = request.get_json()

        previous_str = data["previous"]
        next_str = data.get("next")
        topk = data.get("topk", 10)

        # Log the query
        app.logger.info(f"<{previous_str}> <{next_str}>")

        model_name = data.get("model_name", "gpt2/117M")
        model = LanguageModel(model_name)

        logits = model.predict(previous_str, next_str)
        probabilities = torch.nn.functional.softmax(logits)

        best_logits, best_indices = logits.topk(topk)
        best_words = [model[idx.item()] for idx in best_indices]
        best_probabilities = probabilities[best_indices].tolist()

        return jsonify({
            "logits": best_logits.tolist(),
            "probabilities": best_probabilities,
            "words": best_words,
            "output": previous_str + (next_str or "")
        })

    # NOTE: two disabled endpoints (/random and /beam) used to live here as
    # large commented-out blocks; removed as dead code -- recover them from
    # version control if they are ever needed again.
    return app
def main(args):
    """Parse CLI flags and serve the app (Flask dev server or gevent WSGI)."""
    # Executing this file with no extra options runs the simple service with the bidaf test fixture
    # and the machine-comprehension predictor. There's no good reason you'd want
    # to do this, except possibly to test changes to the stock HTML).
    parser = argparse.ArgumentParser(description='Serve up a simple model')
    parser.add_argument('--port', type=int, default=8000, help='port to serve the demo on')
    parser.add_argument('--dev', action='store_true', help='if true launch flask so that the server restarted as changes occur to the template')
    args = parser.parse_args(args)
    app = make_app(google_analytics_ua=os.environ.get(
        "GOOGLE_ANALYTICS_UA",
        "UA-120916510-5" # Defaults to the development / staging UA
    ))
    CORS(app)
    if args.dev:
        # Dev mode: auto-reloading Flask server (blocks until killed).
        app.debug = True
        app.run(port=args.port, host='0.0.0.0')
        print(f"Serving demo on port {args.port}")
    else:
        # Production mode: gevent WSGI server (blocks until killed).
        http_server = WSGIServer(('0.0.0.0', args.port), app, log=sys.stdout)
        print(f"Serving demo on port {args.port}")
        http_server.serve_forever()
#
# HTML and Templates for the default bare-bones app are below
#
# NOTE(review): _HTML is an empty leftover placeholder; the served UI comes
# from templates/app.html and static/*.
_HTML = """
"""
if __name__ == "__main__":
    main(sys.argv[1:])
| StarcoderdataPython |
4984195 | # -*- coding: utf-8 -*-
"""
imapy.structures
~~~~~~~~~~~~~~~~
This module contains data structures used by Imapy
:copyright: (c) 2015 by <NAME>.
:license: MIT, see LICENSE for more details.
"""
class CaseInsensitiveDict(dict):
    """dict whose string keys are folded to lower case on every access."""

    def __init__(self, **kwargs):
        super(CaseInsensitiveDict, self).__init__()
        # BUG FIX: keyword arguments were previously accepted but silently
        # discarded; route them through __setitem__ to lower-case the keys.
        for key, value in kwargs.items():
            self[key] = value

    def __setitem__(self, key, value):
        super(CaseInsensitiveDict, self).__setitem__(key.lower(), value)

    def __getitem__(self, key):
        return super(CaseInsensitiveDict, self).__getitem__(key.lower())

    def __delitem__(self, key):
        super(CaseInsensitiveDict, self).__delitem__(key.lower())

    def __contains__(self, key):
        # Keep membership tests consistent with item access.
        return super(CaseInsensitiveDict, self).__contains__(key.lower())

    def get(self, key, default=None):
        return super(CaseInsensitiveDict, self).get(key.lower(), default)
| StarcoderdataPython |
5074867 | <filename>avionics/bootloader/bootloader_client_test.py
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from makani.avionics.bootloader import bootloader_client
from makani.avionics.common import aio
from makani.avionics.firmware.identity import identity_types
from makani.avionics.network import aio_labels
from makani.avionics.network import aio_node
from makani.lib.python import c_helpers
import mock
# Name<->value helper for the HardwareType enum. Not referenced by the tests
# below — presumably kept for interactive/debug use; TODO confirm.
hardware_type_helper = c_helpers.EnumHelper('HardwareType', identity_types)
class ParseArgumentsTest(unittest.TestCase):
  """Unit tests for bootloader_client.ParseArguments()."""

  def RunParse(self, arg_string):
    """Run ParseArguments() against a fake command line."""
    argv = ['bootloader_client.py'] + arg_string.split()
    with mock.patch.object(sys, 'argv', argv):
      return bootloader_client.ParseArguments()

  def CheckParse(self, arg_string, expected_file, cur_node, cur_node_index,
                 update_type='Application', new_node=None, new_node_index=None,
                 target=None, force_hardware=None, dump_image=False,
                 ignore_mismatch=False):
    """Parse arg_string and assert the fields common to all success cases.

    Factors out the assertion boilerplate that was previously copy-pasted
    in every test method.

    Args:
      arg_string: Command line (excluding the program name) to parse.
      expected_file: Expected value of parsed_args['file'].
      cur_node: aio_node enum whose IP parsed_args['cur_ip'] must match.
      cur_node_index: Expected parsed_args['cur_node_index'].
      update_type: Expected parsed_args['update_type'].
      new_node: aio_node enum for parsed_args['new_ip']; defaults to cur_node.
      new_node_index: Expected parsed_args['new_node_index']; defaults to
          cur_node_index.
      target: If not None, expected args.target.
      force_hardware: Expected args.force_hardware.
      dump_image: Expected args.dump_image flag.
      ignore_mismatch: Expected args.ignore_mismatch flag.

    Returns:
      The parsed_args dict, for any extra per-test assertions.
    """
    if new_node is None:
      new_node = cur_node
    if new_node_index is None:
      new_node_index = cur_node_index
    parsed_args = self.RunParse(arg_string)
    args = parsed_args['args']
    if target is not None:
      self.assertEqual(args.target, target)
    self.assertEqual(parsed_args['file'], expected_file)
    self.assertEqual(args.force_hardware, force_hardware)
    self.assertEqual(parsed_args['cur_ip'],
                     aio.AioNodeToIpAddressString(cur_node))
    self.assertEqual(parsed_args['new_ip'],
                     aio.AioNodeToIpAddressString(new_node))
    self.assertEqual(parsed_args['cur_node_index'], cur_node_index)
    self.assertEqual(parsed_args['new_node_index'], new_node_index)
    self.assertEqual(parsed_args['update_type'], update_type)
    self.assertEqual(args.dump_image, dump_image)
    self.assertEqual(args.ignore_mismatch, ignore_mismatch)
    return parsed_args

  def testBatt(self):
    self.CheckParse('--target batt_a batt_application.elf',
                    'batt_application.elf', aio_node.kAioNodeBattA,
                    aio_labels.kBattA)

  def testCoreSwitch(self):
    self.CheckParse('--target cs_a cs_application.elf',
                    'cs_application.elf', aio_node.kAioNodeCsA,
                    aio_labels.kCoreSwitchA, target='cs_a')

  def testCoreSwitchGroundStation(self):
    self.CheckParse('--target cs_gs_a cs_application.elf',
                    'cs_application.elf', aio_node.kAioNodeCsGsA,
                    aio_labels.kCoreSwitchGsA, target='cs_gs_a')

  def testDrum(self):
    self.CheckParse('--target drum_sensors_a drum_application.elf',
                    'drum_application.elf', aio_node.kAioNodeDrumSensorsA, 0,
                    target='drum_sensors_a')

  def testFlightComputer(self):
    self.CheckParse('--target fc_b fc_application.elf',
                    'fc_application.elf', aio_node.kAioNodeFcB,
                    aio_labels.kControllerB, target='fc_b')

  def testGps(self):
    self.CheckParse('--target gps_base_station gps_application.elf',
                    'gps_application.elf',
                    aio_node.kAioNodeGpsBaseStation, 0,
                    target='gps_base_station')

  def testMotor(self):
    self.CheckParse('--target motor_pti motor_application.elf',
                    'motor_application.elf', aio_node.kAioNodeMotorPti,
                    aio_labels.kMotorPti, target='motor_pti')

  def testPlatform(self):
    self.CheckParse('--target platform_sensors_a platform_application.elf',
                    'platform_application.elf',
                    aio_node.kAioNodePlatformSensorsA, 0,
                    target='platform_sensors_a')

  def testRecorderTms570(self):
    self.CheckParse(
        '--target recorder_tms570_platform recorder_application.elf',
        'recorder_application.elf',
        aio_node.kAioNodeRecorderTms570Platform,
        aio_labels.kRecorderTms570Platform,
        target='recorder_tms570_platform')

  def testServo(self):
    self.CheckParse('--target servo_e2 servo_application.elf',
                    'servo_application.elf', aio_node.kAioNodeServoE2,
                    aio_labels.kServoE2, target='servo_e2')

  def testDump(self):
    self.CheckParse('--target servo_e2 --dump_image servo_application.elf',
                    'servo_application.elf', aio_node.kAioNodeServoE2,
                    aio_labels.kServoE2, target='servo_e2', dump_image=True)

  def testBadUpdateType(self):
    with self.assertRaises(ValueError):
      self.RunParse('--target fc_c '
                    'tms570-bin/avionics/bootloader/firmware/bootloader.elf')

  def testFcConfigParamsSuccess(self):
    self.CheckParse('--target fc_c --config fc_config_params.bin',
                    'fc_config_params.bin', aio_node.kAioNodeFcC,
                    aio_labels.kFlightComputerC, target='fc_c',
                    update_type='ConfigParams')

  def testFcCalibParamsSuccess(self):
    self.CheckParse('--target fc_c rev_a3_fc_calib_params.bin --calib',
                    'rev_a3_fc_calib_params.bin', aio_node.kAioNodeFcC,
                    aio_labels.kFlightComputerC, target='fc_c',
                    update_type='CalibParams')

  def testBootloaderSuccess(self):
    self.CheckParse('--target motor_sbo --bootloader bootloader.elf',
                    'bootloader.elf', aio_node.kAioNodeMotorSbo,
                    aio_labels.kMotorSbo, target='motor_sbo',
                    update_type='Bootloader')

  def testIgnoreMismatch(self):
    self.CheckParse(
        '--target motor_sbo --ignore_mismatch motor_application.elf',
        'motor_application.elf', aio_node.kAioNodeMotorSbo,
        aio_labels.kMotorSbo, target='motor_sbo', ignore_mismatch=True)

  def testBootloaderOverrideTarget(self):
    self.CheckParse('--target fc_a --bootloader --override_target'
                    ' gps_base_station bootloader.elf',
                    'bootloader.elf', aio_node.kAioNodeFcA,
                    aio_labels.kFlightComputerA, target='fc_a',
                    new_node=aio_node.kAioNodeGpsBaseStation,
                    new_node_index=aio_labels.kGpsBaseStation,
                    update_type='Bootloader')

  def testOverrideTargetWithoutBootloaderFlag(self):
    with self.assertRaises(ValueError):
      self.RunParse('--target fc_a fc_application.bin --override_target fc_c')

  def testBootloaderWithoutBootloaderFlag(self):
    with self.assertRaises(ValueError):
      self.RunParse('--target motor_sbo bootloader.bin')

  def testParamsBadExtension(self):
    with self.assertRaises(ValueError):
      self.RunParse('--target fc_c --config fc_params.elf')

  def testDumpWithoutElf(self):
    with self.assertRaises(ValueError):
      self.RunParse('--target fc_c --dump_image fc_application.bin')

  def testCalibParamsWithoutCalibFlag(self):
    with self.assertRaises(ValueError):
      self.RunParse('--target fc_c fc_calib_params.bin')

  def testConfigParamsWithCalibFlag(self):
    with self.assertRaises(ValueError):
      self.RunParse('--target fc_c fc_config_params.bin --calib')

  def testNonParamsWithCalibFlag(self):
    with self.assertRaises(ValueError):
      self.RunParse('--target fc_c fc_application.elf --calib')

  def testBootloaderForceHardwareFc(self):
    self.CheckParse('--target cs_a --bootloader --force_hardware=fc'
                    ' cs_bootloader.elf',
                    'cs_bootloader.elf', aio_node.kAioNodeCsA,
                    aio_labels.kCoreSwitchA, target='cs_a',
                    force_hardware='fc', update_type='Bootloader')

  def testBootloaderForceHardwareOld(self):
    with self.assertRaises(ValueError):
      self.RunParse(
          '--target cs_a cs_bootloader.elf --bootloader --force_hardware=old')
# Run the test suite when executed directly.
if __name__ == '__main__':
  unittest.main()
| StarcoderdataPython |
1865533 | # -*- coding: utf-8 -*-
# flake8: noqa
"""Remo dataset module.
This module contains functions to work with REMO datasets.
"""
# flake8: noqa
from . import cal, codes
from .cal import parse_dates
def preprocess(ds, use_cftime=False):
    """Preprocessing for opening with xr.open_mfdataset.

    This function can be used as the ``preprocess`` argument when opening a
    REMO dataset with xr.open_mfdataset. It updates meta information
    according to the REMO code table and parses the time axis if it
    contains absolute times.

    Parameters
    ----------
    ds : xarray.Dataset
        Dataset to preprocess.
    use_cftime : bool
        Passed through to the date parser.

    Returns
    -------
    xarray.Dataset
        The dataset with updated meta info and, when possible, a parsed
        time axis.
    """
    ds = update_meta_info(ds)
    try:
        return parse_dates(ds, use_cftime)
    except Exception:
        # Fix: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt. Keep the fallback of returning the dataset
        # unparsed when the time axis cannot be decoded.
        return ds
def open_remo_mfdataset(filenames, update_meta=False, parse_dates=False):
    """Open multiple REMO files as a single xarray dataset.

    Parameters
    ----------
    filenames : str or list
        Paths or glob pattern passed on to ``xr.open_mfdataset``.
    update_meta : bool
        Update variable meta information from the REMO code table.
    parse_dates : bool
        Parse an absolute time axis into datetime objects.

    Returns
    -------
    xarray.Dataset
    """
    import xarray as xr

    ds = xr.open_mfdataset(filenames)
    if update_meta:
        # Bug fix: was ``update_meta_infos`` (undefined name -> NameError).
        ds = update_meta_info(ds)
    if parse_dates:
        # Bug fix: the ``parse_dates`` parameter shadowed the imported
        # cal.parse_dates function, so the original tried to call a bool.
        ds = cal.parse_dates(ds)
    return ds
def open_remo_dataset(
    filename,
    options="-f nc4",
    update_meta=False,
    returnX=True,
    inplace=False,
    parse_time=False,
    **kwargs
):
    """Read a REMO dataset from IEG or NetCDF.

    The file format is probed with cdo; NetCDF files are opened directly
    (via xarray or netCDF4), IEG files are converted through cdo first.

    Parameters
    ----------
    filename : str
        Filename of an IEG or NetCDF file.
    options : str
        cdo options used when converting non-NetCDF input.
    update_meta : bool
        Update variable meta information from the REMO code table.
    returnX : bool
        Return an xarray.Dataset; if False, a netCDF4 Dataset is returned.
    inplace : bool
        Update meta info on disk; only useful for netCDF4 Datasets.
    parse_time : bool
        Parse an absolute time axis into datetime objects.

    Returns
    -------
    dataset
        An xarray.Dataset or netCDF4.Dataset.
    """
    file_format = _get_fileformat(filename)
    if "NetCDF" in file_format and not options:
        dataset = _read_nc_dataset(filename, returnX=returnX,
                                   inplace=inplace, **kwargs)
    elif "IEG" in file_format:
        if returnX:
            dataset = _read_with_cdo(filename, options, returnXDataset=True)
        else:
            # returnCdf=False makes cdo hand back a file path instead of an
            # open dataset, which is then reopened writable for in-place use.
            dataset = _read_with_cdo(filename, options, returnCdf=not inplace)
            if inplace:
                dataset = _read_nc_dataset(dataset, returnX=False,
                                           inplace=inplace)
    else:
        dataset = _read_nc_dataset(filename, returnX, **kwargs)
    if update_meta:
        dataset = update_meta_info(dataset)
    # Identity check kept deliberately: only an explicit True triggers parsing.
    if parse_time is True:
        dataset = cal.parse_dates(dataset)
    return dataset
def _read_nc_dataset(filename, returnX=True, inplace=False, **kwargs):
    """Use xarray or netCDF4 to read NetCDF.

    Parameters
    ----------
    filename : str or list
        One path (or a list of paths for multi-file access).
    returnX : bool
        Read with xarray; if False, read with netCDF4.
    inplace : bool
        Open the netCDF4 dataset writable ("a") so meta data can be
        updated on disk; otherwise open read-only.
    """
    if returnX:
        import xarray as xr

        if isinstance(filename, list):
            return xr.open_mfdataset(filename, **kwargs)
        return xr.open_dataset(filename, **kwargs)
    else:
        from netCDF4 import Dataset, MFDataset

        mode = "a" if inplace else "r"
        if isinstance(filename, list):
            # MFDataset only supports read access.
            return MFDataset(filename, mode="r")
        # Bug fix: the original computed ``mode`` but then always opened
        # with mode="a", making read-only requests open the file writable.
        return Dataset(filename, mode=mode)
def _read_with_cdo(filename, options="", **kwargs):
    """Read a file of unknown format by piping it through cdo's copy."""
    from cdo import Cdo

    cdo = Cdo()
    return cdo.copy(options=options, input=filename, **kwargs)
def _get_fileformat(filename):
    """Return the file format reported by cdo, or "Unknown".

    "Unknown" is returned when cdo is not installed or cannot inspect
    the file.
    """
    try:
        from cdo import Cdo

        return Cdo().showformat(input=filename)[0]
    except Exception:
        # Fix: was a bare ``except:``; keep the best-effort fallback but
        # stop swallowing SystemExit/KeyboardInterrupt.
        return "Unknown"
def update_meta_info(ds, id=None):
    """Update meta info of a dataset.

    Updates variable names and attributes in an xarray (or netCDF4)
    dataset based on the REMO code table.

    Parameters
    ----------
    ds : dataset
        The dataset in which meta info should be updated.
    id : optional
        Unused; kept for backward compatibility of the signature.

    Returns
    -------
    dataset
        The dataset with updated meta information.
    """
    meta = {var: _get_meta_info(ds[var]) for var in ds.variables}
    # Fix: the original filter read ``if info if not None``, whose second
    # clause is a constant True; keep only variables with a table entry.
    renames = {var: info["variable"] for var, info in meta.items() if info}
    ds = _update_attrs(ds, meta)
    ds = _rename_ds(ds, renames)
    return ds
def _get_meta_info(da, id=None):
    """Look up the REMO code-table entry for a netcdf variable.

    The lookup is tried by explicit ``id`` first, then by the variable's
    name, then by its ``code`` attribute. Returns None when nothing
    matches.
    """
    if id:
        return codes.get_dict(id)
    try:
        return codes.get_dict(da.name)
    except Exception:
        # Fix: was a bare ``except:``; name not found in the table,
        # fall back to the numeric code attribute.
        pass
    try:
        return codes.get_dict(da.code)
    except Exception:
        # No code attribute or unknown code: no meta info available.
        return None
def _rename_ds(ds, renames):
    """Rename variables in a dataset (xarray or netCDF4).

    Parameters
    ----------
    ds : dataset
        xarray.Dataset (has ``rename``) or netCDF4.Dataset
        (has ``renameVariable``).
    renames : dict
        Mapping of old variable name -> new variable name.
    """
    try:
        # xarray interface: rename returns a new dataset.
        return ds.rename(renames)
    except AttributeError:
        # Fix: was a bare ``except:``. netCDF4 interface: rename each
        # variable in place, skipping no-op renames.
        for old, new in renames.items():
            if old != new:
                ds.renameVariable(old, new)
        return ds
def _update_attrs(ds, meta):
    """Update variable attributes in a dataset (xarray or netCDF4).

    Parameters
    ----------
    ds : dataset
        Dataset whose variables should receive the attributes.
    meta : dict
        Mapping of variable name -> attribute dict (or None).
    """
    for var, info in meta.items():
        if info:
            # Drop empty/None entries so they do not clobber attributes.
            filter_info = {key: value for key, value in info.items() if value}
            try:
                # xarray interface
                ds[var].attrs.update(filter_info)
            except AttributeError:
                # Fix: was a bare ``except:``. netCDF4 interface.
                ds[var].setncatts(filter_info)
    return ds
| StarcoderdataPython |
12849959 | <reponame>agnisain123/CodeChef-1<filename>Beginner/Easy Math/easy_math.py
def max_pair_digit_sum(values):
    """Return the largest digit sum of values[i] * values[j] over all pairs i < j.

    Returns 0 when fewer than two values are given (no pairs exist).
    """
    best = 0
    for i, first in enumerate(values):
        for second in values[i + 1:]:
            # Digit sum of the product. abs() guards against the infinite
            # loop the original modulo-based digit loop would enter for a
            # negative product (Python's floor division never reaches 0).
            digit_sum = sum(int(digit) for digit in str(abs(first * second)))
            best = max(best, digit_sum)
    return best


def main():
    """Read CodeChef-style input: T test cases of (n, then n integers)."""
    for _ in range(int(input())):
        input()  # n: declared list length; the actual list length is used
        values = list(map(int, input().split()))
        print(max_pair_digit_sum(values))


# Guard the script entry so the module can be imported without reading stdin.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
5170153 | <gh_stars>0
"""
用户你好!
lightmysql是一个可以简单地使用Python操作MySQL数据库的扩展。
我们的主要功能是根据Python传入的列表和字典生成MySQL语言,并通过pymysql提交。
由于以轻量为目标,我们暂时保留了INSERT SELECT UPDATE DELETE四条语句,创建库、表等操作没有写入。
针对未适配的操作,你可以通过run_code()函数手动编写SQL语句、MySQL客户端或使用图形化软件操作。
下面将介绍各个函数的详细功能和传参方式。
我们假设我们的MySQL中存在一个名为yxzl的数据库,其中有一个名为users的数据表,
表中有两个字段:name(TEXT) 和 age(int)
依赖安装:
pip3 install lightmysql
"""
# 让我们首先导入包
import lightmysql
# 下面让我们连接到数据库,并选定要操作的库名称为yxzl
conn = lightmysql.Connect(host="127.0.0.1", user="root", password="", database="yxzl", port=3306, charset="utf8")
# 插入一些数据用于测试
conn.insert("users", {"name": "user1", "age": 15})
conn.insert("users", {"name": "user2", "age": 20})
conn.insert("users", {"name": "user3", "age": 100})
# 等价SQL(第一个insert的):
# INSERT INTO users name, age VALUES 'user1', 15;
# 下面的代码用于查询数据
# 这个写法可以获取数据表(test)中的所有记录
print(conn.get("test"))
# 等价SQL:SELECT * FROM test;
# 输出:[('user1', 15), ('user2', 20), ('user3', 100)]
# 查询记录的一个(或多个)字段
# target指定返回被选中记录需要返回的字段
print(conn.get("test", target=["age"]))
# 等价SQL:SELECT age FROM test;
# 输出:[(15), (20), (100)]
# 包含查询条件(WHERE子句)的查询
print(conn.get("test", target=["age"], condition={"name": "user1"}))
# 等价SQL:SELECT age FROM test WHERE name='user1';
# 输出:[(15)]
print(conn.get("test", condition={"age": 20}))
# 等价SQL:SELECT * FROM test WHERE age=20;
# 输出:[('user2', 20)]
# 提示:虽然对于int类型的数据在定义或书写查询条件时SQL语言支持两旁带引号的数值,
# 但是lightmysql由于Python字符串转义需要不支持。
print(conn.get("test", condition={"age": [20, 100]}))
# 等价SQL:SELECT * FROM test WHERE (age=20 or age=100);
# 输出:[('user2', 20), ('user3', 100)]
print(conn.get("test", condition={"name": "user2", "age": 20}))
# 等价SQL:SELECT * FROM test WHERE name='user2' and age=20;
# 输出:[('user2', 20)]
print(conn.get("test", condition={"name": "user2", "age": 100}, condition_sp="or"))
# 等价SQL:SELECT * FROM test WHERE name='user2' or age=100;
# 输出:[('user2', 20), ('user3', 100)]
# 暂不支持更复杂的条件关系。
# upsate和delete的WHERE子句于此处完全相同。
# 下面介绍update
conn.update("test", changes={"age": 50}, condition={"name": "user3"})
# 等价SQL:UPDATE test SET age=50 WHERE name='user3';
# 即changes里面存储的是需要更新的字段和新的值,
# condition对应WHERE子句的生成,规则与get一样。
# DELETE
conn.delete("test", condition={"name": "user1"})
# 等价SQL:DELETE FROM test WHERE name='user1';
# 非常简单,只需要传入condition生成WHERE子句,规则与前文相同。
# 重启
conn.restart()
# 由于MySQL服务器恶心的8小时一清session规则,导致每八小时需要重连一次。
# 在请求过多时也有可能造成堵塞,需要重启解决。
# 目前我们采取最简单的异常捕获,未来将使用连接池等方式避免问题出现。
# 关闭连接
conn.close()
| StarcoderdataPython |
5065929 | #PDF imports
from pdf2image import convert_from_path
import IPython
from IPython.display import Image
from IPython.display import display
import cv2
import numpy as np
# HTML imports
from IPython.core.display import HTML
import re
def loadpdf(pdfname, dpi=1000):
    """Rasterize the first page of a PDF and return it as an RGB numpy array."""
    pages = convert_from_path(pdfname, dpi=dpi)
    first_page = pages[0]
    return np.array(first_page)
def convert(pdfname, dpi=1000):
    """Render *pdfname* to a PNG with the same stem and return the PNG path.

    Parameters
    ----------
    pdfname : str
        Path to the PDF file; the output path replaces '.pdf' with '.png'.
    dpi : int
        Rasterization resolution passed to loadpdf.
    """
    # Compute the output name once (was duplicated in write and return).
    pngname = pdfname.replace('.pdf', '.png')
    img = loadpdf(pdfname, dpi)
    # OpenCV expects BGR channel order when writing images.
    cv2.imwrite(pngname, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
    return pngname
def displaypdf(pdfname):
    """Convert *pdfname* to PNG and show it inline in the notebook."""
    png_path = convert(pdfname)
    IPython.display.display(Image(png_path))
def loadhtml(html_file):
    """Extract the <body> of *html_file* with known classes inlined as styles.

    For each recognized css class, its positioning style is prepended to the
    element's inline style and the class attribute is stripped, so the
    fragment renders standalone (e.g. inside a notebook).
    """
    with open(html_file) as handle:
        html = handle.read()
    figure = re.findall(r"<body>(.*)</body>", html, re.DOTALL)[0]
    inline_styles = {
        "module": "position: absolute; ",
        "title-container": ("position: absolute; margin-top: 0; "
                            "margin-bottom: 0;display: flex; "
                            "align-items: center; justify-content: center; "),
        "title-content": "margin-top: 0; margin-bottom: 0;",
        "element": "position: absolute; margin: 0; ",
    }
    for css_class, style in inline_styles.items():
        figure = figure.replace(f'class="{css_class}" style="',
                                f'class="{css_class}" style="{style}')
        figure = figure.replace(f'class="{css_class}"', '')
    return figure
def displayhtml(html_file):
    """Render the figure extracted from *html_file* inline in the notebook."""
    fragment = loadhtml(html_file)
    display(HTML(data=fragment))
145905 | import numpy as np
from VariableUnittest import VariableUnitTest
from gwlfe.Output.Loading import StreamBankNSum
class TestStreamBankNSum(VariableUnitTest):
    """Regression test for the StreamBankNSum loading output.

    Compares the vectorized implementation (StreamBankNSum_f) against the
    reference implementation (StreamBankNSum) on the fixture inputs held
    in self.z (provided by VariableUnitTest).
    """

    def test_StreamBankNSum(self):
        # z bundles the full GWLF-E parameter set for the test fixture.
        z = self.z
        # Both implementations share one long positional signature; the
        # argument order must match between the two calls exactly. They
        # must agree element-wise to 7 decimal places.
        np.testing.assert_array_almost_equal(
            StreamBankNSum.StreamBankNSum_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area,
                                            z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN,
                                            z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap, z.SatStor_0,
                                            z.RecessionCoef, z.SeepCoef, z.Qretention, z.PctAreaInfil, z.n25b,
                                            z.Landuse,
                                            z.TileDrainDensity, z.PointFlow, z.StreamWithdrawal, z.GroundWithdrawal,
                                            z.NumAnimals, z.AvgAnimalWt, z.StreamFlowVolAdj, z.SedAFactor_0, z.AvKF,
                                            z.AvSlope, z.SedAAdjust, z.StreamLength, z.n42b, z.AgLength,
                                            z.UrbBankStab, z.SedNitr, z.BankNFrac, z.n69c, z.n45, z.n69, z.n46c, z.n42),
            StreamBankNSum.StreamBankNSum(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area,
                                          z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN,
                                          z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap, z.SatStor_0,
                                          z.RecessionCoef, z.SeepCoef, z.Qretention, z.PctAreaInfil, z.n25b, z.Landuse,
                                          z.TileDrainDensity, z.PointFlow, z.StreamWithdrawal, z.GroundWithdrawal,
                                          z.NumAnimals, z.AvgAnimalWt, z.StreamFlowVolAdj, z.SedAFactor_0, z.AvKF,
                                          z.AvSlope, z.SedAAdjust, z.StreamLength, z.n42b, z.AgLength,
                                          z.UrbBankStab, z.SedNitr, z.BankNFrac, z.n69c, z.n45, z.n69, z.n46c, z.n42),
            decimal=7)
| StarcoderdataPython |
9783027 | <gh_stars>0
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class TrackerFilter(models.Model):
    """Per-user chart display preferences for the tracker.

    One row per user: the user link is both the primary key and a
    one-to-one relation, and rows are removed with the user.
    """

    # Owning user; CASCADE deletes the filter when the user is deleted.
    user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
    # Selected chart style; defaults to 1 (the code meanings are defined
    # by the consuming views — TODO confirm).
    chart_type = models.IntegerField(default=1)
    # Number of data points shown; defaults to 7 (presumably one week of
    # daily values — verify against the charting view).
    number_of_data = models.IntegerField(default=7)
8145429 | <filename>geo/geophys/gpr/metadata.py<gh_stars>0
from os.path import isfile, join
from collections import OrderedDict
import pandas as pd
class MetaData:
    """Base class managing a csv file used for metadata logging/loading.

    On construction the backing csv is created (header row only) if it
    does not already exist. Child classes define the metadata fields by
    passing their column names.
    """

    def __init__(self, rdir, name, columns):
        # Path of the backing csv inside the root directory.
        self.csv = join(rdir, name + '.csv')
        self.name = name
        header = self._columns(columns)
        if not isfile(self.csv):
            self.write(pd.DataFrame(columns=header))

    def _columns(self, columns):
        """Return *columns* framed by the standard 'id' and '<name>_note' fields."""
        return ['id'] + columns + [self.name + '_note']

    def write(self, df):
        """Persist *df* to the csv (index omitted)."""
        df.to_csv(self.csv, index=False)

    def read(self):
        """Load the csv as a pandas DataFrame."""
        return pd.read_csv(self.csv)

    def if_empty(self, bf=None):
        """If the csv holds no rows, seed its 'id' column from bf['id']."""
        df = self.read()
        if df.empty:
            df['id'] = bf['id']
            self.write(df)
| StarcoderdataPython |
285506 | <reponame>BeorEdain/BillMe
class Bills:
    """Container for a single bill record parsed from a govinfo digest.

    Each attribute mirrors one field of the digest mapping; fields missing
    from the digest default to None (dict.get semantics).
    """

    # Maps attribute name -> key in the source digest mapping.
    _DIGEST_FIELDS = {
        "title": "title",
        "short_title": "shortTitle",
        "collection_code": "collectionCode",
        "collection_name": "collectionName",
        "category": "category",
        "date_issued": "dateIssued",
        "details_link": "detailsLink",
        "package_ID": "packageId",
        "download": "download",
        "related": "related",
        "branch": "branch",
        "pages": "pages",
        "government_author_1": "governmentAuthor1",
        "government_author_2": "governmentAuthor2",
        "SuDoc_class_number": "suDocClassNumber",
        "bill_type": "billType",
        "congress": "congress",
        "origin_chamber": "originChamber",
        "current_chamber": "currentChamber",
        "session": "session",
        "bill_number": "billNumber",
        "bill_version": "billVersion",
        "is_appropriation": "isAppropriation",
        "is_private": "isPrivate",
        "publisher": "publisher",
        "committees": "committees",
        "members": "members",
        "other_identifier": "otherIdentifier",
        "references": "references",
        "last_modified": "lastModified",
    }

    def __init__(self, digest):
        for attribute, key in self._DIGEST_FIELDS.items():
            setattr(self, attribute, digest.get(key))
1668138 | from typing import List
from pygls.lsp.types import Model
class LanguageServerConfiguration(Model):  # type: ignore
    """Client-provided configuration options for the language server."""

    # Run the lint targets automatically when a document is saved.
    enable_lint_on_save: bool
    # Offer code actions (quick fixes) to the editor.
    enable_code_action: bool
    # Task/target names executed when linting.
    lint_targets: List[str]
    # Task/target names executed when formatting (default includes "lint").
    format_targets: List[str]

    @classmethod
    def default(cls) -> "LanguageServerConfiguration":
        """Return the configuration used when the client provides none."""
        return cls(
            enable_lint_on_save=True,
            enable_code_action=True,
            lint_targets=["lint"],
            format_targets=["format", "lint"],
        )
| StarcoderdataPython |
366319 | """
A watchdog is a little piece of software that monitors our filesystem, looking for any changes (like the creation,
change, or deletion of a file or directory). When a change occurs, the watchdog reports it to us by raising a
specific event that we can handle.
For example, let's suppose you have developed a program that uses a configuration file.
Your program could set a watchdog to monitor that file, and if the configuration file is modified, you could
reload it and apply the new configuration at runtime, without needing to restart your program.
Concept/Code: <NAME> (<EMAIL>)
github repository: https://github.com/sangamsyabil/hemAutomation
Happy coding !!!
"""
import argparse
import os
import time
from datetime import datetime
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
def current_time():
    """Return the current local time formatted as MM/DD/YYYY, HH:MM:SS."""
    return datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
def on_created(event):
    """Log a timestamped message when a file or directory is created."""
    print(f">> [{current_time()}] {event.src_path} has been created!")
def on_deleted(event):
    """Log a timestamped message when a file or directory is deleted."""
    print(f">> [{current_time()}] Please watch out! Someone deleted {event.src_path}!")
def on_modified(event):
    """Log a timestamped message when a file or directory is modified."""
    print(f">> [{current_time()}] {event.src_path} has been modified")
def on_moved(event):
    """Log a timestamped message when a file or directory is moved/renamed."""
    print(f">> [{current_time()}] Someone moved {event.src_path} to {event.dest_path}")
def parse_arguments():
    """Parse the command line; --path falls back to the module-level default_path.

    NOTE(review): default_path is only defined under the __main__ guard, so
    calling this after a plain import would raise NameError — confirm intent.
    """
    arg_parser = argparse.ArgumentParser(description='Filesystem check using watchdog')
    # os.path.join with a single argument returns it unchanged; kept as-is.
    arg_parser.add_argument(
        '--path', type=dir_path, default=os.path.join(default_path),
        help="Provide full path, if not specified it will take 'downloadFiles' as default")
    return arg_parser.parse_args()
def dir_path(path):
    """argparse type-checker: return *path* if it is an existing directory.

    Raises argparse.ArgumentTypeError otherwise, so argparse reports a
    clean usage error.
    """
    if not os.path.isdir(path):
        raise argparse.ArgumentTypeError(f"readable_dir:{path} is not a valid path")
    return path
def main(args):
    """Watch args.path recursively and report filesystem events until Ctrl-C."""
    # Match every path, ignore nothing, include directories, case-sensitive.
    handler = PatternMatchingEventHandler("*", "", False, True)
    handler.on_created = on_created
    handler.on_deleted = on_deleted
    handler.on_modified = on_modified
    handler.on_moved = on_moved

    observer = Observer()
    observer.schedule(handler, args.path, recursive=True)
    observer.start()
    try:
        # Idle loop: the observer does its work on a background thread.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
        observer.join()
if __name__ == "__main__":
    # default_path is read by parse_arguments() as a module-level global.
    default_path = "/Users/hemrajregmi/PycharmProjects/random_for_test"
    main(parse_arguments())
| StarcoderdataPython |
6480290 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def reserve(s):
    """Return *s* reversed (name kept as-is; likely meant 'reverse')."""
    return "".join(reversed(s))
# Walk the string in argument "s" from its last character backwards,
# one character at a time (a slice with step -1).
orig = "good"
result = reserve(orig)
print(result)
# Requirement 1: write a function "reverse" that reverses a string
# Expected output: doog
259242 | import requests
from bs4 import BeautifulSoup
from time import sleep, strftime, gmtime
from random import randint
#returns the unique semester identifier
def getSemester():
    """Return the unique identifier of the currently selected semester.

    Scrapes the Testudo schedule-of-classes landing page and extracts the
    ``value`` attribute of the selected option in the term drop-down.
    """
    # Fix: use the session as a context manager so the HTTP connection is
    # released even if the parsing below raises (it leaked on error before).
    with requests.session() as s:
        html = s.get("https://ntst.umd.edu/soc")
    # Locate the term drop-down and split it into its <option> elements.
    options = BeautifulSoup(html.text, "html.parser")
    options = options.find("select", {"id": "term-id-input"})
    options = str(options).split("</option>")
    # The active term is the option carrying the "selected" attribute
    # (the last match wins, as in the original).
    for option in options:
        if '"selected"' in option:
            semester = option
    # Extract the value="..." attribute from the matching option.
    semester = semester[semester.index('value="') + 7:]
    semester = semester[:semester.index('"')]
    return semester
#returns a list of sections
def getSections(course):
#start a new web scraping session
s = requests.session()
#begin composing the url
url = "https://ntst.umd.edu/soc/search"
url += "?courseId=" + course
url += "§ionId="
url += "&termId="+getSemester()
url += "&_openSectionsOnly=on"
url += "&creditCompare="
url += "&credits="
url += "&courseLevelFilter=ALL"
url += "&instructor="
url += "&_facetoface=on"
url += "&_blended=on"
url += "&_online=on"
url += "&courseStartCompare="
url += "&courseStartHour="
url += "&courseStartMin="
url += "&courseStartAM="
url += "&courseEndHour="
url += "&courseEndMin="
url += "&courseEndAM="
url += "&teachingCenter=ALL"
url += "&_classDay1=on"
url += "&_classDay2=on"
url += "&_classDay3=on"
url += "&_classDay4=on"
url += "&_classDay5=on"
#download the list of classes
html = s.get(url).text
#parse the html with bs4
courses = BeautifulSoup(html,"html.parser").find_all("div", {"class":"section"})
#make an empty list to contain all sections
sections = []
#loop through every section in the course list
for course in courses:
#declare a blank list to hold section and time info
section = []
times = []
#get the times avaiable
slots = course.find("div", {"class":"class-days-container"})
slots = slots.find_all("div", {"class":"row"})
#loops thorugh and add all time to the list
for slot in slots:
time = slot.find("div", {"class":"section-day-time-group"})
time = " ".join(time.text.strip().split("\n"))
times.append(time)
#get the name of the course
name = str(course.find("div", {"class":"section-action-links-container"}))
name = name[name.index('value="')+7:]
name = name[:name.index('"')]
#append the name of the course to the list
section.append(name)
#get the amount of open seats
openSeatsCount = int(course.find("span", {"class":"open-seats-count"}).text)
#say whether class is open
if openSeatsCount > 0:
section.append("open")
else:
section.append("closed")
#get the section number, and the instructor
section.append(course.find("span", {"class":"section-id"}).text.strip())
section.append(course.find("span", {"class":"section-instructor"}).text)
#add the section information and the times
sections.append(section)
section.append(times)
#close the current session
s.close()
#return all sections
return sections
# returns whether a section entry is flagged open
def isOpen(section):
    """Return True when the section entry's status field is "open".

    ``section`` is an entry produced by ``getSections`` whose second
    element is either "open" or "closed".
    """
    return section[1] == "open"
# main function, continuously checks for openings
def testudo(course):
    """Interactively watch one section of *course* until it opens.

    Lists all sections, asks the user to pick one by its 1-based list
    number, then polls the Schedule of Classes until that section reports
    open seats, logging a timestamped line on every check.
    """
    # get all sections for the course
    sections = getSections(course)
    # loop through and list all sections (zero-padded to two digits)
    for index, value in enumerate(sections):
        if index < 9:
            print("(0"+str(index+1)+") "+str(value))
        else:
            print("("+str(index+1)+") "+str(value))
    # get the section wanted (convert the 1-based choice to a 0-based index)
    section = int(input("Type the list number for the section wanted: "))-1
    output = isOpen(sections[section])
    # if section not open, re-scrape and check again until it opens;
    # the sleep is randomized -- presumably to avoid a fixed request
    # pattern against the server (confirm intent)
    while output == False:
        output = isOpen(getSections(course)[section])
        print("["+strftime("%Y-%m-%d %H:%M:%S", gmtime())+"] (section closed)")
        sleep(randint(35, 45))
    if output == True:
        print("["+strftime("%Y-%m-%d %H:%M:%S", gmtime())+"] (section open)")
        '''
        Place custom notification code in this area!
        '''
# Command line entry point: require exactly one argument, the course id.
if __name__ == '__main__':
    import sys
    if len(sys.argv) < 2:
        sys.stderr.write('usage: python3 testudo.py <course>\n')
        sys.exit(1)
    else:
        # normalize the course id to lower case before searching
        # BUG FIX: stray dataset-artifact text fused onto the end of this
        # line made it a syntax error; removed.
        testudo(sys.argv[1].lower())
4862173 | import tarfile
from galaxy.model.unittest_utils.store_fixtures import (
deferred_hda_model_store_dict,
history_model_store_dict,
one_hda_model_store_dict,
)
from galaxy_test.api.test_histories import ImportExportTests
from galaxy_test.base.api_asserts import assert_has_keys
from galaxy_test.base.populators import (
DatasetCollectionPopulator,
DatasetPopulator,
)
from galaxy_test.driver.integration_util import (
IntegrationTestCase,
setup_celery_includes,
UsesCeleryTasks,
)
# Celery task modules that must be registered for the task-based tests below.
celery_includes = setup_celery_includes()
class ImportExportHistoryOutputsToWorkingDirIntegrationTestCase(ImportExportTests, IntegrationTestCase):
    """History import/export tests with outputs written to the working directory."""

    # run the import/export flows synchronously (no Celery tasks)
    task_based = False
    framework_tool_and_types = True

    @classmethod
    def handle_galaxy_config_kwds(cls, config):
        """Force working-directory outputs and the extended metadata strategy."""
        config["outputs_to_working_directory"] = True
        config["metadata_strategy"] = "extended"

    def setUp(self):
        super().setUp()
        self._set_up_populators()
class ImportExportHistoryViaTasksIntegrationTestCase(ImportExportTests, IntegrationTestCase, UsesCeleryTasks):
    """History import/export tests executed through Celery tasks."""

    task_based = True
    framework_tool_and_types = True

    @classmethod
    def handle_galaxy_config_kwds(cls, config):
        cls.setup_celery_config(config)

    def setUp(self):
        super().setUp()
        self._set_up_populators()

    def test_import_from_model_store_async(self):
        """Import a history from a model store dict via the async endpoint."""
        async_history_name = "Model store imported history"
        store_dict = history_model_store_dict()
        # give the imported history a distinctive name so we can poll for it
        store_dict["history"]["name"] = async_history_name
        response = self.dataset_populator.create_from_store_async(store_dict=store_dict)
        assert_has_keys(response, "id")
        # the import runs as a background task; wait until the history appears
        self.dataset_populator.wait_for_history_with_name(
            async_history_name,
            "task based import history",
        )
class ImportExportHistoryContentsViaTasksIntegrationTestCase(IntegrationTestCase, UsesCeleryTasks):
    """Tests for exporting/importing individual history contents via Celery tasks."""

    task_based = True
    framework_tool_and_types = True

    @classmethod
    def handle_galaxy_config_kwds(cls, config):
        cls.setup_celery_config(config)

    def setUp(self):
        super().setUp()
        self._set_up_populators()
        self.history_id = self.dataset_populator.new_history()

    def _set_up_populators(self):
        self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
        self.dataset_collection_populator = DatasetCollectionPopulator(self.galaxy_interactor)

    def test_export_and_imported_discarded(self):
        """A re-uploaded dataset arrives in the 'discarded' state, not deleted."""
        hda1 = self.dataset_populator.new_dataset(self.history_id, wait=True)
        second_history_id, as_list = self.dataset_populator.reupload_contents(hda1)
        assert len(as_list) == 1
        new_hda = as_list[0]
        assert new_hda["model_class"] == "HistoryDatasetAssociation"
        assert new_hda["state"] == "discarded"
        assert not new_hda["deleted"]

    def test_export_and_imported_discarded_bam(self):
        """Same round-trip as above, for a binary (BAM) dataset."""
        contents = self.dataset_populator.new_dataset(
            self.history_id,
            content=open(self.test_data_resolver.get_filename("1.bam"), "rb"),
            file_type="bam",
            wait=True,
        )
        second_history_id, as_list = self.dataset_populator.reupload_contents(contents)
        assert len(as_list) == 1
        new_hda = as_list[0]
        assert new_hda["model_class"] == "HistoryDatasetAssociation"
        assert new_hda["state"] == "discarded"
        assert not new_hda["deleted"]

    def test_import_as_discarded_from_dict(self):
        """Importing a store without a source yields a discarded dataset."""
        as_list = self.dataset_populator.create_contents_from_store(
            self.history_id,
            store_dict=one_hda_model_store_dict(
                include_source=False,
            ),
        )
        assert len(as_list) == 1
        new_hda = as_list[0]
        assert new_hda["model_class"] == "HistoryDatasetAssociation"
        assert new_hda["state"] == "discarded"
        assert not new_hda["deleted"]
        # contents listing must not error on the imported item
        contents_response = self._get(f"histories/{self.history_id}/contents?v=dev")
        contents_response.raise_for_status()

    def test_import_as_deferred_from_discarded_with_source_dict(self):
        """Importing a store that includes a source yields a deferred dataset."""
        as_list = self.dataset_populator.create_contents_from_store(
            self.history_id,
            store_dict=one_hda_model_store_dict(
                include_source=True,
            ),
        )
        assert len(as_list) == 1
        new_hda = as_list[0]
        assert new_hda["model_class"] == "HistoryDatasetAssociation"
        assert new_hda["state"] == "deferred"
        assert not new_hda["deleted"]
        contents_response = self._get(f"histories/{self.history_id}/contents?v=dev")
        contents_response.raise_for_status()

    def test_export_and_imported_discarded_collection(self):
        """Round-trip a dataset collection through a downloaded model store archive."""
        create_response = self.dataset_collection_populator.create_list_in_history(
            history_id=self.history_id,
            direct_upload=True,
            wait=True,
        ).json()
        self.dataset_populator.wait_for_history(self.history_id)
        contents = create_response["outputs"][0]
        temp_tar = self.dataset_populator.download_contents_to_store(self.history_id, contents, "tgz")
        with tarfile.open(name=temp_tar) as tf:
            assert "datasets_attrs.txt" in tf.getnames()
            assert "collections_attrs.txt" in tf.getnames()
        second_history_id = self.dataset_populator.new_history()
        # BUG FIX: the return value of this call was bound to ``as_list`` and
        # then immediately overwritten below; call it for its side effect only.
        self.dataset_populator.create_contents_from_store(
            second_history_id,
            store_path=temp_tar,
        )
        as_list = self.dataset_populator.get_history_contents(second_history_id)
        assert len(as_list) == 4
        hdcas = [e for e in as_list if e["history_content_type"] == "dataset_collection"]
        assert len(hdcas) == 1

    def test_import_as_deferred_from_dict(self):
        """Importing a deferred-HDA store dict yields a deferred dataset."""
        as_list = self.dataset_populator.create_contents_from_store(
            self.history_id,
            store_dict=deferred_hda_model_store_dict(),
        )
        assert len(as_list) == 1
        new_hda = as_list[0]
        assert new_hda["model_class"] == "HistoryDatasetAssociation"
        assert new_hda["state"] == "deferred"
        assert not new_hda["deleted"]
        contents_response = self._get(f"histories/{self.history_id}/contents?v=dev")
        contents_response.raise_for_status()
| StarcoderdataPython |
3558820 | <filename>dags/ethereum_kovan_export_dag.py
from __future__ import print_function
from ethereumetl_airflow.build_export_dag import build_export_dag
from ethereumetl_airflow.variables import read_export_dag_vars
# airflow DAG: Kovan testnet export. All tuning values below act as defaults
# that ``read_export_dag_vars`` resolves against Airflow Variables with the
# ``ethereum_kovan_`` prefix.
DAG = build_export_dag(
    dag_id='ethereum_kovan_export_dag',
    **read_export_dag_vars(
        var_prefix='ethereum_kovan_',
        # cron schedule: daily at 12:20
        export_schedule_interval='20 12 * * *',
        export_start_date='2017-03-02',
        export_max_workers=10,
        export_batch_size=10,
        export_retries=5,
        # NOTE(review): DAO-fork and genesis trace handling disabled --
        # presumably not applicable to this network; confirm.
        export_daofork_traces_option=False,
        export_genesis_traces_option=False,
    )
)
| StarcoderdataPython |
4800296 | <filename>validation.py
# validation.py
# A tool to quantify the similarity between extracted pen annotated regions and manually annotated regions from WSI thumbnails.
# Created by <NAME>, MSKCC
# <EMAIL>
#
# <NAME>, Yarlagadda DVK, <NAME>, Fuchs TJ: Overcoming an Annotation Hurdle: Digitizing Pen Annotations from Whole Slide Images. Journal of Pathology Informatics, 2021
import os
import imageio
import glob
import shutil
import numpy as np
import sklearn.metrics
from tqdm import tqdm
import math
import pandas as pd
# Results table: header row followed by one row of metrics per image.
dat = [['Run', 'ImageID', 'Dice', 'Jaccard', 'Precision', 'Recall', 'Kappa']]
# Thumbnail dimensions, collected to report size statistics at the end.
dims = []
for i in range(4):
    mask_pattern = "data/extractions/" + str(i) + "/*step15.jpg"
    for extraction_file in tqdm(glob.glob(mask_pattern)):
        # take the extraction mask; the image id is the second '_'-separated token
        imageID = os.path.basename(os.path.basename(extraction_file)).split('_')[1]
        # match with the manual annotation for the same image
        annotation_file = "data/annotations/resized/labels_rs_" + imageID + ".png"
        # only score images that have a manual annotation
        if (os.path.exists(annotation_file)):
            # get the extraction mask
            extraction_mask = imageio.imread(extraction_file)
            # get the annotation mask (blue channel, inverted)
            annotation_mask = 255-imageio.imread(annotation_file)[:,:,2]
            # only score pairs with matching dimensions
            if (annotation_mask.shape == extraction_mask.shape):
                # save dimensions
                dims.append(extraction_mask.shape)
                # BUG FIX: `np.bool` was a deprecated alias of the builtin
                # `bool` and was removed in NumPy 1.24; use `bool` directly.
                mask1 = np.asarray(annotation_mask).astype(bool).flatten()
                mask2 = np.asarray(extraction_mask).astype(bool).flatten()
                # calculate Dice coefficient = F-score
                dice = sklearn.metrics.f1_score(mask1, mask2)
                # calculate Jaccard index = IoU
                jacc = sklearn.metrics.jaccard_score(mask1, mask2)
                # calculate Precision
                prec = sklearn.metrics.precision_score(mask1, mask2)
                # calculate Recall
                rec = sklearn.metrics.recall_score(mask1, mask2)
                # calculate Kappa
                kappa = sklearn.metrics.cohen_kappa_score(mask1, mask2)
                dat.append([str(i), imageID, str(dice), str(jacc), str(prec), str(rec), str(kappa)])
            else:
                print("\nUnequal size: " + imageID)
        else:
            print("\nNo annotation file: " + imageID)
# show statistics of thumbnail dimensions
print(pd.DataFrame(dims).describe())
# show statistics of metrics (columns 2..6 hold the numeric scores)
print(pd.DataFrame(dat[1:])[:][range(2,7)].astype(str).astype(float).describe())
# save the table
np.savetxt("metrics.csv", dat, delimiter=",", fmt='%s')
| StarcoderdataPython |
8060579 | <reponame>aserhiychuk/pyreinforce<filename>pyreinforce/distributed/distributed.py<gh_stars>10-100
import random
import logging
import time
from uuid import uuid4
from threading import Thread
import multiprocessing as mp
from multiprocessing import connection, Process, Pipe, Barrier, Value
from multiprocessing.managers import SharedMemoryManager
import numpy as np
from pyreinforce.core import Agent, SimpleAgent, Callback
class DistributedAgent(Agent):
    """Agent that trains a shared brain with multiple worker processes.

    Each worker runs episodes in its own process, computes gradients and
    notifies the parent over a pipe; gradients and weights are exchanged
    through a per-worker slot of one shared-memory block.
    """

    def __init__(self, n_episodes, env, brain, experience_buffer, train_freq,
                 validation_freq=None, validation_episodes=None,
                 converter=None, callback=None, n_workers=None):
        self._logger = logging.getLogger(f'{__name__}.{type(self).__name__}')
        self._n_episodes = n_episodes
        self._env = env
        self._brain = brain
        self._experience_buffer = experience_buffer
        self._train_freq = train_freq
        self._converter = converter
        self._callback = callback
        if n_workers is None:
            self._n_workers = mp.cpu_count()
        else:
            self._n_workers = n_workers
        # BUG FIX: the validation-load distribution below previously compared
        # against the raw ``n_workers`` argument, which is None when the
        # caller relies on the default -- that raised TypeError whenever
        # validation_episodes was set. Use the resolved worker count instead.
        n_workers = self._n_workers
        self._validation_freq = validation_freq
        if validation_episodes:
            # Distribute validation load between workers as equally as possible.
            # Each worker will grab the value that corresponds to its number.
            if validation_episodes < n_workers:
                # n_workers = 4, validation_episodes = 3
                # [1, 1, 1, 0]
                self._validation_episodes = [1] * validation_episodes + [0] * (n_workers - validation_episodes)
            elif n_workers < validation_episodes:
                # n_workers = 4, validation_episodes = 5
                # [2, 1, 1, 1]
                #
                # n_workers = 4, validation_episodes = 15
                # [4, 4, 4, 3]
                self._validation_episodes = [validation_episodes // n_workers + 1] * (validation_episodes % n_workers)
                self._validation_episodes += [validation_episodes // n_workers] * (n_workers - validation_episodes % n_workers)
            else:
                # n_workers = 4, validation_episodes = 4
                # [1, 1, 1, 1]
                self._validation_episodes = [1] * n_workers
            assert n_workers == len(self._validation_episodes)
            assert validation_episodes == sum(self._validation_episodes)
        else:
            self._validation_episodes = None
        self._rewards = None
        self._info = None
        self._grads_queue_sizes = []

    def run(self):
        """Spin up worker processes, aggregate their gradients, and collect results.

        Returns a (rewards, info) tuple; when validation is enabled the
        per-worker reward lists are concatenated along the episode axis.
        """
        self._rewards = {}
        self._info = {
            'workers': {}
        }
        barrier = Barrier(self._n_workers)
        # instantiate the learner brain from the factory to size the weights
        brain = self._brain()
        weights = brain.get_weights()
        weights_metadata = [(w.shape, w.dtype, w.nbytes) for w in weights]
        weights_size = sum([w.nbytes for w in weights])
        with SharedMemoryManager() as smm:
            # one weights-sized slot per worker
            shared_memory = smm.SharedMemory(size=self._n_workers * weights_size)
            self._logger.info(f'allocated shared memory of size {shared_memory.size}. total workers weights size {self._n_workers * weights_size}')
            shared_weights = SharedWeights(weights_metadata, shared_memory)
            worker_processes = []
            conns_to_workers = []
            for worker_no in range(self._n_workers):
                conn_to_parent, conn_to_worker = Pipe()
                # seed every worker's slot with the initial weights
                shared_weights.write(worker_no, weights)
                worker_process = Process(name=f'worker-{worker_no}', target=self._worker_process, args=(worker_no, conn_to_parent, shared_weights, barrier))
                worker_process.start()
                # worker will be the only one who has a reference to this end
                conn_to_parent.close()
                worker_processes.append(worker_process)
                # TODO _workers_listener mutates this list
                conns_to_workers.append(conn_to_worker)
            self._logger.info('workers have been spun up')
            # persisting the brain after spinning up sub processes
            self._brain = brain
            workers_listener = Thread(name='workers-listener', daemon=True,
                                      target=self._workers_listener, args=(conns_to_workers, shared_weights))
            workers_listener.start()
            self._logger.info('started workers listener thread')
            # TODO consider using futures to join sub processes
            for worker_no, worker_process in enumerate(worker_processes):
                worker_process.join()
            self._logger.info('workers finished')
            workers_listener.join()
            self._logger.info('workers listener thread stopped')
        self._info.update({
            'grads_queue_sizes': self._grads_queue_sizes
        })
        if self._validation_freq:
            # stack per-worker validation rewards side by side
            rewards = [np.array(worker_rewards) for _, worker_rewards in self._rewards.items()]
            rewards = np.concatenate(rewards, axis=1)
            rewards = rewards.tolist()
            self._rewards = rewards
        return self._rewards, self._info

    def _worker_process(self, worker_no, conn_to_parent, shared_weights, barrier):
        """Entry point of a worker process: run its agent and send back results."""
        self._logger.info(f'[worker_no {worker_no}] starting...')
        # env/brain may be factories taking the worker number
        _env = self._env(worker_no) if callable(self._env) else self._env
        _brain = self._brain(worker_no) if callable(self._brain) else self._brain
        worker = self._create_worker(worker_no, conn_to_parent, shared_weights, barrier, _env, _brain)
        rewards, info = worker.run()
        conn_to_parent.send({
            'type'     : 'result',
            'ref_id'   : uuid4(),
            'worker_no': worker_no,
            'payload'  : (rewards, info)
        })
        conn_to_parent.close()
        self._logger.info(f'[worker_no {worker_no}] done')

    def _create_worker(self, worker_no, conn_to_parent, shared_weights, barrier, env, brain, *args, **kwargs):
        """Build the worker agent for one process. Subclasses must implement."""
        raise NotImplementedError

    def _workers_listener(self, conns, shared_weights):
        """Parent-side loop: apply worker gradients and publish fresh weights.

        Exits when every worker connection has been closed.
        """
        while conns:
            # TODO check performance of connection.wait()
            ready_conns = connection.wait(conns)
            self._grads_queue_sizes.append(len(ready_conns))
            # shuffle to increase randomness of gradient application order
            random.shuffle(ready_conns)
            for conn in ready_conns:
                try:
                    msg = conn.recv()
                except EOFError:
                    # worker is gone; stop listening on this connection
                    conns.remove(conn)
                    conn.close()
                else:
                    if msg['type'] == 'gradients':
                        ref_id = msg['ref_id']
                        worker_no = msg['worker_no']
                        # read and apply the gradients from shared memory
                        grads = shared_weights.read(worker_no)
                        self._brain.apply_gradients(grads)
                        # update worker's shared memory with new weights
                        weights = self._brain.get_weights()
                        shared_weights.write(worker_no, weights)
                        # let worker know new weights are available
                        conn.send({
                            'type'  : 'weights',
                            'ref_id': ref_id
                        })
                    elif msg['type'] == 'result':
                        ref_id = msg['ref_id']
                        worker_no = msg['worker_no']
                        rewards, info = msg['payload']
                        self._rewards[worker_no] = rewards
                        self._info['workers'][worker_no] = info
                        self._logger.info(f'received result from worker_no {worker_no}')
                    else:
                        raise RuntimeError(f'Unsupported message type: {msg["type"]}')
        self._logger.info('exiting workers listener thread')
class WorkerAgent(SimpleAgent):
    """Per-process agent that trains against a parent-owned learner brain.

    Gradients are written into this worker's shared-memory slot; the parent
    applies them and writes updated weights back into the same slot.
    """

    def __init__(self, worker_no, conn_to_parent, shared_weights, barrier,
                 n_episodes, env, brain, experience_buffer, train_freq,
                 validation_freq=None, validation_episodes=None,
                 converter=None, callback=None):
        # wrap a user callback so its events carry this worker's number
        if isinstance(callback, Callback):
            callback = WorkerCallback(worker_no, callback)
        super().__init__(n_episodes, env, validation_freq, validation_episodes,
                         converter, callback)
        self._worker_no = worker_no
        self._conn_to_parent = conn_to_parent
        self._shared_weights = shared_weights
        self._barrier = barrier
        self._brain = brain
        # start from the weights the parent seeded into this worker's slot
        weights = self._shared_weights.read(self._worker_no)
        self._brain.set_weights(weights)
        self._experience_buffer = experience_buffer
        self._train_freq = train_freq
        # (t_sent, t_received) timestamps of each gradient round trip
        self._latencies = []

    def run(self):
        """Run all episodes and return (rewards, info-with-latencies)."""
        rewards, stats = super().run()
        info = {
            'stats'    : stats,
            'latencies': self._latencies
        }
        return rewards, info

    def _observe(self, experience):
        """Buffer one transition; train when the buffer is full or the episode ends."""
        self._experience_buffer.add(experience)
        if (len(self._experience_buffer) == self._train_freq) or (experience[-1] is True):
            batch = self._experience_buffer.get_batch_and_reset()
            if batch is not None:
                self._train(batch)

    def _train(self, batch):
        """Compute gradients, ship them to the parent, and wait for new weights."""
        grads = self._compute_grads(batch)
        # persist gradients in shared memory
        self._shared_weights.write(self._worker_no, grads)
        # let the learner know gradients are available
        req_ref_id = uuid4()
        t1 = time.perf_counter()
        self._conn_to_parent.send({
            'type'     : 'gradients',
            'ref_id'   : req_ref_id,
            'worker_no': self._worker_no
        })
        # wait until gradients are applied and updated weights become available
        # TODO use timeout and raise error if no response received
        res = self._conn_to_parent.recv()
        t2 = time.perf_counter()
        self._latencies.append((t1, t2))
        assert res['type'] == 'weights'
        assert req_ref_id == res['ref_id']
        # TODO assert response status (i.e. OK? not OK?)
        # update worker's weights
        weights = self._shared_weights.read(self._worker_no)
        self._brain.set_weights(weights, 'training')

    def _compute_grads(self, batch):
        # subclasses implement the algorithm-specific gradient computation
        raise NotImplementedError

    def _after_episode(self, episode_no, reward):
        """Flush any remaining buffered experience before episode bookkeeping."""
        batch = self._experience_buffer.get_batch_and_reset(True)
        if batch is not None:
            self._train(batch)
        super()._after_episode(episode_no, reward)

    def _before_validation(self):
        """Synchronize all workers, then validate with the freshest weights."""
        super()._before_validation()
        self._barrier.wait()
        latest_weights = self._shared_weights.read_latest()
        self._brain.set_weights(latest_weights)
class ExperienceBuffer:
    """Accumulates (s, a, r, s1, done) transitions and emits numpy batches."""

    def __init__(self):
        self._buffer = []

    def add(self, experience):
        """Append one (s, a, r, s1, done) tuple to the buffer."""
        self._buffer.append(experience)

    def get_batch_and_reset(self, is_terminal=False):
        """Return buffered transitions as column arrays, then clear the buffer.

        Returns None when the buffer is empty. ``s1_mask`` is 1 for
        non-terminal transitions and 0 for terminal ones.
        """
        n = len(self)
        if n == 0:
            return None
        states, actions, rewards, next_states, dones = zip(*self._buffer)
        s = np.asarray(states).reshape((n, -1))
        a = np.asarray(actions).reshape((n, 1))
        r = np.asarray(rewards).reshape((n, 1))
        s1 = np.asarray(next_states).reshape((n, -1))
        s1_mask = np.asarray([1 - done for done in dones]).reshape((n, 1))
        self._buffer = []
        return s, a, r, s1, s1_mask

    def __len__(self):
        return len(self._buffer)
class RecurrentExperienceBuffer(ExperienceBuffer):
    """Experience buffer that emits overlapping windows of ``n_time_steps``.

    States are returned as (batch, n_time_steps, ...) windows; actions,
    rewards and masks come from the last transition of each window.
    """

    def __init__(self, n_time_steps):
        super().__init__()
        self._n_time_steps = n_time_steps

    def get_batch_and_reset(self, is_terminal=False):
        """Return windowed batches; keep a window-sized tail unless terminal.

        Returns None when fewer than ``n_time_steps`` transitions are
        buffered (dropping the partial tail if *is_terminal*).
        """
        n = len(self)
        if n == 0:
            if is_terminal:
                self._buffer = []
            return None
        windows = [self._buffer[start:start + self._n_time_steps]
                   for start in range(n)]
        tail = self._buffer[-n:]
        s = np.array([[step[0] for step in window] for window in windows])
        a = np.array([step[1] for step in tail]).reshape((n, 1))
        r = np.array([step[2] for step in tail]).reshape((n, 1))
        s1 = np.array([[step[3] for step in window] for window in windows])
        s1_mask = np.array([1 - step[4] for step in tail]).reshape((n, 1))
        # retain the last (n_time_steps - 1) transitions for the next window,
        # unless the episode ended
        self._buffer = [] if is_terminal else self._buffer[n:]
        return s, a, r, s1, s1_mask

    def __len__(self):
        return max(0, len(self._buffer) - self._n_time_steps + 1)
class WorkerCallback(Callback):
    """Decorator around a user `Callback` that tags events with the worker number.

    Every delegated call forwards the original arguments plus a
    ``worker_no`` keyword so the user callback can tell workers apart.
    Note: callers must not already pass ``worker_no`` in ``kwargs``, or the
    delegation raises TypeError for a duplicate keyword argument.
    """

    def __init__(self, worker_no, callback):
        self._worker_no = worker_no
        self._callback = callback

    def on_before_run(self, **kwargs):
        self._callback.on_before_run(worker_no=self._worker_no, **kwargs)

    def on_after_run(self, **kwargs):
        self._callback.on_after_run(worker_no=self._worker_no, **kwargs)

    def on_state_change(self, s, **kwargs):
        self._callback.on_state_change(s, worker_no=self._worker_no, **kwargs)

    def on_before_episode(self, episode_no, **kwargs):
        self._callback.on_before_episode(episode_no, worker_no=self._worker_no, **kwargs)

    def on_after_episode(self, episode_no, reward, **kwargs):
        self._callback.on_after_episode(episode_no, reward, worker_no=self._worker_no, **kwargs)

    def on_before_validation(self, **kwargs):
        self._callback.on_before_validation(worker_no=self._worker_no, **kwargs)

    def on_after_validation(self, rewards, **kwargs):
        self._callback.on_after_validation(rewards, worker_no=self._worker_no, **kwargs)
class SharedWeights:
    """Per-worker arrays of weights/gradients layered over one shared-memory block.

    Worker *i* owns the byte range ``[i * weights_size, (i + 1) * weights_size)``.
    ``metadata`` is a list of (shape, dtype, nbytes) triples describing the
    arrays stored in each slot, in order.
    """

    def __init__(self, metadata, shared_memory):
        self._metadata = metadata
        self._weights_size = sum(nbytes for _, _, nbytes in metadata)
        assert shared_memory.size >= self._weights_size, f'Shared memory size too small. Shared memory: {shared_memory.size}, weights: {self._weights_size}'
        self._shared_memory = shared_memory
        # index of the worker that wrote most recently; -1 until the first write
        self._latest_worker_no = Value('i', -1)

    def write(self, worker_no, data):
        """Copy each array in *data* into this worker's shared-memory slot."""
        offset = worker_no * self._weights_size
        for array in data:
            view = np.ndarray(array.shape, array.dtype, self._shared_memory.buf, offset)
            view[:] = array[:]
            offset += array.nbytes
        self._latest_worker_no.value = worker_no

    def read(self, worker_no):
        """Return zero-copy array views over this worker's slot."""
        offset = worker_no * self._weights_size
        weights = []
        for shape, dtype, nbytes in self._metadata:
            weights.append(np.ndarray(shape, dtype, self._shared_memory.buf, offset))
            offset += nbytes
        return weights

    def read_latest(self):
        """Return the most recently written weights.

        Raises ValueError when nothing has been written yet.
        """
        latest_worker_no = self._latest_worker_no.value
        if latest_worker_no == -1:
            raise ValueError('Weights are not initialized')
        return self.read(latest_worker_no)
| StarcoderdataPython |
6447858 | <reponame>lucasaciole/projetoProntuario<gh_stars>0
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^pacientes/$', views.paciente_index, name='paciente_index'),
url(r'^paciente/(?P<id>\d+)/$', views.paciente_detalhes, name='paciente_detalhes'),
url(r'^medicos/$', views.medico_index, name='medico_index'),
url(r'^cuidadores/$', views.cuidador_index, name='cuidador_index'),
url(r'^cuidador/(?P<id>\d+)/$', views.cuidador_detalhes, name='cuidador_detalhes'),
url(r'^cuidador/contratos/$', views.cuidador_contratos, name='cuidador_contratos'),
url(r'^cuidador/contrato/(?P<id>\d+)/$', views.cuidador_contrato_detalhes, name='contrato_detalhes'),
url(r'^cuidador/contrato/novo/$', views.cuidador_contrato_novo, name='contrato_novo'),
url(r'^cuidador/novo/$', views.cuidador_novo, name='cuidador_novo'),
url(r'^cuidador/atendimentos/$', views.cuidador_atendimentos, name='cuidador_atendimentos'),
url(r'^cuidador/planos/$', views.cuidador_planos, name='cuidador_planos'),
url(r'^cuidador/plano/(?P<id>\d+)/$', views.cuidador_planos_detalhes, name='plano_detalhes'),
url(r'^cuidador/plano/novo/$', views.cuidador_planos_novo, name='plano_novo'),
url(r'^cuidador/plano/novo/atividade/(?P<id>\d+)/$', views.cuidador_atividades_novo, name='atividade_novo'),
url(r'^cuidador/atendimento/(?P<id>\d+)/$', views.cuidador_atendimentos_detalhes,
name='cuidador_atendimentos_detalhes'),
url(r'^cuidador/atendimento/(?P<id>\d+)/nova_intercorrencia$', views.atendimento_nova_intercorrencia,
name='nova_intercorrencia'),
url(r'^cuidador/atendimento/(?P<id>\d+)/nova_atividade$', views.atendimento_nova_atividade,
name='nova_atividade'),
url(r'^cuidador/atendimento/(?P<id_atendimento>\d+)/atividade/(?P<id_atividade>\d+)/nova_medida/$', views.atendimento_nova_medida,
name='nova_medida'),
url(r'^cuidador/atendimento/novo$', views.novo_atendimento, name='novo_atendimento'),
url(r'^responsabilidades/$', views.responsavel_responsabilidades, name='responsabilidades'),
url(r'^admin/$', views.admin_index, name='admin')
]
| StarcoderdataPython |
11335927 | <gh_stars>100-1000
def rewrite(text):
    """Redraw the current terminal line in place with *text*.

    The leading carriage return moves the cursor back to column 0 and the
    suppressed newline keeps the cursor on the same line, so successive
    calls overwrite each other.
    """
    line = "\r" + text
    print(line, end="")
def next_line(text=""):
    """Print *text* (empty by default) followed by a newline.

    Companion to ``rewrite``: call this to finalize an in-place line and
    move the cursor to the next one.
    """
    print(text)
| StarcoderdataPython |
9724116 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model configurations for CNN benchmarks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from cnn_quantization.tf_cnn_benchmarks.models import alexnet_model
from cnn_quantization.tf_cnn_benchmarks.models import densenet_model
from cnn_quantization.tf_cnn_benchmarks.models import googlenet_model
from cnn_quantization.tf_cnn_benchmarks.models import inception_model
from cnn_quantization.tf_cnn_benchmarks.models import lenet_model
from cnn_quantization.tf_cnn_benchmarks.models import overfeat_model
from cnn_quantization.tf_cnn_benchmarks.models import resnet_model
from cnn_quantization.tf_cnn_benchmarks.models import ssd_model
from cnn_quantization.tf_cnn_benchmarks.models import trivial_model
from cnn_quantization.tf_cnn_benchmarks.models import vgg_model
# Models trainable on ImageNet (and synthetic stand-in data), keyed by the
# model-name string passed on the command line.
_model_name_to_imagenet_model = {
    'vgg11': vgg_model.Vgg11Model,
    'vgg16': vgg_model.Vgg16Model,
    'vgg19': vgg_model.Vgg19Model,
    'lenet': lenet_model.Lenet5Model,
    'googlenet': googlenet_model.GooglenetModel,
    'overfeat': overfeat_model.OverfeatModel,
    'alexnet': alexnet_model.AlexnetModel,
    'trivial': trivial_model.TrivialModel,
    'inception3': inception_model.Inceptionv3Model,
    'inception4': inception_model.Inceptionv4Model,
    'resnet50': resnet_model.create_resnet50_model,
    'resnet50_v1.5': resnet_model.create_resnet50_v1_5_model,
    'resnet50_v2': resnet_model.create_resnet50_v2_model,
    'resnet101': resnet_model.create_resnet101_model,
    'resnet101_v2': resnet_model.create_resnet101_v2_model,
    'resnet152': resnet_model.create_resnet152_model,
    'resnet152_v2': resnet_model.create_resnet152_v2_model,
}
# Models trainable on CIFAR-10.
_model_name_to_cifar_model = {
    'alexnet': alexnet_model.AlexnetCifar10Model,
    'resnet20': resnet_model.create_resnet20_cifar_model,
    'resnet20_v2': resnet_model.create_resnet20_v2_cifar_model,
    'resnet32': resnet_model.create_resnet32_cifar_model,
    'resnet32_v2': resnet_model.create_resnet32_v2_cifar_model,
    'resnet44': resnet_model.create_resnet44_cifar_model,
    'resnet44_v2': resnet_model.create_resnet44_v2_cifar_model,
    'resnet56': resnet_model.create_resnet56_cifar_model,
    'resnet56_v2': resnet_model.create_resnet56_v2_cifar_model,
    'resnet110': resnet_model.create_resnet110_cifar_model,
    'resnet110_v2': resnet_model.create_resnet110_v2_cifar_model,
    'trivial': trivial_model.TrivialCifar10Model,
    'densenet40_k12': densenet_model.create_densenet40_k12_model,
    'densenet100_k12': densenet_model.create_densenet100_k12_model,
    'densenet100_k24': densenet_model.create_densenet100_k24_model,
}
# Object-detection models trainable on COCO.
_model_name_to_object_detection_model = {
    'ssd300': ssd_model.SSD300Model,
    'trivial': trivial_model.TrivialSSD300Model,
}
def _get_model_map(dataset_name):
"""Get name to model map for specified dataset."""
if dataset_name == 'cifar10':
return _model_name_to_cifar_model
elif dataset_name in ('imagenet', 'synthetic'):
return _model_name_to_imagenet_model
elif dataset_name == 'coco':
return _model_name_to_object_detection_model
else:
raise ValueError('Invalid dataset name: %s' % dataset_name)
def get_model_config(model_name, dataset, params):
    """Instantiate the model registered under *model_name* for *dataset*.

    Raises ValueError when the name is not registered for the dataset.
    """
    model_map = _get_model_map(dataset.name)
    if model_name in model_map:
        return model_map[model_name](params=params)
    raise ValueError('Invalid model name \'%s\' for dataset \'%s\'' %
                     (model_name, dataset.name))
def register_model(model_name, dataset_name, model_func):
    """Register a new model that can be obtained with `get_model_config`.

    Raises ValueError when *model_name* is already registered for the
    dataset.
    """
    dataset_models = _get_model_map(dataset_name)
    if model_name not in dataset_models:
        dataset_models[model_name] = model_func
        return
    raise ValueError('Model "%s" is already registered for dataset "%s"' %
                     (model_name, dataset_name))
| StarcoderdataPython |
145715 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright 2018-2022 the orix developers
#
# This file is part of orix.
#
# orix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# orix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with orix. If not, see <http://www.gnu.org/licenses/>.
"""Rotations respecting symmetry.
An orientation is simply a rotation with respect to some reference
frame. In this respect, an orientation is in fact a *misorientation* -
a change of orientation - with respect to a reference of the identity
rotation.
In orix, orientations and misorientations are distinguished from
rotations only by the inclusion of a notion of symmetry. Consider the
following example:
.. image:: /_static/img/orientation.png
:width: 200px
:alt: Two objects with two different rotations each. The square, with
fourfold symmetry, has the same orientation in both cases.
:align: center
Both objects have undergone the same *rotations* with respect to the
reference. However, because the square has fourfold symmetry, it is
indistinguishable in both cases, and hence has the same orientation.
"""
from itertools import combinations_with_replacement as icombinations
from itertools import product as iproduct
import warnings
import dask.array as da
from dask.diagnostics import ProgressBar
import numpy as np
from tqdm import tqdm
from orix._util import deprecated
from orix.quaternion.orientation_region import OrientationRegion
from orix.quaternion.rotation import Rotation
from orix.quaternion.symmetry import C1, Symmetry, _get_unique_symmetry_elements
from orix.vector import AxAngle
def _distance(misorientation, verbose, split_size=100):
    """Private function to find the symmetry reduced distance between
    all pairs of (mis)orientations.

    Parameters
    ----------
    misorientation : orix.quaternion.Misorientation
        The misorientation to be considered.
    verbose : bool
        Output progress bar while computing.
    split_size : int
        Size of block to compute at a time.

    Returns
    -------
    distance : numpy.ndarray
        2D matrix containing the angular distance between every
        orientation, considering symmetries.
    """
    num_orientations = misorientation.shape[0]
    S_1, S_2 = misorientation._symmetry
    # Start every pair "infinitely far apart" and reduce below.
    # Fix: np.infty was deprecated and removed in NumPy 2.0; use np.inf.
    distance = np.full(misorientation.shape + misorientation.shape, np.inf)
    split_size = split_size // S_1.shape[0]
    outer_range = range(0, num_orientations, split_size)
    if verbose:
        # int(): tqdm expects an integral total, np.ceil returns a float
        outer_range = tqdm(outer_range, total=int(np.ceil(num_orientations / split_size)))
    S_1_outer_S_1 = S_1.outer(S_1)
    # Calculate the upper half of the distance matrix block by block
    for start_index_b in outer_range:
        # we use slice object for compactness
        index_slice_b = slice(
            start_index_b, min(num_orientations, start_index_b + split_size)
        )
        o_sub_b = misorientation[index_slice_b]
        for start_index_a in range(0, start_index_b + split_size, split_size):
            index_slice_a = slice(
                start_index_a, min(num_orientations, start_index_a + split_size)
            )
            o_sub_a = misorientation[index_slice_a]
            # Reduce over the two symmetry axes introduced by the outer
            # product below
            axis = (len(o_sub_a.shape), len(o_sub_a.shape) + 1)
            mis2orientation = (~o_sub_a).outer(S_1_outer_S_1).outer(o_sub_b)
            # This works through all the identity rotations
            for s_2_1, s_2_2 in icombinations(S_2, 2):
                m = s_2_1 * mis2orientation * s_2_2
                angle = m.angle.min(axis=axis)
                distance[index_slice_a, index_slice_b] = np.minimum(
                    distance[index_slice_a, index_slice_b], angle
                )
    # Symmetrize the matrix for convenience
    i_lower = np.tril_indices(distance.shape[0], -1)
    distance[i_lower] = distance.T[i_lower]
    return distance
class Misorientation(Rotation):
r"""Misorientation object.
Misorientations represent transformations from one orientation,
:math:`o_1` to another, :math:`o_2`: :math:`o_2 \cdot o_1^{-1}`.
They have symmetries associated with each of the starting
orientations.
"""
    # Default (left, right) symmetries: the trivial group C1 on both sides
    _symmetry = (C1, C1)

    def __init__(self, data, symmetry=None):
        """Create a misorientation.

        Parameters
        ----------
        data
            Quaternion data, passed straight to the Rotation constructor.
        symmetry : tuple of Symmetry, optional
            (left, right) symmetry pair. If not given, the class default
            (C1, C1) is kept.
        """
        super().__init__(data)
        if symmetry:
            self.symmetry = symmetry
    @property
    def symmetry(self):
        """Tuple of :class:`~orix.quaternion.Symmetry`: the (left, right)
        symmetry pair of the misorientation."""
        return self._symmetry
@symmetry.setter
def symmetry(self, value):
if not isinstance(value, (list, tuple)):
raise TypeError("Value must be a 2-tuple of Symmetry objects.")
if len(value) != 2 or not all(isinstance(s, Symmetry) for s in value):
raise ValueError("Value must be a 2-tuple of Symmetry objects.")
self._symmetry = tuple(value)
def __getitem__(self, key):
m = super().__getitem__(key)
m._symmetry = self._symmetry
return m
def __eq__(self, other):
v1 = super().__eq__(other)
if not v1:
return v1
else:
# check symmetries are also equivalent
v2 = []
for sym_s, sym_o in zip(self._symmetry, other._symmetry):
v2.append(sym_s == sym_o)
return all(v2)
def reshape(self, *shape):
m = super().reshape(*shape)
m._symmetry = self._symmetry
return m
def flatten(self):
m = super().flatten()
m._symmetry = self._symmetry
return m
def squeeze(self):
m = super().squeeze()
m._symmetry = self._symmetry
return m
def transpose(self, *axes):
m = super().transpose(*axes)
m._symmetry = self._symmetry
return m
    def equivalent(self, grain_exchange=False):
        r"""Equivalent misorientations.

        Parameters
        ----------
        grain_exchange : bool
            If True the rotation :math:`g` and :math:`g^{-1}` are
            considered to be identical. Default is False.

        Returns
        -------
        Misorientation
        """
        Gl, Gr = self._symmetry
        # Grain exchange only makes sense when both symmetries are the same
        if grain_exchange and (Gl._tuples == Gr._tuples):
            orientations = Orientation.stack([self, ~self]).flatten()
        else:
            orientations = Orientation(self)
        # All symmetrically equivalent transformations: Gr * m * Gl
        equivalent = Gr.outer(orientations.outer(Gl))
        return self.__class__(equivalent).flatten()
    def map_into_symmetry_reduced_zone(self, verbose=False):
        """Computes equivalent transformations which have the smallest
        angle of rotation and return these as a new Misorientation object.

        Parameters
        ----------
        verbose : bool
            Output progress bar while computing. Default is False.

        Returns
        -------
        Misorientation
            A new misorientation object with the assigned symmetry.

        Examples
        --------
        >>> from orix.quaternion.symmetry import C4, C2
        >>> data = np.array([[0.5, 0.5, 0.5, 0.5], [0, 1, 0, 0]])
        >>> m = Misorientation(data)
        >>> m.symmetry = (C4, C2)
        >>> m.map_into_symmetry_reduced_zone()
        Misorientation (2,) 4, 2
        [[-0.7071  0.7071  0.      0.    ]
         [ 0.      1.      0.      0.    ]]
        """
        Gl, Gr = self._symmetry
        symmetry_pairs = iproduct(Gl, Gr)
        if verbose:
            symmetry_pairs = tqdm(symmetry_pairs, total=Gl.size * Gr.size)
        orientation_region = OrientationRegion.from_symmetry(Gl, Gr)
        o_inside = self.__class__.identity(self.shape)
        # Mask of entries not yet mapped inside the fundamental region
        outside = np.ones(self.shape, dtype=bool)
        for gl, gr in symmetry_pairs:
            # Try the next symmetry pair only on the entries still outside
            o_transformed = gl * self[outside] * gr
            o_inside[outside] = o_transformed
            outside = ~(o_inside < orientation_region)
            if not np.any(outside):
                break
        o_inside._symmetry = (Gl, Gr)
        return o_inside
    @deprecated(
        since="0.9",
        alternative="orix.quaternion.Misorientation.get_distance_matrix",
        removal="0.10",
    )
    def distance(self, verbose=False, split_size=100):
        """Symmetry reduced distance.

        Compute the shortest distance between all orientations
        considering symmetries.

        Parameters
        ----------
        verbose : bool
            Output progress bar while computing. Default is False.
        split_size : int
            Size of block to compute at a time. Default is 100.

        Returns
        -------
        distance : numpy.ndarray
            2D matrix containing the angular distance between every
            orientation, considering symmetries.

        Examples
        --------
        >>> import numpy as np
        >>> from orix.quaternion import Misorientation, symmetry
        >>> data = np.array([[0.5, 0.5, 0.5, 0.5], [0, 1, 0, 0]])
        >>> m = Misorientation(data)
        >>> m.symmetry = (symmetry.C4, symmetry.C2)
        >>> m = m.map_into_symmetry_reduced_zone()
        >>> m.distance()
        array([[3.14159265, 1.57079633],
               [1.57079633, 0.        ]])
        """
        # Delegate to the module-level helper, then restore the pairwise
        # shape self.shape + self.shape
        distance = _distance(self, verbose, split_size)
        return distance.reshape(self.shape + self.shape)
def __repr__(self):
"""String representation."""
cls = self.__class__.__name__
shape = str(self.shape)
s1, s2 = self._symmetry[0].name, self._symmetry[1].name
s2 = "" if s2 == "1" else s2
symm = s1 + (s2 and ", ") + s2
data = np.array_str(self.data, precision=4, suppress_small=True)
rep = "{} {} {}\n{}".format(cls, shape, symm, data)
return rep
    def scatter(
        self,
        projection="axangle",
        figure=None,
        position=None,
        return_figure=False,
        wireframe_kwargs=None,
        size=None,
        figure_kwargs=None,
        **kwargs,
    ):
        """Plot misorientations in axis-angle space or the Rodrigues
        fundamental zone.

        Parameters
        ----------
        projection : str, optional
            Which misorientation space to plot misorientations in,
            either "axangle" (default) or "rodrigues".
        figure : matplotlib.figure.Figure
            If given, a new plot axis :class:`~orix.plot.AxAnglePlot` or
            :class:`~orix.plot.RodriguesPlot` is added to the figure in
            the position specified by `position`. If not given, a new
            figure is created.
        position : int, tuple of int, matplotlib.gridspec.SubplotSpec, optional
            Where to add the new plot axis. 121 or (1, 2, 1) places it
            in the first of two positions in a grid of 1 row and 2
            columns. See :meth:`~matplotlib.figure.Figure.add_subplot`
            for further details. Default is (1, 1, 1).
        return_figure : bool, optional
            Whether to return the figure. Default is False.
        wireframe_kwargs : dict, optional
            Keyword arguments passed to
            :meth:`orix.plot.AxAnglePlot.plot_wireframe` or
            :meth:`orix.plot.RodriguesPlot.plot_wireframe`.
        size : int, optional
            If not given, all misorientations are plotted. If given, a
            random sample of this `size` of the misorientations is
            plotted.
        figure_kwargs : dict, optional
            Dictionary of keyword arguments passed to
            :func:`matplotlib.pyplot.figure` if `figure` is not given.
        kwargs
            Keyword arguments passed to
            :meth:`orix.plot.AxAnglePlot.scatter` or
            :meth:`orix.plot.RodriguesPlot.scatter`.

        Returns
        -------
        figure : matplotlib.figure.Figure
            Figure with the added plot axis, if `return_figure` is True.

        See Also
        --------
        orix.plot.AxAnglePlot, orix.plot.RodriguesPlot
        """
        # Local import to avoid a circular dependency with orix.plot
        from orix.plot.rotation_plot import _setup_rotation_plot

        figure, ax = _setup_rotation_plot(
            figure=figure,
            projection=projection,
            position=position,
            figure_kwargs=figure_kwargs,
        )
        # Plot wireframe
        if wireframe_kwargs is None:
            wireframe_kwargs = {}
        if isinstance(self.symmetry, tuple):
            # Misorientation: two symmetries define the fundamental zone
            fundamental_zone = OrientationRegion.from_symmetry(
                s1=self.symmetry[0], s2=self.symmetry[1]
            )
            ax.plot_wireframe(fundamental_zone, **wireframe_kwargs)
        else:
            # Orientation via inheritance
            fundamental_zone = OrientationRegion.from_symmetry(self.symmetry)
            ax.plot_wireframe(fundamental_zone, **wireframe_kwargs)
        # Correct the aspect ratio of the axes according to the extent
        # of the boundaries of the fundamental region, and also restrict
        # the data limits to these boundaries
        ax._correct_aspect_ratio(fundamental_zone, set_limits=True)
        ax.axis("off")
        figure.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0, wspace=0)
        if size is not None:
            to_plot = self.get_random_sample(size)
        else:
            to_plot = self
        ax.scatter(to_plot, fundamental_zone=fundamental_zone, **kwargs)
        if return_figure:
            return figure
    def get_distance_matrix(self, chunk_size=20, progressbar=True):
        r"""The symmetry reduced smallest angle of rotation transforming
        every misorientation in this instance to every other
        misorientation :cite:`johnstone2020density`.

        This is an alternative implementation of
        :meth:`~orix.quaternion.Misorientation.distance` for
        a single :class:`Misorientation` instance, using :mod:`dask`.

        Parameters
        ----------
        chunk_size : int, optional
            Number of misorientations per axis to include in each
            iteration of the computation. Default is 20.
        progressbar : bool, optional
            Whether to show a progressbar during computation. Default is
            True.

        Returns
        -------
        numpy.ndarray

        Notes
        -----
        Given two misorientations :math:`m_i` and :math:`m_j` with the
        same two symmetry groups, the smallest angle is considered as
        the geodesic distance

        .. math::

            d(m_i, m_j) = \arccos(2(m_i \cdot m_j)^2 - 1),

        where :math:`(m_i \cdot m_j)` is the highest dot product
        between symmetrically equivalent misorientations to
        :math:`m_{i,j}`, given by

        .. math::

            \max_{s_k \in S_k} s_k m_i s_l s_k m_j^{-1} s_l,

        where :math:`s_k \in S_k` and :math:`s_l \in S_l`, with
        :math:`S_k` and :math:`S_l` being the two symmetry groups.

        Examples
        --------
        >>> import numpy as np
        >>> from orix.quaternion import Misorientation, symmetry
        >>> m = Misorientation.from_axes_angles([1, 0, 0], [0, np.pi/2])
        >>> m.symmetry = (symmetry.D6, symmetry.D6)
        >>> d = m.get_distance_matrix()  # doctest: +SKIP
        >>> d
        [[0.         1.57079633]
         [1.57079633 0.        ]]
        """
        # Reduce symmetry operations to the unique ones
        symmetry = _get_unique_symmetry_elements(*self.symmetry)
        # Perform "s_k m_i s_l s_k m_j" (see Notes)
        misorientation1 = symmetry.outer(self).outer(symmetry)
        misorientation2 = misorientation1._outer_dask(~self, chunk_size=chunk_size)
        # Perform last outer product and reduce to all dot products at
        # the same time
        warnings.filterwarnings("ignore", category=da.PerformanceWarning)
        # Build an einsum subscript string that sums over the quaternion
        # component axis (length 4) of the outer product
        str1 = "abcdefghijklmnopqrstuvwxy"[: misorientation2.ndim]
        str2 = "z" + str1[-1]  # Last axis has shape (4,)
        sum_over = f"{str1},{str2}->{str1[:-1] + str2[0]}"
        all_dot_products = da.einsum(sum_over, misorientation2, symmetry.data)
        # Get highest dot product over the three symmetry axes
        axes = (0, self.ndim + 1, 2 * self.ndim + 2)
        dot_products = da.max(abs(all_dot_products), axis=axes)
        # Round because some dot products are slightly above 1
        dot_products = da.round(dot_products, 12)
        # Calculate disorientation angles
        angles_dask = da.arccos(2 * dot_products**2 - 1)
        angles_dask = da.nan_to_num(angles_dask)
        # Materialize the lazy dask result into a NumPy array, chunk by chunk
        angles = np.zeros(angles_dask.shape)
        if progressbar:
            with ProgressBar():
                da.store(sources=angles_dask, targets=angles)
        else:
            da.store(sources=angles_dask, targets=angles)
        return angles
class Orientation(Misorientation):
"""Orientations represent misorientations away from a reference of
identity and have only one associated symmetry.
Orientations support binary subtraction, producing a misorientation.
That is, to compute the misorientation from :math:`o_1` to
:math:`o_2`, call :code:`o_2 - o_1`.
"""
    @property
    def symmetry(self):
        """Symmetry: the single (right-hand) symmetry of the
        orientation; the left-hand slot is always C1."""
        return self._symmetry[1]
@symmetry.setter
def symmetry(self, value):
if not isinstance(value, Symmetry):
raise TypeError("Value must be an instance of orix.quaternion.Symmetry.")
self._symmetry = (C1, value)
@property
def unit(self):
"""Unit orientations."""
o = super().unit
o.symmetry = self.symmetry
return o
def __invert__(self):
o = super().__invert__()
o.symmetry = self.symmetry
return o
def __neg__(self):
o = super().__neg__()
o.symmetry = self.symmetry
return o
def __repr__(self):
"""String representation."""
data = np.array_str(self.data, precision=4, suppress_small=True)
return f"{self.__class__.__name__} {self.shape} {self.symmetry.name}\n{data}"
    def __sub__(self, other):
        """Misorientation from ``other`` to ``self``, i.e.
        ``self * ~other`` mapped into the symmetry reduced zone."""
        if isinstance(other, Orientation):
            # Call to Object3d.squeeze() doesn't carry over symmetry
            misorientation = Misorientation(self * ~other).squeeze()
            misorientation.symmetry = (self.symmetry, other.symmetry)
            return misorientation.map_into_symmetry_reduced_zone()
        return NotImplemented
    # TODO: Remove use of **kwargs in 1.0
    @classmethod
    def from_euler(cls, euler, symmetry=None, direction="lab2crystal", **kwargs):
        """Creates orientation(s) from an array of Euler angles.

        Parameters
        ----------
        euler : array-like
            Euler angles in the Bunge convention.
        symmetry : Symmetry, optional
            Symmetry of orientation(s). If None (default), no symmetry
            is set.
        direction : str
            "lab2crystal" (default) or "crystal2lab". "lab2crystal"
            is the Bunge convention. If "MTEX" is provided then the
            direction is "crystal2lab".
        """
        o = super().from_euler(euler=euler, direction=direction, **kwargs)
        if symmetry:
            o.symmetry = symmetry
        return o
@classmethod
def from_matrix(cls, matrix, symmetry=None):
"""Creates orientation(s) from orientation matrices
:cite:`rowenhorst2015consistent`.
Parameters
----------
matrix : array_like
Array of orientation matrices.
symmetry : Symmetry, optional
Symmetry of orientation(s). If None (default), no symmetry
is set.
"""
o = super().from_matrix(matrix)
if symmetry:
o.symmetry = symmetry
return o
@classmethod
def from_neo_euler(cls, neo_euler, symmetry=None):
"""Creates orientation(s) from a neo-euler (vector)
representation.
Parameters
----------
neo_euler : NeoEuler
Vector parametrization of orientation(s).
symmetry : Symmetry, optional
Symmetry of orientation(s). If None (default), no symmetry
is set.
"""
o = super().from_neo_euler(neo_euler)
if symmetry:
o.symmetry = symmetry
return o
    @classmethod
    def from_axes_angles(cls, axes, angles, symmetry=None):
        """Creates orientation(s) from axis-angle pair(s).

        Parameters
        ----------
        axes : Vector3d or array_like
            The axis of rotation.
        angles : array_like
            The angle of rotation, in radians.
        symmetry : Symmetry, optional
            Symmetry of orientation(s). If None (default), no symmetry
            is set.

        Returns
        -------
        Orientation

        Examples
        --------
        >>> import numpy as np
        >>> from orix.quaternion import Orientation, symmetry
        >>> ori = Orientation.from_axes_angles((0, 0, -1), np.pi / 2, symmetry.Oh)
        >>> ori
        Orientation (1,) m-3m
        [[ 0.7071  0.      0.     -0.7071]]

        See Also
        --------
        from_neo_euler
        """
        # Convert to the AxAngle neo-Euler parametrization and reuse the
        # generic constructor
        axangle = AxAngle.from_axes_angles(axes, angles)
        return cls.from_neo_euler(axangle, symmetry)
def angle_with(self, other):
"""The smallest symmetry reduced angle of rotation transforming
this orientation to the other.
Parameters
----------
other : orix.quaternion.Orientation
Returns
-------
numpy.ndarray
See also
--------
angle_with_outer
"""
dot_products = self.unit.dot(other.unit)
angles = np.nan_to_num(np.arccos(2 * dot_products**2 - 1))
return angles
    def angle_with_outer(self, other, lazy=False, chunk_size=20, progressbar=True):
        r"""The symmetry reduced smallest angle of rotation transforming
        every orientation in this instance to every orientation in
        another instance.

        This is an alternative implementation of
        :meth:`~orix.quaternion.Misorientation.distance` for
        a single :class:`Orientation` instance, using :mod:`dask`.

        Parameters
        ----------
        other : orix.quaternion.Orientation
            The orientations to compute angles against.
        lazy : bool, optional
            Whether to perform the computation lazily with Dask. Default
            is False.
        chunk_size : int, optional
            Number of orientations per axis to include in each iteration
            of the computation. Default is 20. Only applies when `lazy`
            is True.
        progressbar : bool, optional
            Whether to show a progressbar during computation if `lazy`
            is True. Default is True.

        Returns
        -------
        numpy.ndarray

        See also
        --------
        angle_with

        Notes
        -----
        Given two orientations :math:`g_i` and :math:`g_j`, the smallest
        angle is considered as the geodesic distance

        .. math::

            d(g_i, g_j) = \arccos(2(g_i \cdot g_j)^2 - 1),

        where :math:`(g_i \cdot g_j)` is the highest dot product between
        symmetrically equivalent orientations to :math:`g_{i,j}`.

        Examples
        --------
        >>> import numpy as np
        >>> from orix.quaternion import Orientation, symmetry
        >>> ori1 = Orientation.random((5, 3))
        >>> ori2 = Orientation.random((6, 2))
        >>> dist1 = ori1.angle_with_outer(ori2)
        >>> dist1.shape
        (6, 2, 5, 3)
        >>> ori1.symmetry = symmetry.Oh
        >>> ori2.symmetry = symmetry.Oh
        >>> dist_sym = ori1.angle_with_outer(ori2)
        >>> np.allclose(dist1.data, dist_sym.data)
        False
        """
        ori = self.unit
        if lazy:
            dot_products = ori._dot_outer_dask(other, chunk_size=chunk_size)
            # Round because some dot products are slightly above 1
            n_decimals = np.finfo(dot_products.dtype).precision
            dot_products = da.round(dot_products, n_decimals)
            angles_dask = da.arccos(2 * dot_products**2 - 1)
            # NaN (from dot products above 1) becomes an angle of 0
            angles_dask = da.nan_to_num(angles_dask)
            # Create array in memory and overwrite, chunk by chunk
            angles = np.zeros(angles_dask.shape)
            if progressbar:
                with ProgressBar():
                    da.store(sources=angles_dask, targets=angles)
            else:
                da.store(sources=angles_dask, targets=angles)
        else:
            # Eager NumPy path
            dot_products = ori.dot_outer(other)
            angles = np.arccos(2 * dot_products**2 - 1)
            angles = np.nan_to_num(angles)
        return angles
    def get_distance_matrix(self, lazy=False, chunk_size=20, progressbar=True):
        r"""The symmetry reduced smallest angle of rotation transforming
        every orientation in this instance to every other orientation
        :cite:`johnstone2020density`.

        This is an alternative implementation of
        :meth:`~orix.quaternion.Misorientation.distance` for
        a single :class:`Orientation` instance, using :mod:`dask`.

        Parameters
        ----------
        lazy : bool, optional
            Whether to perform the computation lazily with Dask. Default
            is False.
        chunk_size : int, optional
            Number of orientations per axis to include in each iteration
            of the computation. Default is 20. Only applies when `lazy`
            is True.
        progressbar : bool, optional
            Whether to show a progressbar during computation if `lazy`
            is True. Default is True.

        Returns
        -------
        numpy.ndarray

        Notes
        -----
        Given two orientations :math:`g_i` and :math:`g_j`, the smallest
        angle is considered as the geodesic distance

        .. math::

            d(g_i, g_j) = \arccos(2(g_i \cdot g_j)^2 - 1),

        where :math:`(g_i \cdot g_j)` is the highest dot product between
        symmetrically equivalent orientations to :math:`g_{i,j}`.
        """
        # The distance matrix is just the pairwise angles of this
        # instance with itself
        angles = self.angle_with_outer(
            self, lazy=lazy, chunk_size=chunk_size, progressbar=progressbar
        )
        return angles
def dot(self, other):
"""Symmetry reduced dot product of orientations in this instance
to orientations in another instance, returned as numpy.ndarray.
See Also
--------
dot_outer
"""
symmetry = _get_unique_symmetry_elements(self.symmetry, other.symmetry)
misorientation = other * ~self
all_dot_products = Rotation(misorientation).dot_outer(symmetry)
highest_dot_product = np.max(all_dot_products, axis=-1)
return highest_dot_product
    def dot_outer(self, other):
        """Symmetry reduced dot product of every orientation in this
        instance to every orientation in another instance, returned as
        numpy.ndarray.

        See Also
        --------
        dot
        """
        symmetry = _get_unique_symmetry_elements(self.symmetry, other.symmetry)
        misorientation = other.outer(~self)
        # Dot products against every symmetry element; keep the largest
        all_dot_products = Rotation(misorientation).dot_outer(symmetry)
        highest_dot_product = np.max(all_dot_products, axis=-1)
        # need to return axes order so that self is first
        order = tuple(range(self.ndim, self.ndim + other.ndim)) + tuple(
            range(self.ndim)
        )
        return highest_dot_product.transpose(*order)
    @deprecated(
        since="0.7",
        alternative="orix.quaternion.Orientation.get_distance_matrix",
        removal="0.8",
    )
    def distance(self, verbose=False, split_size=100):
        # Thin deprecated wrapper around Misorientation.distance
        return super().distance(verbose=verbose, split_size=split_size)
    def plot_unit_cell(
        self,
        c="tab:blue",
        return_figure=False,
        axes_length=0.5,
        structure=None,
        crystal_axes_loc="origin",
        **arrow_kwargs,
    ):
        """Plot the unit cell orientation, showing the sample and
        crystal reference frames.

        Parameters
        ----------
        c : str, optional
            Unit cell edge color.
        return_figure : bool, optional
            Return the plotted figure.
        axes_length : float, optional
            Length of the reference axes in Angstroms, by default 0.5.
        structure : diffpy.structure.Structure or None, optional
            Structure of the unit cell, only orthorhombic lattices are currently
            supported. If not given, a cubic unit cell with a lattice parameter of
            2 Angstroms will be plotted.
        crystal_axes_loc : str, optional
            Plot the crystal reference frame axes at the "origin" (default) or
            "center" of the plotted cell.
        arrow_kwargs : dict, optional
            Keyword arguments passed to
            :class:`matplotlib.patches.FancyArrowPatch`, for example "arrowstyle".

        Returns
        -------
        fig : matplotlib.figure.Figure
            The plotted figure.

        Raises
        ------
        ValueError
            If self.size > 1.
        """
        if self.size > 1:
            raise ValueError("Can only plot a single unit cell, so *size* must be 1")
        # Local import to avoid a circular dependency with orix.plot
        from orix.plot.unit_cell_plot import _plot_unit_cell

        fig = _plot_unit_cell(
            self,
            c=c,
            axes_length=axes_length,
            structure=structure,
            crystal_axes_loc=crystal_axes_loc,
            **arrow_kwargs,
        )
        if return_figure:
            return fig
    def in_euler_fundamental_region(self):
        """Euler angles in the fundamental Euler region of the proper
        subgroup.

        The Euler angle ranges of each proper subgroup are given in
        :attr:`~orix.quaternion.Symmetry.euler_fundamental_region`.

        From the procedure in MTEX' :code:`quaternion.project2EulerFR`.

        Returns
        -------
        euler_in_region : numpy.ndarray
            Euler angles in radians.
        """
        pg = self.symmetry.proper_subgroup
        # Symmetrize every orientation by operations of the proper
        # subgroup different from rotation about the c-axis
        ori = pg._special_rotation.outer(self)
        alpha, beta, gamma = ori.to_euler().T
        # Fold gamma into the range allowed by the primary axis order
        gamma = np.mod(gamma, 2 * np.pi / pg._primary_axis_order)
        # Find the first triplet among the symmetrically equivalent ones
        # inside the fundamental region
        max_alpha, max_beta, max_gamma = np.radians(pg.euler_fundamental_region)
        is_inside = (alpha <= max_alpha) * (beta <= max_beta) * (gamma <= max_gamma)
        first_nonzero = np.argmax(is_inside, axis=1)
        # Pick, per orientation, the angles of the first in-region triplet
        euler_in_region = np.column_stack(
            (
                np.choose(first_nonzero, alpha.T),
                np.choose(first_nonzero, beta.T),
                np.choose(first_nonzero, gamma.T),
            )
        )
        return euler_in_region
    def scatter(
        self,
        projection="axangle",
        figure=None,
        position=None,
        return_figure=False,
        wireframe_kwargs=None,
        size=None,
        direction=None,
        figure_kwargs=None,
        **kwargs,
    ):
        """Plot orientations in axis-angle space, the Rodrigues
        fundamental zone, or an inverse pole figure (IPF) given a sample
        direction.

        Parameters
        ----------
        projection : str, optional
            Which orientation space to plot orientations in, either
            "axangle" (default), "rodrigues" or "ipf" (inverse pole
            figure).
        figure : matplotlib.figure.Figure
            If given, a new plot axis :class:`~orix.plot.AxAnglePlot` or
            :class:`~orix.plot.RodriguesPlot` is added to the figure in
            the position specified by `position`. If not given, a new
            figure is created.
        position : int, tuple of int, matplotlib.gridspec.SubplotSpec, optional
            Where to add the new plot axis. 121 or (1, 2, 1) places it
            in the first of two positions in a grid of 1 row and 2
            columns. See :meth:`~matplotlib.figure.Figure.add_subplot`
            for further details. Default is (1, 1, 1).
        return_figure : bool, optional
            Whether to return the figure. Default is False.
        wireframe_kwargs : dict, optional
            Keyword arguments passed to
            :meth:`orix.plot.AxAnglePlot.plot_wireframe` or
            :meth:`orix.plot.RodriguesPlot.plot_wireframe`.
        size : int, optional
            If not given, all orientations are plotted. If given, a
            random sample of this `size` of the orientations is plotted.
        direction : Vector3d, optional
            Sample direction to plot with respect to crystal directions.
            If not given, the out of plane direction, sample Z, is used.
            Only used when plotting IPF(s).
        figure_kwargs : dict, optional
            Dictionary of keyword arguments passed to
            :func:`matplotlib.pyplot.figure` if `figure` is not given.
        kwargs
            Keyword arguments passed to
            :meth:`orix.plot.AxAnglePlot.scatter`,
            :meth:`orix.plot.RodriguesPlot.scatter`, or
            :meth:`orix.plot.InversePoleFigurePlot.scatter`.

        Returns
        -------
        figure : matplotlib.figure.Figure
            Figure with the added plot axis, if `return_figure` is True.

        See Also
        --------
        orix.plot.AxAnglePlot, orix.plot.RodriguesPlot,
        orix.plot.InversePoleFigurePlot
        """
        if projection.lower() != "ipf":
            # Axis-angle and Rodrigues projections are handled by the
            # Misorientation implementation
            figure = super().scatter(
                projection=projection,
                figure=figure,
                position=position,
                return_figure=return_figure,
                wireframe_kwargs=wireframe_kwargs,
                size=size,
                figure_kwargs=figure_kwargs,
                **kwargs,
            )
        else:
            # Local import to avoid a circular dependency with orix.plot
            from orix.plot.inverse_pole_figure_plot import (
                _setup_inverse_pole_figure_plot,
            )

            if figure is None:
                # Determine which hemisphere(s) to show
                symmetry = self.symmetry
                sector = symmetry.fundamental_sector
                if np.any(sector.vertices.polar > np.pi / 2):
                    hemisphere = "both"
                else:
                    hemisphere = "upper"
                figure, axes = _setup_inverse_pole_figure_plot(
                    symmetry=symmetry,
                    direction=direction,
                    hemisphere=hemisphere,
                    figure_kwargs=figure_kwargs,
                )
            else:
                # Reuse the axes of the figure that was passed in
                axes = np.asarray(figure.axes)
            for ax in axes:
                ax.scatter(self, **kwargs)
            figure.tight_layout()
        if return_figure:
            return figure
    def _dot_outer_dask(self, other, chunk_size=20):
        """Symmetry reduced dot product of every orientation in this
        instance to every orientation in another instance, returned as a
        Dask array.

        Parameters
        ----------
        other : orix.quaternion.Orientation
        chunk_size : int, optional
            Number of orientations per axis in each orientation instance
            to include in each iteration of the computation. Default is
            20.

        Returns
        -------
        dask.array.Array

        Notes
        -----
        To read the dot products array `dparr` into memory, do
        `dp = dparr.compute()`.
        """
        symmetry = _get_unique_symmetry_elements(self.symmetry, other.symmetry)
        misorientation = other._outer_dask(~self, chunk_size=chunk_size)
        # Summation subscripts: sum over the quaternion component axis
        # (length 4) while applying every symmetry element at once
        str1 = "abcdefghijklmnopqrstuvwxy"[: misorientation.ndim]
        str2 = "z" + str1[-1]  # Last elements have shape (4,)
        sum_over = f"{str1},{str2}->{str1[:-1] + str2[0]}"
        warnings.filterwarnings("ignore", category=da.PerformanceWarning)
        all_dot_products = da.einsum(sum_over, misorientation, symmetry.data)
        # Keep the highest magnitude dot product over symmetry elements
        highest_dot_product = da.max(abs(all_dot_products), axis=-1)
        return highest_dot_product
| StarcoderdataPython |
3583449 | import json
from vaccineAvailabilityNotifier.client.actionsImpl import ActionsImpl
from vaccineAvailabilityNotifier.processors.state_info_processor import StateIdProcessor
def get_url(state_id):
    """Return the Co-WIN admin API URL listing districts of ``state_id``."""
    base = 'https://cdn-api.co-vin.in/api/v2/admin/location/districts/'
    return base + str(state_id)
class DistrictIdProcessor:
    """Resolves district metadata for a state via the Co-WIN admin API.

    Parameters
    ----------
    state_name : str
        Name of the state whose districts are fetched.
    district_name : str
        District to select when ``all`` is falsy.
    all : bool
        When truthy, ``process`` returns the full districts payload.
        NOTE: the parameter name shadows the builtin ``all`` but is kept
        for backward compatibility with keyword callers.
    """

    __action_processor = ActionsImpl()

    def __init__(self, state_name, district_name, all):
        self.state_name = state_name
        self.district_name = district_name
        self.all = all

    def process(self):
        """Fetch districts for the configured state.

        Returns
        -------
        dict or None
            Full API payload when ``self.all`` is truthy, otherwise the
            single district record whose ``district_name`` matches.
            Returns None when the HTTP request fails; exits the process
            when the district is not found.
        """
        state_id = StateIdProcessor(state_name=self.state_name, all=False) \
            .process()["state_id"]
        print("state id: " + str(state_id))
        print("state name: " + str(self.state_name))
        response = self.__action_processor.get(
            url=get_url(state_id)
        )
        # "== 200" already implies status_code is not None, so the extra
        # None check of the original was redundant
        if response is not None and response.status_code == 200:
            dists = json.loads(response.content.decode('utf-8'))
            if self.all:
                return dists
            districts = list(filter(lambda x: x.get("district_name") == self.district_name, dists['districts']))
            if len(districts) == 0:
                # Bug fix: report the district that was not found; the
                # original printed self.state_name here
                print('unable to find district: ' + self.district_name)
                exit()
            return districts[0]
| StarcoderdataPython |
11354666 | <gh_stars>0
import json
from sqlalchemy import Column
from sqlalchemy import JSON as SQLAlchemy_JSON
from aurora.models import Data
class JSON(Data):
    """Data model for ``.json`` files: the parsed file content is stored
    in a SQLAlchemy JSON column."""

    # Case-insensitive filename pattern used for dispatch. Raw string
    # avoids the invalid-escape-sequence warning for "\." (same value).
    __pattern__ = r'(?i).*\.json$'

    data = Column(SQLAlchemy_JSON)

    def __init__(self, file):
        self.file = file
        # json.load reads and parses in one step; "with" closes the handle
        with open(file.fullpath, 'r') as f:
            self.data = json.load(f)
8037061 | <filename>feincms_handlers/__init__.py
""" This is for FeinCMS >= 1.5. For older versions use the legacy module.
Usage Example:
from feincms_handlers import handlers
handler = handlers.MasterHandler([handlers.AjaxHandler, handlers.FeinCMSHandler])
urlpatterns += patterns('',
url(r'^$', handlers.FeinCMSHandler.as_view(), name='feincms_home'),
url(r'^(.*)/$', handler, name='feincms_handler'),
)
"""
class NotMyJob(Exception):
    """Raised by a handler to signal that another handler owns the request.

    The handler that declined ("author") is kept for diagnostics.
    """

    def __init__(self, author):
        self.author = author

    def __str__(self):
        return "%r" % (self.author,)
9742948 | #!~/anaconda3/bin/python3
# ******************************************************
# Author: <NAME>
# Last modified: 2021-08-04 15:10
# Email: <EMAIL>
# Filename: utils.py
# Description:
# auxillary functions
# ******************************************************
import os
def check_directory(directory):
    """Create ``directory`` (and missing parents) if it does not exist.

    ``exist_ok=True`` removes the original check-then-create race: if
    another process creates the directory between the existence check
    and ``makedirs``, no exception is raised.
    """
    os.makedirs(directory, exist_ok=True)
def get_bad_slide_list(txt_path):
    """Read bad-slide names, one per line, from ``txt_path``.

    Each line is stripped of surrounding whitespace; lines are returned
    in file order (empty lines are kept, matching the original behavior).

    Parameters
    ----------
    txt_path : str
        Path to the text file listing bad slide names.

    Returns
    -------
    list of str
    """
    # "with" guarantees the handle is closed (the original leaked it)
    with open(txt_path, 'r') as f:
        return [line.strip() for line in f]
def remove_bad_slide(slide_list, bad_list):
    """Return the slides whose base name (text before the first '.') is
    not listed in ``bad_list``, preserving order."""
    return [slide for slide in slide_list if slide.split('.')[0] not in bad_list]
| StarcoderdataPython |
3543934 | from typing import Any, Dict
from django.contrib.auth import authenticate
from rest_framework import permissions, serializers, status
from rest_framework.response import Response
from rest_framework.views import APIView
from kite_runner.utils import tokens
from .renderer import UserJSONRenderer
class LoginSerializer(serializers.Serializer):
    """Validates login credentials and serializes email, username and
    an auth token for the authenticated user."""

    email = serializers.EmailField()
    # Bug fix: max_length must be an int (or None); the original passed
    # False, which installs a nonsensical MaxLengthValidator(False)
    username = serializers.CharField(max_length=255, read_only=True)
    password = serializers.CharField(max_length=128, write_only=True)

    class Meta:
        fields = ("email", "username", "password", "token")

    def to_representation(self, value: Any) -> Any:
        # The dict produced by validate() is already the output form
        return value

    def validate(self, data: Dict) -> Dict:
        """Authenticate with email/password; raise ValidationError on
        missing fields or failed authentication."""
        email = data.get("email", None)
        password = data.get("password", None)
        if not email or not password:
            raise serializers.ValidationError("Email and password are required")
        user = authenticate(email=email, password=password)
        if user is None:
            raise serializers.ValidationError("wrong username or password")
        return {
            "email": user.email,
            "username": user.username,
            "token": tokens.get_user_token(user),
        }
class LoginViewSet(APIView):
    """API endpoint that exchanges credentials for a user token."""

    serializer_class = LoginSerializer
    permission_classes = (permissions.AllowAny,)
    renderer_classes = (UserJSONRenderer,)

    def post(self, request: Any) -> Response:
        """Validate the posted ``user`` payload and return login data."""
        payload = request.data.get("user", None)
        serializer = self.serializer_class(data=payload)
        serializer.is_valid(raise_exception=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
| StarcoderdataPython |
9735342 | from setuptools import setup
import argz
author = 'bnbdr'
# Package metadata is derived from the argz module itself so version and
# long description have a single source of truth.
setup(
    name='argz',
    version='.'.join(map(str, argz.version)),  # e.g. (1, 2, 3) -> "1.2.3"
    author=author,
    author_email='<EMAIL>',
    url='https://github.com/{}/argz'.format(author),
    description="Argument parsing for the lazy",
    long_description=argz.__doc__,  # module docstring doubles as the long description
    long_description_content_type="text/markdown",
    license='MIT',
    keywords='argument parse args',
    py_modules=['argz'],  # single-module distribution, no packages
    classifiers=(
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ),
)
| StarcoderdataPython |
11352660 | import os
import onnx
from shutil import copyfile
from hdfg import hdfgutils
from hdfg import load_store
from hdfg.passes.flatten import flatten_graph, is_literal
import codegen as c
from hdfg.hdfg_pb2 import Component, Program
from hdfg.visualize import *
from .serial.DataFlowGraph import *
from codegen.tabla import tabla_utils
def read_config(filename):
    """Load a JSON configuration file and return the parsed object."""
    with open(filename, 'r') as f:
        return json.loads(f.read())
class TablaTranslation(object):
    """Translate a serialized HDFG/ONNX program into a TABLA macro dataflow graph.

    The constructor runs the whole pipeline: load the program, flatten its
    graph, build a DFG with explicit source (id 0) and sink (id 1) nodes,
    prune nodes that cannot reach the sink, dump the DFG to JSON and hand it
    to the TABLA compiler.

    NOTE(review): the original file's indentation was lost; nesting here was
    reconstructed from the code's semantics and should be confirmed against
    the original source.
    """

    # Operand count (arity) of the supported intrinsic functions.
    func_table = {
        'pi' : 2,
        'sum' : 2,
        'norm' : 2,
        'gaussian' : 1,
        'sigmoid' : 1,
        'sig_sym' : 1,
        'log' : 1
    }
    # HDFG op-type -> TABLA DFG operation symbol.
    op_map = {
        "sub" : "-",
        "add" : "+",
        "div" : "/",
        "mul" : "*",
        "sigmoid" : "sigmoid",
        "sum" : "sum",
        "tlt" : "<",
        "tgt" : ">",
    }

    def __init__(self, onnx_proto, run_async=False):
        """Run the full translation pipeline for the program at `onnx_proto`."""
        self.input_proto = onnx_proto
        self.output_dir, self.output_file = os.path.split(self.input_proto)
        self.proto_name = self.output_file.split('.')[0]
        self.program = load_store.load_program(self.input_proto)
        self.graph = self.program.graph
        self.templates = self.program.templates
        self.components = {}
        self.includes = []
        self.functions = []
        self.structs = []
        self.signature_map = {}
        self.initializer = None
        self.header = []
        self.exec = []
        self.run_async = run_async
        self.add_flattened_graph()
        # self.visualize()
        self.create_node_map()
        self.dfg = DataFlowGraph()
        # Create source and sink nodes first.
        source = DFGNode()
        source.operation = 'source'
        self.dfg.add(source)
        sink = DFGNode()
        sink.operation = 'sink'
        sink.dist2sink = 0
        self.dfg.add(sink)
        self.translate_graph()
        self.set_d2sink(sink)
        # Prune dead nodes: anything whose dist2sink was never assigned
        # cannot reach the sink, so unlink it from parents/children.
        removedNodes = []
        for node in self.dfg.nodes:
            if node.dist2sink is None:
                for child in node.children:
                    child.parents.remove(node)
                for parent in node.parents:
                    parent.children.remove(node)
                removedNodes.append(node)
        for node in removedNodes:
            self.dfg.remove(node)
        self.dfg.updateId()
        dir_path = os.path.dirname(os.path.realpath(__file__))
        if not os.path.exists(dir_path + '/artifacts/'):
            os.makedirs(dir_path + '/artifacts/')
        macro_dfg_file = dir_path + '/artifacts/macro_dfg.json'
        self.write_dfg(macro_dfg_file)
        tabla_utils.compile(self.dfg)

    def write_dfg(self, path):
        """Serialize the current DFG to `path`."""
        self.dfg.save(path)

    def create_dfg(self):
        """Walk each statement graph, create DFG nodes per assignment, and
        connect model-state outputs to the sink node (dfg.get(1))."""
        for sg in self.flattened_graph.statement_graphs:
            nodes = sg.statement_node
            node_cats = [self.node_map[n].op_cat for n in nodes]
            if 'assign' in node_cats:
                self.idx_map = {}
                if node_cats[-1] == 'read_write':
                    # Last node is the write; the assign is second-to-last.
                    assign_node = self.node_map[nodes[-2]]
                    assignee_edge = self.flattened_graph.edge_info[self.node_map[nodes[-2]].output[0]]
                    context, var = self.get_var_context(assignee_edge.name)
                    assignee_key = nodes[-1]
                else:
                    assign_node = self.node_map[nodes[-1]]
                    assignee_edge = self.flattened_graph.edge_info[self.node_map[nodes[-1]].output[0]]
                    context, var = self.get_var_context(assignee_edge.name)
                    assignee_key = context + '/' + str(assignee_edge.vid)
                if assignee_edge.iid:
                    # Indexed assignee: expand one key per index combination.
                    iid = assignee_edge.iid
                    if iid in list(self.flattened_graph.edge_info):
                        iid_info = self.flattened_graph.edge_info[iid]
                    elif (context + "/" + iid) in list(self.flattened_graph.edge_info):
                        iid_info = self.flattened_graph.edge_info[context + "/" + iid]
                    else:
                        logging.error("Index id not in edge map: context {}, id: {}".format(context, iid))
                    dimensions = hdfgutils.get_attribute_value(iid_info.attributes['dimensions'])
                    if len(dimensions) == 0:
                        dimensions.append(iid)
                    node_strings = self.generate_index_nodes([assignee_key], dimensions, context, '')
                else:
                    node_strings = [assignee_key]
                result = self.create_statement_nodes(nodes)
                if len(result) != len(node_strings):
                    print("Assignee key for unequal nodes: {}".format(assignee_key))
                else:
                    var_id = var[:var.find('[')]
                    if var_id in self.gradient_table.keys():
                        dtype = 'gradient'
                    else:
                        dtype = result[0].dataType
                    if assignee_key not in self.symbol_table.keys():
                        self.symbol_table[assignee_key] = []
                    for n in range(len(node_strings)):
                        result[n].dataType = dtype
                        self.symbol_table[assignee_key].append(result[n])
        for key in self.symbol_table.keys(): # Connect outputs
            if key in list(self.flattened_graph.state):
                for node in self.symbol_table[key]:
                    # NOTE(review): `is 0` / `is not 1` compare identity on
                    # ints; works on CPython small ints but should be `==`.
                    if len(node.children) is 0 and len(node.parents) is not 1:
                        node.dataType = 'model'
                        self.connect_node(node, self.dfg.get(1))

    def create_statement_nodes(self, nodes):
        """Create DFG nodes for one statement; return the nodes of its final output."""
        result = []
        for n in nodes:
            node_info = self.node_map[n]
            if node_info.op_type in self.op_map.keys():
                if len(node_info.input) > 2:
                    logging.error("Too many inputs for TABLA op - Node: {}, op: {}".format(n, node_info.op_type))
                elif node_info.op_type in self.func_table.keys():
                    # sum/sigmoid etc. get dedicated function-node handling.
                    self.create_func_node(node_info)
                else:
                    self.create_bin_node(node_info)
                if node_info.output[0] not in self.symbol_table.keys():
                    logging.error("Output {} not in symbol table".format(node_info.output[0]))
                    exit(0)
                else:
                    result = self.symbol_table[node_info.output[0]]
        return result

    def create_func_node(self,node):
        """Create DFG node(s) for a function op (currently 'sum' and 'sigmoid')."""
        if node.op_type == 'sum':
            expr = self.get_node(node.input[0])
            # NOTE(review): acc_range is captured but not used.
            acc_range = node.input[1]
            result = self.accum_nodes('+', expr, node.output[0])
            self.symbol_table[node.output[0]] = [result]
        elif node.op_type == 'sigmoid':
            parent = self.get_node(node.input[0])
            dfg_node = DFGNode()
            dfg_node.operation = node.op_type
            dfg_node.name = node.output[0]
            self.dfg.add(dfg_node)
            self.connect_node(parent[0], dfg_node)
            self.symbol_table[node.output[0]] = [dfg_node]

    def accum_nodes(self,op, nodes, name):
        """Reduce `nodes` with binary `op` as a balanced tree; return the root node."""
        if len(nodes) == 1:
            return nodes[0]
        elif len(nodes) == 2:
            node = DFGNode()
            node.operation = op
            node.name =name
            self.dfg.add(node)
            self.connect_node(nodes[0], node)
            self.connect_node(nodes[1], node)
            return node
        else:
            middle = len(nodes)//2
            left = nodes[middle:]
            right = nodes[:middle]
            node = DFGNode()
            node.operation = op
            node.name= name
            self.dfg.add(node)
            self.connect_node(self.accum_nodes(op, left, name), node)
            self.connect_node(self.accum_nodes(op, right, name), node)
            return node

    def create_bin_node(self,node):
        """Create elementwise DFG nodes for a binary op, broadcasting when the
        left/right operand lists have different lengths."""
        left = self.get_node(node.input[0])
        right = self.get_node(node.input[1])
        op = self.op_map[node.op_type]
        dst = node.output[0]
        contextleft, varleft = self.get_var_context(node.input[0])
        contextright, varright = self.get_var_context(node.input[1])
        varleft = varleft[:varleft.find('[')]
        varright = varright[:varright.find('[')]
        # An operand touching a gradient variable makes the result a gradient.
        if varleft in self.gradient_table.keys():
            dtype = 'gradient'
        elif varright in self.gradient_table.keys():
            dtype = 'gradient'
        else:
            dtype = None
        indices_left =node.input[0].count('[')
        indices_right =node.input[1].count('[')
        if dst in self.symbol_table.keys():
            logging.error("Dst already in symbol table: {}".format(dst))
        else:
            self.symbol_table[dst] = []
        # Collect textual index expressions, e.g. "a[i][j]" -> ['i', 'j'].
        # NOTE(review): lindex/rindex are built but not used afterwards.
        if indices_left > 0:
            lindex = []
            ltext = node.input[0]
            while ltext.find('[') != -1:
                lindex.append(ltext[ltext.find('[')+1:ltext.find(']')])
                ltext = ltext[ltext.find(']')+1:]
        if indices_right > 0:
            rindex = []
            rtext = node.input[1]
            while rtext.find('[') != -1:
                rindex.append(rtext[rtext.find('[')+1:rtext.find(']')])
                rtext = rtext[rtext.find(']')+1:]
        if len(left) == 1 and len(right) > 1:
            # Broadcast scalar left operand over the right vector.
            for n in range(len(right)):
                dfg_node = DFGNode()
                dfg_node.operation = op
                dfg_node.name = node.output[0]
                dfg_node.dataType = dtype
                self.dfg.add(dfg_node)
                self.connect_node(left[0], dfg_node)
                self.connect_node(right[n], dfg_node)
                self.symbol_table[dst].append(dfg_node)
        elif len(right) == 1 and len(left) > 1:
            # Broadcast scalar right operand over the left vector.
            for n in range(len(left)):
                dfg_node = DFGNode()
                dfg_node.operation = op
                dfg_node.name = node.output[0]
                dfg_node.dataType = dtype
                self.dfg.add(dfg_node)
                self.connect_node(left[n], dfg_node)
                self.connect_node(right[0], dfg_node)
                self.symbol_table[dst].append(dfg_node)
        elif len(left) != len(right):
            if indices_left == indices_right:
                # Same index depth but different sizes: full cross product.
                for l in left:
                    for r in right:
                        dfg_node = DFGNode()
                        dfg_node.operation = op
                        dfg_node.name = node.output[0]
                        dfg_node.dataType = dtype
                        self.dfg.add(dfg_node)
                        self.connect_node(l, dfg_node)
                        self.connect_node(r, dfg_node)
                        self.symbol_table[dst].append(dfg_node)
            elif indices_right > indices_left:
                # Cycle through the shorter left operand list.
                lval = 0
                for r in (right):
                    if lval == len(left):
                        lval = 0
                    dfg_node = DFGNode()
                    dfg_node.operation = op
                    dfg_node.name = node.output[0]
                    dfg_node.dataType = dtype
                    self.dfg.add(dfg_node)
                    self.connect_node(left[lval], dfg_node)
                    self.connect_node(r, dfg_node)
                    self.symbol_table[dst].append(dfg_node)
                    lval +=1
            else:
                # Cycle through the shorter right operand list.
                rval = 0
                for l in (left):
                    if rval == len(right):
                        rval = 0
                    dfg_node = DFGNode()
                    dfg_node.operation = op
                    dfg_node.name = node.output[0]
                    dfg_node.dataType = dtype
                    self.dfg.add(dfg_node)
                    self.connect_node(right[rval], dfg_node)
                    self.connect_node(l, dfg_node)
                    self.symbol_table[dst].append(dfg_node)
                    rval +=1
        else:
            # Equal lengths: pairwise elementwise op.
            for n in range(len(left)):
                dfg_node = DFGNode()
                dfg_node.operation = op
                dfg_node.name = node.output[0]
                dfg_node.dataType = dtype
                self.dfg.add(dfg_node)
                self.connect_node(left[n], dfg_node)
                self.connect_node(right[n], dfg_node)
                self.symbol_table[dst].append(dfg_node)

    def get_node(self, edge):
        """Resolve an edge name to its list of DFG nodes, creating constant
        nodes for literals and aliasing indexed variables on demand."""
        if is_literal(edge):
            if edge not in self.symbol_table.keys():
                node = DFGNode()
                node.operation = edge
                node.name = edge
                node.dataType = 'constant'
                self.dfg.add(node)
                self.connect_node(self.dfg.get(0), node)
                self.symbol_table[edge] = [node]
                return self.symbol_table[edge]
            else:
                return self.symbol_table[edge]
        elif edge in self.symbol_table.keys():
            return self.symbol_table[edge]
        else:
            edge_info =self.flattened_graph.edge_info[edge]
            if 'alias' in list(edge_info.attributes):
                alias = hdfgutils.get_attribute_value(edge_info.attributes['alias'])
                print("Alias is: {}".format(alias))
            vid = edge_info.vid
            context, var = self.get_var_context(edge)
            if edge_info.iid:
                iid = edge_info.iid
                if iid in list(self.flattened_graph.edge_info):
                    iid_info = self.flattened_graph.edge_info[iid]
                elif (context + "/" + iid) in list(self.flattened_graph.edge_info):
                    iid_info = self.flattened_graph.edge_info[context + "/" + iid]
                else:
                    logging.error("Index id not in edge map: context {}, id: {}".format(context, iid))
                dimensions = hdfgutils.get_attribute_value(iid_info.attributes['dimensions'])
                if len(dimensions) == 0:
                    dimensions.append(iid)
                # Populate idx_map for this index id (return value unused).
                _ = self.generate_index_nodes([''], dimensions, context, '')
                vid_hash = context + '/' + vid
                if len(self.symbol_table[vid_hash]) != len(self.idx_map[iid]):
                    logging.error("Trying to use wrong dimensional input for: {} with index {}, {} and {} ".format(vid, iid, len(self.symbol_table[vid_hash]),len(self.idx_map[iid]) ))
                    exit(1)
                else:
                    self.symbol_table[edge] = self.symbol_table[vid_hash].copy()
                    return self.symbol_table[edge]
            else:
                print(self.symbol_table.keys())
                logging.error("Error, edge not found in symbol table and doesnt have index: {}, vid: {} edge name: {}".format(edge, vid, edge_info.name))

    def create_node_map(self):
        """Index the flattened graph's nodes by name for O(1) lookup."""
        self.node_map = {}
        for n in self.flattened_graph.sub_graph:
            self.node_map[n.name] = n

    def translate_graph(self):
        """Build all lookup tables, then construct the DFG from the statements."""
        self.link_table = {}
        self.const_table = {}
        self.iter_table = {}
        self.symbol_table = {}
        self.gradient_table = {}
        self.create_constant_table()
        self.create_iter_table()
        self.create_symbol_table()
        self.create_gradient_table()
        self.create_dfg()

    def add_flattened_graph(self):
        """Flatten the hierarchical graph into one component and attach it
        to the program as the 'flattened_graph' attribute."""
        self.flattened_graph = Component(name="flattened_" + str(self.proto_name))
        edge_node_ids = {'edges': {},
                         'nodes': {}}
        self.flattened_graph.statement_graphs.extend([])
        flatten_graph(self.flattened_graph, self.graph, self.templates, '', edge_node_ids, {})
        flattened_graph_attr = hdfgutils.make_attribute('flattened', self.flattened_graph)
        self.program.attributes['flattened_graph'].CopyFrom(flattened_graph_attr)

    def visualize(self):
        """Render the flattened graph to .dot and .png (requires graphviz)."""
        rankdir = "TB"
        pydot_graph = pydot.Dot(name=self.input_proto, rankdir=rankdir)
        out_graph = GetPydotGraph(self.flattened_graph, name=self.graph.name, rankdir=rankdir)
        filename = self.output_dir + '/' + self.output_file[:-3] + '.dot'
        pydot_graph.add_subgraph(out_graph)
        pydot_graph.write(filename, format='raw')
        pdf_filename = filename[:-3] + 'png'
        try:
            pydot_graph.write_png(pdf_filename)
        except Exception:
            print(
                'Error when writing out the png file. Pydot requires graphviz '
                'to convert dot files to pdf, and you may not have installed '
                'graphviz. On ubuntu this can usually be installed with "sudo '
                'apt-get install graphviz". We have generated the .dot file '
                'but will not be able to generate png file for now.'
            )

    def create_constant_table(self):
        """Collect per-context scalar constants (aliased edges), converting
        to int/float according to the edge's declared type."""
        for e in list(self.flattened_graph.edge_info):
            edge = self.flattened_graph.edge_info[e]
            vtype = hdfgutils.get_attribute_value(edge.attributes['vtype'])
            dtype = hdfgutils.get_attribute_value(edge.attributes['type'])
            if 'alias' in list(edge.attributes) and vtype == 'scalar':
                context, var = self.get_var_context(e)
                if context not in self.const_table.keys():
                    self.const_table[context] = {}
                alias = hdfgutils.get_attribute_value(edge.attributes['alias'])
                if var not in self.const_table[context].keys():
                    if dtype == 'int':
                        self.const_table[context][var] = int(alias)
                    elif dtype == 'float':
                        self.const_table[context][var] = float(alias)
                    else:
                        self.const_table[context][var] = alias

    def create_gradient_table(self):
        """Mark variables that are updated via themselves (gradient updates):
        a read_write statement whose inputs include an 'assign' edge."""
        for sg in self.flattened_graph.statement_graphs:
            nodes = sg.statement_node
            node_cats = [self.node_map[n].op_cat for n in nodes]
            if node_cats[-1] == 'read_write':
                assignee_edge = self.flattened_graph.edge_info[self.node_map[nodes[-1]].output[0]]
                assignee_key = nodes[-1]
            else:
                continue
            assignee_update = False
            gradient = None
            gradient_key = None
            for n in nodes:
                node_info = self.node_map[n]
                if n == assignee_key:
                    assignee_update = True
                else:
                    cat = node_info.op_cat
                    for i in node_info.input:
                        if i in list(self.flattened_graph.edge_info):
                            in_edge = self.flattened_graph.edge_info[i]
                            vcat = hdfgutils.get_attribute_value(in_edge.attributes['vcat'])
                            if vcat == 'assign':
                                gradient = in_edge.name
                                gradient_key = in_edge.vid
            if assignee_update and gradient:
                self.gradient_table[gradient_key] = True
                self.link_table[gradient] = assignee_key

    def create_iter_table(self):
        """Evaluate the (lower, upper) bounds of every declared index variable."""
        for e in list(self.flattened_graph.edge_info):
            edge = self.flattened_graph.edge_info[e]
            vtype = hdfgutils.get_attribute_value(edge.attributes['vtype'])
            vcat = hdfgutils.get_attribute_value(edge.attributes['vcat'])
            if vcat == 'declaration' and vtype == 'index':
                context = "/".join(e.split('/')[:-1])
                lower = hdfgutils.get_attribute_value(edge.attributes['lower'])
                upper = hdfgutils.get_attribute_value(edge.attributes['upper'])
                lower_val = self.try_eval(lower, context)
                upper_val = self.try_eval(upper, context)
                if not is_literal(str(lower_val)):
                    logging.error("Error, indices not evaluated for {}, lower val {}".format(e, lower))
                if not is_literal(str(upper_val)):
                    logging.error("Error, indices not evaluated for {}, upper val {}".format(e, upper))
                self.iter_table[e] = (lower_val, upper_val)

    def create_symbol_table(self):
        """Create source-connected DFG nodes for constants, model inputs and
        model state, expanding array variables into one node per element."""
        for c in self.const_table.keys():
            for k in self.const_table[c].keys():
                const = c + '/' + k
                node = DFGNode()
                node.operation = const
                node.name = const
                node.dataType = 'constant'
                self.dfg.add(node)
                self.connect_node(self.dfg.get(0), node)
                self.symbol_table[const] = [node]
        for ename in list(self.flattened_graph.edge_info):
            context, var = self.get_var_context(ename)
            context_expr = context + "/" + var
            if var in list(self.flattened_graph.edge_info):
                e = var
                edge = self.flattened_graph.edge_info[var]
            elif context_expr in list(self.flattened_graph.edge_info):
                e = context_expr
                edge = self.flattened_graph.edge_info[context_expr]
            else:
                logging.error("Error! Edge name not in edge map: {}, context: {}".format(var, context))
            vtype = hdfgutils.get_attribute_value(edge.attributes['vtype'])
            vcat = hdfgutils.get_attribute_value(edge.attributes['vcat'])
            if e in self.flattened_graph.input or e in self.flattened_graph.state:
                if e in self.flattened_graph.input:
                    dtype = 'model_input'
                elif e in self.flattened_graph.state:
                    dtype = 'model'
                dims = hdfgutils.get_attribute_value(edge.attributes['dimensions'])
                if len(dims) == 0:
                    # Scalar variable: one node, named by its alias if any.
                    if 'alias' in list(edge.attributes):
                        name = hdfgutils.get_attribute_value(edge.attributes['alias'])
                    else:
                        name = e
                    node = DFGNode()
                    node.operation = name
                    node.dataType = dtype
                    self.dfg.add(node)
                    self.connect_node(self.dfg.get(0), node)
                    self.symbol_table[name] = [node]
                else:
                    # Array variable: resolve dimension sizes, then one node
                    # per element key like "w[0][1]".
                    iters = []
                    for d in dims:
                        if d.isdigit():
                            iter = int(d)
                            iters.append(iter)
                        elif d in self.const_table[context].keys():
                            iter = self.const_table[context][d]
                            iters.append(iter)
                        else:
                            logging.error("Dimension not in constants: {}".format(d))
                    node_strings = self.generate_array_nodes([e], iters)
                    self.symbol_table[e] = []
                    for n in node_strings:
                        node = DFGNode()
                        node.operation = n
                        node.name = n
                        node.dataType = dtype
                        self.dfg.add(node)
                        self.connect_node(self.dfg.get(0), node)
                        self.symbol_table[e].append(node)

    def generate_array_nodes(self,nodes, iters):
        """Recursively expand names by dimension sizes: 'a', [2] -> 'a[0]','a[1]'."""
        if len(iters) == 0:
            return nodes
        else:
            current_iter = iters[0]
            new_nodes = []
            for n in nodes:
                for i in range(current_iter):
                    key = n + '[' + str(i) + ']'
                    new_nodes.append(key)
            return self.generate_array_nodes(new_nodes, iters[1:])

    def generate_index_nodes(self,nodes, iters, context, multi_index):
        """Expand names over index-variable ranges, memoizing expansions in
        self.idx_map keyed by both the single index and the multi-index path.

        NOTE(review): the range used is range(low, high+1), i.e. inclusive of
        the upper bound; the placement of the update_index reset was
        reconstructed (first name collects all index strings).
        """
        if len(iters) == 0:
            return nodes
        else:
            curr = iters[0]
            update_index = True
            if curr not in self.idx_map.keys():
                self.idx_map[curr] = []
            else:
                update_index = False
            multi_index += '[' + curr + ']'
            if multi_index not in self.idx_map.keys():
                self.idx_map[multi_index] = []
            else:
                # Already expanded this index path: reuse the memoized keys.
                return self.generate_index_nodes(self.idx_map[multi_index], iters[1:], context, multi_index)
            if curr in self.const_table[context].keys():
                low = self.const_table[context][curr]
                high = self.const_table[context][curr] + 1
            elif curr.isdigit():
                low = int(curr)
                high = low + 1
            elif curr in self.iter_table.keys():
                low, high = self.iter_table[curr]
            elif context + "/" + curr in self.iter_table.keys():
                low, high = self.iter_table[context + "/" + curr]
            else:
                logging.error("Could not find index for {}".format(curr))
            new_nodes = []
            for n in nodes:
                for i in range(low, high+1):
                    key = n + '[' + str(i) + ']'
                    indices = key[key.find('['):]
                    self.idx_map[multi_index].append(indices)
                    new_nodes.append(key)
                    if update_index:
                        self.idx_map[curr].append('[' + str(i) + ']')
                update_index = False
            return self.generate_index_nodes(new_nodes, iters[1:], context, multi_index)

    def connect_node(self, parent, child):
        """Link parent -> child; parents are prepended so operand order is kept."""
        child.parents.insert(0, parent)
        parent.children.append(child)

    def get_var_context(self, expr):
        """Split 'a/b/c' into ('a/b', 'c'); bare names get context 'main'."""
        context = expr.split("/")
        if len(context) > 1:
            var = context[-1]
            context = "/".join(context[:-1])
        else:
            var = context[0]
            context = "main"
        return context, var

    def try_eval(self, expr, context):
        """Resolve `expr` to an int: scalars directly, otherwise eval() against
        the context's constant table.

        NOTE(review): eval() on graph-provided text — safe only for trusted
        model files.
        """
        context_expr = context + "/" + expr
        if expr in list(self.flattened_graph.edge_info):
            edge_expr = self.flattened_graph.edge_info[expr]
        elif context_expr in list(self.flattened_graph.edge_info):
            edge_expr = self.flattened_graph.edge_info[(context_expr)]
        else:
            print("Error! No expression in edges: {}".format(expr))
        vtype = hdfgutils.get_attribute_value(edge_expr.attributes['vtype'])
        if vtype == 'scalar':
            return int(expr)
        else:
            result = eval(expr, self.const_table[context].copy())
            return result

    def set_d2sink(self, curr_node):
        """Propagate dist2sink backwards: each node gets the longest path
        length to the sink (recursion relaxes on improvement only)."""
        for parent in curr_node.parents:
            if parent.dist2sink is None or parent.dist2sink < curr_node.dist2sink + 1:
                parent.dist2sink = curr_node.dist2sink + 1
                self.set_d2sink(parent)
| StarcoderdataPython |
import tkinter as tk

# Minimal tkinter demo: a window with one greeting label.
root = tk.Tk()
greeting = tk.Label(root, text="Hello tkinter")
greeting.pack()
root.mainloop()
"""
The DFTD3 program needs to be installed to run these tests.
"""
from copy import deepcopy
from typing import List
import numpy as np
import pytest
import torch
from ase import Atoms
from ase.build import fcc111, molecule
from torch_dftd.testing.damping import damping_method_list
from torch_dftd.torch_dftd3_calculator import TorchDFTD3Calculator
def _create_atoms() -> List[Atoms]:
    """Build the two benchmark systems: an ether molecule and a periodic Au(111) slab."""
    ether = molecule("CH3CH2OCH3")
    gold_slab = fcc111("Au", size=(2, 1, 3), vacuum=80.0)
    gold_slab.pbc = np.array([True, True, True])
    return [ether, gold_slab]
def _assert_energy_equal_batch(calc1, atoms_list: List[Atoms]):
expected_results_list = []
for atoms in atoms_list:
calc1.reset()
atoms.calc = calc1
calc1.calculate(atoms, properties=["energy"])
expected_results_list.append(deepcopy(calc1.results))
results_list = calc1.batch_calculate(atoms_list, properties=["energy"])
for exp, actual in zip(expected_results_list, results_list):
assert np.allclose(exp["energy"], actual["energy"], atol=1e-4, rtol=1e-4)
def _test_calc_energy(damping, xc, old, atoms_list, device="cpu", dtype=torch.float64):
    """Drive the batch-vs-single energy comparison for one parameter combination."""
    calc = TorchDFTD3Calculator(
        damping=damping,
        xc=xc,
        device=device,
        dtype=dtype,
        old=old,
        cutoff=25.0,  # reduced cutoff to make the test faster
    )
    _assert_energy_equal_batch(calc, atoms_list)
def _assert_energy_force_stress_equal_batch(calc1, atoms_list: List[Atoms]):
expected_results_list = []
for atoms in atoms_list:
calc1.reset()
atoms.calc = calc1
calc1.calculate(atoms, properties=["energy", "forces", "stress"])
expected_results_list.append(deepcopy(calc1.results))
results_list = calc1.batch_calculate(atoms_list, properties=["energy", "forces", "stress"])
for exp, actual in zip(expected_results_list, results_list):
assert np.allclose(exp["energy"], actual["energy"], atol=1e-4, rtol=1e-4)
assert np.allclose(exp["forces"], actual["forces"], atol=1e-5, rtol=1e-5)
if hasattr(exp, "stress"):
assert np.allclose(exp["stress"], actual["stress"], atol=1e-5, rtol=1e-5)
def _test_calc_energy_force_stress(
    damping, xc, old, atoms_list, device="cpu", dtype=torch.float64, abc=False, cnthr=15.0
):
    """Drive the batch-vs-single energy/forces/stress comparison."""
    calc_kwargs = dict(
        damping=damping,
        xc=xc,
        device=device,
        dtype=dtype,
        old=old,
        cutoff=22.0,  # reduced cutoff to make the test faster
        cnthr=cnthr,
        abc=abc,
    )
    _assert_energy_force_stress_equal_batch(TorchDFTD3Calculator(**calc_kwargs), atoms_list)
@pytest.mark.parametrize("damping,old", damping_method_list)
@pytest.mark.parametrize("device", ["cpu", "cuda:0"])
@pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
def test_calc_energy_device_batch(damping, old, device, dtype):
    """Test2-1: batch energies match single-structure energies across devices/dtypes."""
    _test_calc_energy(damping, "pbe", old, _create_atoms(), device=device, dtype=dtype)
@pytest.mark.parametrize("damping,old", damping_method_list)
@pytest.mark.parametrize("device", ["cpu", "cuda:0"])
@pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
def test_calc_energy_force_stress_device_batch(damping, old, device, dtype):
    """Test2-2: batch energy/forces/stress match single-structure results across devices/dtypes."""
    _test_calc_energy_force_stress(damping, "pbe", old, _create_atoms(), device=device, dtype=dtype)
@pytest.mark.parametrize("damping,old", damping_method_list)
@pytest.mark.parametrize("device", ["cpu", "cuda:0"])
@pytest.mark.parametrize("dtype", [torch.float64])
def test_calc_energy_force_stress_device_batch_abc(damping, old, device, dtype):
    """Test2-3: like Test2-2 but with the three-body (ABC) dispersion term enabled."""
    xc = "pbe"
    abc = True
    atoms_list = _create_atoms()
    # BUG FIX: `abc` was assigned but never forwarded, so the three-body term
    # stayed disabled and this test silently duplicated the non-ABC variant.
    _test_calc_energy_force_stress(
        damping, xc, old, atoms_list, device=device, dtype=dtype, abc=abc, cnthr=7.0
    )
# Allow running this test module directly (verbose, stdout not captured).
if __name__ == "__main__":
    pytest.main([__file__, "-v", "-s"])
| StarcoderdataPython |
'''
* Copyright 2018 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
'''
import numpy as np
class RangeFromBatchMinMax:
    """Calibration range estimator: exact min/max over one evaluated batch."""

    def __call__(self, sess, tensor, dataset, is_weights=False):
        """Evaluate `tensor` on `dataset` and return (min, max, batch).

        `is_weights` is accepted for interface compatibility with the other
        estimators; the computation is the same either way.
        """
        batch = sess.run(tensor, dataset)
        # Use numpy reductions instead of Python min()/max() over a flattened
        # copy: one C-level pass, no intermediate copy or boxed scalars.
        minv = batch.min()
        maxv = batch.max()
        return minv, maxv, batch
class RangeFromBatchMinMax98:
    """Range estimator that clips the extreme 1% of values on each tail."""

    def __call__(self, sess, tensor, dataset, is_weights=False):
        """Return (1st-percentile value, 99th-percentile value, batch)."""
        batch = sess.run(tensor, dataset)
        assert(batch.size > 100)
        ordered = np.sort(batch, axis=None)  # flattened, ascending
        n = ordered.size
        minv = ordered[round(n * 0.01)]
        maxv = ordered[round(n * 0.99)]
        return minv, maxv, batch
class RangeFromBatchMinMax90:
    """Range estimator that clips the extreme 5% of values on each tail."""

    def __call__(self, sess, tensor, dataset, is_weights=False):
        """Return (5th-percentile value, 95th-percentile value, batch)."""
        batch = sess.run(tensor, dataset)
        assert(batch.size > 100)
        ordered = np.sort(batch, axis=None)  # flattened, ascending
        n = ordered.size
        minv = ordered[round(n * 0.05)]
        maxv = ordered[round(n * 0.95)]
        return minv, maxv, batch
class RangeFromBatchMinMax80:
    """Range estimator that clips the extreme 10% of values on each tail."""

    def __call__(self, sess, tensor, dataset, is_weights=False):
        """Return (10th-percentile value, 90th-percentile value, batch)."""
        batch = sess.run(tensor, dataset)
        assert(batch.size > 100)
        ordered = np.sort(batch, axis=None)  # flattened, ascending
        n = ordered.size
        minv = ordered[round(n * 0.1)]
        maxv = ordered[round(n * 0.9)]
        return minv, maxv, batch
class RangeFromBatchMeanMinsMaxs:
    """Range estimator for activations: mean over batch items of the per-item
    min and max. Weights fall back to the exact min/max estimator."""

    def __call__(self, sess, tensor, dataset, is_weights=False):
        """Return (mean of per-item mins, mean of per-item maxes, batch)."""
        if is_weights:
            # Weights are a single tensor, not a batch: use the exact range.
            return RangeFromBatchMinMax()(sess, tensor, dataset, is_weights)
        batch = sess.run(tensor, dataset)
        # Flatten everything but the batch axis.
        per_item = np.reshape(batch, [batch.shape[0], np.prod(batch.shape[1:])])
        minv = per_item.min(axis=1).mean()
        maxv = per_item.max(axis=1).mean()
        return minv, maxv, batch
from copy import deepcopy
import scipy.stats
class RangeFromBatchKL:
    """Range estimator that picks clipping thresholds minimizing the KL
    divergence between the activation histogram and its quantized version
    (TensorRT-style entropy calibration). Weights use plain min/max.
    """

    BINS_NUMBER = 8192     # calibration histogram resolution
    QUANTIZE_SIZE = 256    # target quantized bin count (8-bit)

    def chunks(self, l, n):
        """Yield successive n-sized chunks from l."""
        for i in range(0, len(l), n):
            yield l[i:i + n]

    def smooth(self, y, box_pts):
        """Box-filter (moving average) smoothing of a 1-D histogram."""
        box = np.ones(box_pts) / box_pts
        y_smooth = np.convolve(y, box, mode='same')
        return y_smooth

    def quantize_x(self, origin, x):
        """Simulate quantizing histogram `origin` into `x` bins: each chunk's
        total mass is spread uniformly over its originally non-empty positions."""
        chunked_data = list(self.chunks(origin, len(origin) // x))
        foo = [sum(i) for i in chunked_data]  # per-chunk total mass
        final_array = []
        for m, piece in enumerate(chunked_data):
            weight = foo[m]
            if weight == 0:
                final_array += [0] * len(piece)
                continue
            binary_piece = np.array(piece > 0)
            replace_val = foo[m] / sum(binary_piece)
            final_array += list(replace_val * binary_piece)
        return final_array

    def calc_kld(self, P, start_bin_max, end_bin_max, start_bin_min, end_bin_min, delta, max_val, min_val):
        """Compute KL(P || quantized-P) for every candidate (min_bin, max_bin)
        clipping pair, stepping by QUANTIZE_SIZE; returns {(j, i): kld}."""
        klds = {}
        for i in range(start_bin_max, end_bin_max + 1, self.QUANTIZE_SIZE):
            for j in range(start_bin_min, end_bin_min + 1, self.QUANTIZE_SIZE):
                reference_distribution_P = deepcopy(P[j:i])
                left_outliers_count = np.sum(P[0:j])
                right_outliers_count = np.sum(P[i:self.BINS_NUMBER])
                # Fold the clipped outlier mass into the edge bins.
                reference_distribution_P[0] += left_outliers_count
                reference_distribution_P[-1] += right_outliers_count
                candidate_distribution_Q = self.quantize_x(reference_distribution_P, self.QUANTIZE_SIZE)
                left_outliers_P = deepcopy(P[:j + (i - j) // self.QUANTIZE_SIZE])
                right_outliers_P = deepcopy(P[i - (i - j) // self.QUANTIZE_SIZE:])
                left_replace_val = 0
                if sum(left_outliers_P > 0) > 0:
                    left_replace_val = sum(left_outliers_P) / sum(left_outliers_P > 0)
                right_replace_val = 0
                if sum(right_outliers_P > 0) > 0:
                    right_replace_val = sum(right_outliers_P) / sum(right_outliers_P > 0)
                # Rebuild Q over the full support so it is comparable with P.
                candidate_distribution_Q = list(left_replace_val * (left_outliers_P > 0)) + candidate_distribution_Q[(i - j) // self.QUANTIZE_SIZE:i - j - ( i - j) // self.QUANTIZE_SIZE] + list(right_replace_val * (right_outliers_P > 0))
                Q = np.array(candidate_distribution_Q)
                kld = scipy.stats.entropy(P, Q)
                # print((j,i), kld, (j + 0.5) * delta + (min_val - delta), (i + 0.5) * delta + (min_val - delta))
                klds[(j, i)] = kld
        return klds

    def convert_layer_output(self, data):
        """Histogram the layer output; returns (P, min, max, bin width).

        NOTE(review): the histogram call bins `data` (the whole batch) rather
        than `data_curr_image`, so each loop iteration adds an identical
        full-batch histogram — confirm whether `data_curr_image` was intended.
        """
        image_num = data.shape[0]
        max_all = np.max(data)
        min_all = np.min(data)
        delta = (max_all - min_all) / (self.BINS_NUMBER + 1)
        bins_all = np.arange(min_all, max_all, delta) # fixed bin size
        P = np.zeros(self.BINS_NUMBER)
        for image_idx in range(image_num):
            data_curr_image = np.ndarray.flatten(data[image_idx])
            n, bins = np.histogram(data, bins=bins_all)
            P = P + n
        return (P, min_all, max_all, delta)

    def find_min_max_kld(self, data):
        """Search the (min, max) thresholds with lowest KL divergence:
        first scan the max bin (min fixed at 0), then scan the min bin."""
        (P, min_data, max_data, delta) = self.convert_layer_output(data)
        P = self.smooth(P, 512)
        # find max first
        klds_max = self.calc_kld(P, self.QUANTIZE_SIZE, self.BINS_NUMBER, 0, 0, delta, max_data, min_data)
        (tmp, max_bin) = min(zip(klds_max.values(), klds_max.keys()))[1]
        klds_min = self.calc_kld(P, max_bin, max_bin, 0, max_bin - 1, delta, max_data, min_data)
        (min_bin, tmp) = min(zip(klds_min.values(), klds_min.keys()))[1]
        # Convert bin indices back to data-space thresholds.
        threshold_min = (min_bin) * delta + (min_data)
        threshold_max = (max_bin) * delta + (min_data)
        print('Min data', 'idx', threshold_min)
        print('Max data', 'idx', threshold_max)
        return (threshold_min, threshold_max)

    def __call__(self, sess, tensor, dataset, is_weights=False):
        """Return (min, max, batch): KL search for activations, exact min/max for weights."""
        if is_weights:
            return RangeFromBatchMinMax()(sess, tensor,dataset,is_weights)
        else:
            batch = sess.run(tensor, dataset)
            minv, maxv = self.find_min_max_kld(batch)
            return minv, maxv, batch
| StarcoderdataPython |
3519384 | import os, sys, json, re
import cStringIO, StringIO, io
from flask import Flask, jsonify, abort, request, make_response
import subprocess, requests
import logging
# Planner binary locations (machine-specific absolute paths;
# consider moving these to environment variables or a config file).
optic_path='/home/swarup/Documents/optic/debug/optic/optic-clp'
ff_path='/home/swarup/Documents/Metric-FF-v2.0/ff'
# Which planner backend to use ("FF" = Metric-FF).
planner = "FF"
# PDDL domain file used for every generated warehouse problem.
warehouseDomainFile = "whdomain-2.pddl"
# use end point instead of path (containerized FF service)
ff_end_point = "http://ff-metric_1:5000"
#ff_end_point = "http://127.0.0.1:5000"
app = Flask(__name__)
@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 body instead of Flask's default HTML error page."""
    body = jsonify({'error': 'Not found'})
    return make_response(body, 404)
@app.route('/warehouse/api/v1.0/test', methods=['GET'])
def test_service():
    """Liveness probe: confirms the workflow-generation service is reachable."""
    payload = jsonify({'Tested ok': 'Workflow Gen Service found'})
    return make_response(payload, 200)
def extractGoals(inp):
    """Return the goal list from a mission request dict (key: 'goal')."""
    goals = inp['goal']
    return goals
def extractHints(inp):
    """Return [responseTimeout, missionDeadline] from a mission request dict."""
    timeout = inp['responseTimeout']
    deadline = inp['missionDeadline']
    return [timeout, deadline]
def queryKB(spec, obj, level):
    """Load the knowledge-base snapshot.

    NOTE(review): spec/obj/level are currently ignored — the KB is always
    read from the local file 'kb.json'.
    """
    with open('kb.json') as fp:
        return json.load(fp)
def generatePddlProblemFile(pddlProblemFileName, entity, level, whState, missionGoal, missionHints):
    # Write a PDDL problem file from the warehouse state dict and mission goals.
    # whState keys used: "Robot", "Place", "Object", "Waypoint"; missionGoal is
    # a list of (object, place) pairs. NOTE(review): missionHints is unused.
    # NOTE(review): Python 2 code (bare print statements); indentation was
    # reconstructed during review.
    pddlProblemHeaderString = "(define (problem " + entity + "-" + level + ")\n" \
        "\t (:domain warehouse-domain)\n"
    objectString = "\n\t(:objects"
    initString = "\n\t(:init"
    goalString = "\n\t(:goal\n\t\t (and"
    metricString = "\n\t(:metric minimize (total-time))"
    # Robot Current State
    robotList = whState["Robot"]
    for robot in robotList.keys():
        robotDetails = robotList[robot]
        objectString = objectString + "\n\t\t" + robot + " - " + "Robot"
        initString = initString + "\n\t\t" + "(is-at " + robot + " " + robotDetails["location"] + ")"
        initString = initString + "\n\t\t" + "(= (charge-level " + robot + ") " + robotDetails["charge-level"] + ")"
        initString = initString + "\n\t\t" + "(= (capacity " + robot + ") " + robotDetails["capacity"] + ")"
        initString = initString + "\n\t\t" + "(= (max-charge " + robot + ") " + robotDetails["max-charge"] + ")"
        initString = initString + "\n\t\t" + "(= (high-charge " + robot + ") " + robotDetails["high-charge"] + ")"
        initString = initString + "\n\t\t" + "(= (low-charge " + robot + ") " + robotDetails["low-charge"] + ")"
        if robotDetails["charging-state"] == "0":
            initString = initString + "\n\t\t" + "(not (is-recharging " + robot + "))"
        else:
            initString = initString + "\n\t\t" + "(is-recharging " + robot + ")"
    #Place current situation
    placeList = whState["Place"]
    for place in placeList.keys():
        placeDetails = placeList[place]
        objectString = objectString + "\n\t\t" + place + " - " + "Place"
        initString = initString + "\n\t\t" + "(situated-at " + place + " " + placeDetails["location"] + ")"
    #Objects
    objectList = whState["Object"]
    for object in objectList.keys():
        objectDetails = objectList[object]
        objectString = objectString + "\n\t\t" + object + " - " + "Object"
        initString = initString + "\n\t\t" + "(is-on " + object + " " + objectDetails["location"] + ")"
    #Waypoints
    waypointList = whState["Waypoint"]
    print waypointList
    for wpt in waypointList.keys():
        objectString = objectString + "\n\t\t" + wpt + " - " + "Waypoint"
        for w in waypointList[wpt]["can-move"]:
            initString = initString + "\n\t\t" + "(can-move " + wpt + " " + w + ")"
    #Goals : this will come from the mission
    for goal in missionGoal:
        goalString = goalString + "\n\t\t\t(is-on " + goal[0] + " " + goal[1] + ")"
    OutFile = open(pddlProblemFileName, 'w') #overwrites the earlier file
    OutFile.write(pddlProblemHeaderString)
    OutFile.write(objectString)
    OutFile.write("\n\t)")
    OutFile.write(initString)
    OutFile.write("\n\t)")
    OutFile.write(goalString)
    OutFile.write("\n\t\t)\n\t)\n")
    OutFile.write(metricString)
    OutFile.write("\n )")
    OutFile.close()
    # Debug dump of the generated problem sections.
    print pddlProblemHeaderString
    print objectString
    print ")\n"
    print initString
    print ")\n"
    print goalString
    print "))\n"
    print metricString
    print ")"
def callPlanner(warehouseState, goals, hints):
    """Build a PDDL problem for the warehouse domain, run the FF planner
    on it, and return the parsed plan as a list of step strings.

    warehouseState -- warehouse state model used to build the PDDL problem
    goals          -- mission goals forwarded to the problem generator
    hints          -- planner hints forwarded to the problem generator

    NOTE(review): relies on module-level globals (ff_path, subprocess,
    StringIO, re) and on files 'whdomain-2.pddl' / 'warehouseProblemGen.pddl'
    existing in the working directory.
    """
    pddlProblem = constructPddlProblem("warehouse", "waypoint", warehouseState, goals, hints)
    # - to be taken up later addPolicies(pddlDomain, policy)
    ##outputPlan = subprocess.check_output([optic_path,'-E','domain.pddl','problem.pddl'])
    # Invoke the FF planner binary on the generated domain/problem files.
    outputPlan = subprocess.check_output([ff_path,'-p', './', '-o', 'whdomain-2.pddl','-f','warehouseProblemGen.pddl'])
    plan = outputPlan.decode('utf-8')
    print plan
    print "Process the output of FF planner"
    #extract plan, jsonify and return
    planData = (StringIO.StringIO(plan)).readlines()
    # print planData
    # find the 1st step pf plan matching step
    # NOTE(review): the pattern "step*" matches "ste" followed by zero or more
    # "p" characters; "step.*" (or a plain substring search) was likely
    # intended -- confirm against real FF output before changing.
    regex = re.compile("step*")
    lst = [m for l in planData for m in [regex.search(l)]]
    # Index of the LAST line matching the pattern (max over non-None hits).
    m1 = max(index for index, item in enumerate(lst, 0) if item is not None)
    print m1, planData[m1]
    regex = re.compile("plan cost*")
    lst = [m for l in planData for m in [regex.search(l)]]
    m2 = max(index for index, item in enumerate(lst, 0) if item is not None)
    print m2, planData[m2]
    finalPlan = []
    if (m2 > m1) :
        # Plan steps lie between the "step" header line and the "plan cost"
        # line; each looks like "<n>: <action> <args...>".
        for i in range(m1,m2, 1):
            step = planData[i].split(':')[1]
            print step
            planStep = (step.strip()).split(' ')
            print planStep
            if (planStep[0] == 'moveToWaypoint'):
                print "changing the format of move to suit the MOO_Manager"
                # Drop the third token -- presumably the source waypoint;
                # confirm against the MOO_Manager's expected move format.
                del(planStep[2])
                step = " ".join(str(x) for x in planStep)
            finalPlan.append(step.strip())
            # print step[1]
    print "Final plan"
    print finalPlan
    return finalPlan
@app.route('/warehouse/api/v1.0/genplan', methods=['POST'])
def compute_mission_data():
if not request.json:
abort(400)
mission = request.json
# Extract mission and hints
goals = extractGoals(mission)
hints = extractHints(mission)
print "mission, goals, hints"
print mission, goals, hints
# get waypoint model and policies from KB
warehouseState = queryKB("state", "warehouse", "waypoint")
#print warehouseState
#waypointPolicy = extractWaypointPolicy("policy", "warehouse", "waypoint")
#p waypointPolicy
#Generate problem file which is in the string warehouseProblemFile
warehouseProblemFile = "warehouseProblemGen.pddl"
generatePddlProblemFile(warehouseProblemFile, "warehouse", "waypoint", warehouseState, goals, hints)
#Get the domain file whose name is a string in warehouseDomainFile
#warehouseDomainFile =
res = requests.post(ff_end_point+"/planner/upload_domain", files={'file': open(warehouseDomainFile, 'rb')})
print res.text, warehouseDomainFile
res = requests.post(ff_end_point+"/planner/upload_problem", files={'file': open(warehouseProblemFile, 'rb')})
print res.text, warehouseProblemFile
payload = {"planner" : planner, "domain" : warehouseDomainFile, "problem" : warehouseProblemFile}
res = requests.get(ff_end_point+"/planner/generate_plan", json=payload)
print res.json()
return jsonify(res.json()), 200
if __name__ == '__main__':
    # Start the Flask development server on all interfaces, port 5001.
    # NOTE(review): debug=True enables the Werkzeug debugger and must not be
    # used in production.
    app.run(host='0.0.0.0', port=5001, debug=True)
| StarcoderdataPython |
1782992 | <filename>qtstyles/sheet.py<gh_stars>1-10
'''
Defines -
Sheet: a class representing a style sheet object
with attributes such as path and contents.
get_style_sheets: a function that returns a dictionary
with style sheet names as keys and sheet objects as values.
'''
import os
from qtstyles import errors
class Sheet(object):
    """A Qt style sheet file.

    Wraps a ``.qss`` file path and exposes its text through the lazily
    evaluated :attr:`contents` property; the file is read from disk at
    most once, on first access.
    """

    def __init__(self, path):
        """Validate and store the style sheet path.

        path = style sheet file path ending with '.qss'.
        """
        if not isinstance(path, str):
            raise errors.SheetPathTypeError
        if not path.endswith(".qss"):
            raise errors.SheetPathValueError
        if not os.path.isfile(path):
            raise errors.SheetPathFileDoesntExist
        self._path = path
        # Cache for the file text; filled in on first `contents` access.
        self._contents = None

    @property
    def path(self):
        """The style sheet file path this object was constructed with."""
        return self._path

    @property
    def contents(self):
        """The style sheet text, read from disk only on first access."""
        if self._contents is None:
            with open(self._path, "r") as qss_file:
                self._contents = qss_file.read()
        return self._contents
def get_style_sheets():
    '''
    Return a dictionary mapping style sheet names to Sheet objects.

    Sheets are discovered in the package's "style_sheets" directory; a
    sheet's name is its file name without the ".qss" suffix.  There must
    be a sheet called 'default' which is empty.
    '''
    package_dir = os.path.dirname(os.path.abspath(__file__))
    sheets_dir = os.path.join(package_dir, "style_sheets")
    sheets = {}
    for filename in os.listdir(sheets_dir):
        # Skip non-sheet entries such as __init__.py and __pycache__.
        if "__" in filename:
            continue
        sheets[filename.replace(".qss", "")] = Sheet(os.path.join(sheets_dir, filename))
    return sheets
| StarcoderdataPython |
1708693 | import cvxpy as cp
import math
import numpy as np
from collections import OrderedDict
from functools import partial
from multiprocessing import Pool, cpu_count
from scipy.optimize import minimize_scalar
from tqdm.auto import tqdm
def p_num_samples(epsilon, delta, n_x=3, const=None):
    """Compute the number of samples needed to satisfy the specified probabilistic guarantees for the p-norm ball reachable set estimate

    :param epsilon: The accuracy parameter
    :type epsilon: float
    :param delta: The confidence parameter
    :type delta: float
    :param n_x: The state dimension, defaults to 3
    :type n_x: int
    :param const: The constraints placed on the parameters A and b, defaults to None
    :type const: string, optional
    :raises ValueError: If ``const`` is not None, "diagonal" or "scalar"
    :return: The number of samples needed to satisfy the specified probabilistic guarantees
    :rtype: int
    """
    # n_theta = number of free parameters in (A, b) under the given constraint.
    if const is None:
        n_theta = 0.5 * (n_x ** 2 + 3 * n_x)
    elif const == "diagonal":
        n_theta = 2 * n_x
    elif const == "scalar":
        n_theta = 1
    else:
        # Previously an unknown const left n_theta unbound (NameError).
        raise ValueError('const must be None, "diagonal" or "scalar"')
    N = math.ceil(math.e * (math.log(1 / delta) + n_theta) / (epsilon * (math.e - 1)))
    return N
def solve_p_norm(sample, n_x=3, p=2, const=None):
    """Solves the scenario relaxation problem for the given sample with p-Norm Balls

    :param sample: Sample from dynamical system (num_samples, n_x)
    :type sample: numpy.ndarray
    :param n_x: The state dimension, defaults to 3
    :type n_x: int, optional
    :param p: The order of p-norm, defaults to 2
    :type p: int, optional
    :param const: The constraints placed on the parameters A and b, defaults to None
    :type const: string, optional
    :raises ValueError: If ``const`` is not None, "diagonal" or "scalar"
    :return: The values of matrix A and vector b corresponding to the optimal p-Norm Ball, as well as the status of the optimizer.
    :rtype: tuple
    """
    if const is None:
        # Full parameterization: symmetric A and a free offset b.
        A = cp.Variable((n_x, n_x), symmetric=True)
        b = cp.Variable((n_x, 1))
    elif const == "diagonal":
        a = cp.Variable((n_x, 1))
        A = cp.diag(a)
        b = cp.Variable((n_x, 1))
    elif const == "scalar":
        sigma = cp.Variable()
        A = sigma * np.identity(n_x)
        b = np.zeros((n_x, 1))
    else:
        # Previously an unknown const left A and b unbound (NameError).
        raise ValueError('const must be None, "diagonal" or "scalar"')
    # Maximizing log det(A) shrinks the ball's volume while the constraints
    # keep every sample inside it.
    obj = cp.Minimize(-cp.log_det(A))
    constraints = [cp.pnorm(A @ r.reshape(n_x, 1) - b, p=p) <= 1 for r in sample]
    prob = cp.Problem(obj, constraints)
    prob.solve()
    if const != "scalar":
        return A.value, b.value, prob.status
    else:
        # In the scalar case A is a cvxpy expression and b a numpy array,
        # mirroring how they were constructed above.
        return A, b, prob.status
def multi_p_norm(samples, p=2, const=None):
    """Computes the p-norm ball reachable set estimates across a series of timesteps

    :param samples: The samples from a dynamic system across time, an array of shape (num_samples, timesteps, state_dim)
    :type samples: numpy.ndarray
    :param p: The order of p-norm, defaults to 2
    :type p: int, optional
    :param const: The constraints placed on the parameters A and b, defaults to None
    :type const: string, optional
    :raises ValueError: If ``samples`` is not a 3-dimensional array
    :return: A list with one dict per timestep, each with keys "A", "b" and "status"
    :rtype: list
    """
    if len(samples.shape) != 3:
        raise ValueError("Samples must be of shape (num_samples, timesteps, state_dim)")
    n_x = samples.shape[2]
    keys = ("A", "b", "status")
    solve_p_norm_map = partial(solve_p_norm, n_x=n_x, p=p, const=const)
    # Use a context manager so the worker processes are always cleaned up
    # (the original leaked the Pool, and also shadowed the parameter ``p``
    # by rebinding it to the Pool).
    with Pool(cpu_count()) as pool:
        solutions = [
            dict(zip(keys, sol))
            for sol in tqdm(
                pool.imap(solve_p_norm_map, samples.swapaxes(0, 1)),
                total=samples.shape[1],
            )
        ]
    return solutions
def p_norm_cont(arr, axis, default_val, n_x, A_val, b_val, p, minimum=True):
    """Solve for the optimal value that satisfies the p-Norm Ball conditions at the specified axis

    :param arr: Array of shape (n_x - 1,) containing the independent variables of the p-norm condition
    :type arr: numpy.ndarray
    :param axis: The axis of the dependent variable for which to solve for (i.e. z -> axis=2).
    :type axis: int
    :param default_val: The value to return if no solution for the dependent variable is found that satisfies the p-norm conditions
    :type default_val: float
    :param n_x: The state dimension
    :type n_x: int
    :param A_val: The matrix of shape (n_x, n_x) corresponding to the optimal p-norm ball
    :type A_val: numpy.ndarray
    :param b_val: The vector of shape (n_x, 1) corresponding to the optimal p-norm ball
    :type b_val: numpy.ndarray
    :param p: The order of p-norm
    :type p: int
    :param minimum: True if optimizing for the minimal value of the dependent variable that satisfies the p-norm conditions, defaults to True
    :type minimum: bool, optional
    :return: The value at the specified axis which corresponds the the optimal value of the (n_x, 1) vector that satisfies the p-Norm Ball conditions at the specified axis
    :rtype: float
    """
    vec = cp.Variable((n_x, 1))
    # Pin every coordinate except ``axis`` to the supplied values.
    other_dims = list(range(n_x))
    other_dims.remove(axis)
    constraints = [vec[i][0] == arr[j] for i, j in zip(other_dims, range(n_x - 1))]
    constraints.append(cp.pnorm(A_val @ vec - b_val, p=p) <= 1)
    if minimum:
        obj = cp.Minimize(vec[axis])
    else:
        obj = cp.Maximize(vec[axis])
    prob = cp.Problem(obj, constraints)
    try:
        prob.solve()
    except Exception:
        # Was a bare ``except:`` -- keep the best-effort fallback but stop
        # trapping SystemExit/KeyboardInterrupt.
        return default_val
    if prob.status != "optimal":
        return default_val
    return vec.value[axis]
def p_norm_cont_proj(arr, axis, default_val, n_x, A_val, b_val, p):
    """Minimize the p-norm value with respect to the coordinate at ``axis``.

    :param arr: Array of shape (n_x - 1,) with the fixed (independent) coordinates.
    :type arr: numpy.ndarray
    :param axis: Index of the dependent coordinate to solve for (i.e. z -> axis=2).
    :type axis: int
    :param default_val: Value returned when the minimizing point falls outside the ball.
    :type default_val: float
    :param n_x: The state dimension.
    :type n_x: int
    :param A_val: The (n_x, n_x) matrix of the optimal p-norm ball.
    :type A_val: numpy.ndarray
    :param b_val: The (n_x, 1) vector of the optimal p-norm ball.
    :type b_val: numpy.ndarray
    :param p: The order of p-norm.
    :type p: int
    :return: The coordinate value minimizing the p-norm, or ``default_val``.
    :rtype: float
    """
    point = np.zeros(n_x)
    free_axes = list(range(n_x))
    free_axes.remove(axis)
    # Copy the fixed coordinates into every slot except ``axis``.
    for dim, j in zip(free_axes, range(n_x - 1)):
        point[dim] = arr[j]

    def objective(t):
        # p-norm of A x - b with the dependent coordinate set to t.
        point[axis] = t
        return np.linalg.norm(A_val @ point.reshape((n_x, 1)) - b_val, ord=p)

    best = minimize_scalar(objective)
    point[axis] = best.x
    # Accept the minimizer only if it actually lies inside the unit ball.
    if np.linalg.norm(A_val @ point.reshape((n_x, 1)) - b_val, ord=p) <= 1:
        return best.x
    return default_val
def p_compute_contour_2D(sample, A_val, b_val, cont_axis=2, n_x=3, p=2, grid_n=200):
    """Computes the 3D contour for 2 dimensions based on sample data and the A_val, and b_val corresponding to the optimal p-norm ball.

    :param sample: Sample from dynamical system (num_samples, n_x)
    :type sample: numpy.ndarray
    :param A_val: The matrix of shape (n_x, n_x) corresponding to the optimal p-norm ball
    :type A_val: numpy.ndarray
    :param b_val: The vector of shape (n_x, 1) corresponding to the optimal p-norm ball
    :type b_val: numpy.ndarray
    :param cont_axis: The axis for which the contours are to be solved for, defaults to 2
    :type cont_axis: int, optional
    :param n_x: The state dimension, defaults to 3
    :type n_x: int, optional
    :param p: The order of p-norm, defaults to 2
    :type p: int, optional
    :param grid_n: The side length of the cube of points to be used for computing contours, defaults to 200
    :type grid_n: int, optional
    :raises ValueError: If ``cont_axis`` is not 0, 1 or 2
    :return: The meshgrid, corresponding computed contour, and the extremum values for the chosen axis
    :rtype: tuple
    """
    x_min, x_max = sample[:, 0].min(), sample[:, 0].max()
    y_min, y_max = sample[:, 1].min(), sample[:, 1].max()
    z_min, z_max = sample[:, 2].min(), sample[:, 2].max()
    # Pad each axis by 40% of its sample range.
    x = np.linspace(
        x_min - 0.4 * (x_max - x_min), x_max + 0.4 * (x_max - x_min), grid_n
    )
    y = np.linspace(
        y_min - 0.4 * (y_max - y_min), y_max + 0.4 * (y_max - y_min), grid_n
    )
    # FIX: the lower bound previously used x_min (copy-paste error); the z
    # grid must be anchored at z_min.
    z = np.linspace(
        z_min - 0.4 * (z_max - z_min), z_max + 0.4 * (z_max - z_min), grid_n
    )
    if cont_axis == 2:
        d0, d1 = np.meshgrid(x, y)
        c_min, c_max = z_min, z_max
    elif cont_axis == 1:
        d0, d1 = np.meshgrid(x, z)
        c_min, c_max = y_min, y_max
    elif cont_axis == 0:
        d0, d1 = np.meshgrid(y, z)
        c_min, c_max = x_min, x_max
    else:
        # Previously an invalid axis left d0/d1 unbound (NameError).
        raise ValueError("cont_axis must be 0, 1 or 2")
    d2 = np.array([d0.flatten(), d1.flatten()]).T
    # Project every grid point onto the ball along cont_axis; points with no
    # valid projection get the sentinel c_max + 1.
    solve_cont_d2 = partial(
        p_norm_cont_proj,
        axis=cont_axis,
        default_val=c_max + 1,
        n_x=n_x,
        A_val=A_val,
        b_val=b_val,
        p=p,
    )
    cont = np.fromiter(map(solve_cont_d2, d2), dtype=np.float64).reshape(grid_n, grid_n)
    return d0, d1, cont, c_min, c_max
def p_compute_contour_3D(
    sample, A_val, b_val, cont_axis=2, n_x=3, p=2, grid_n=200, stretch=0.4
):
    """Computes the 3D contour for 3 dimensions based on sample data and the A_val, and b_val corresponding to the optimal p-norm ball.

    :param sample: Sample from dynamical system (num_samples, n_x)
    :type sample: numpy.ndarray
    :param A_val: The matrix of shape (n_x, n_x) corresponding to the optimal p-norm ball
    :type A_val: numpy.ndarray
    :param b_val: The vector of shape (n_x, 1) corresponding to the optimal p-norm ball
    :type b_val: numpy.ndarray
    :param cont_axis: The axis for which the contours are to be solved for, defaults to 2
    :type cont_axis: int, optional
    :param n_x: The state dimension, defaults to 3
    :type n_x: int, optional
    :param p: The order of p-norm, defaults to 2
    :type p: int, optional
    :param grid_n: The side length of the cube of points to be used for computing contours, defaults to 200
    :type grid_n: int, optional
    :param stretch: The factor by which to stretch the grid used to compute the contour, defaults to 0.4
    :type stretch: float, optional
    :raises ValueError: If ``cont_axis`` is not 0, 1 or 2
    :return: The meshgrid, corresponding computed min/max contours, and the extremum values for the chosen axis
    :rtype: tuple
    """
    x_min, x_max = sample[:, 0].min(), sample[:, 0].max()
    y_min, y_max = sample[:, 1].min(), sample[:, 1].max()
    z_min, z_max = sample[:, 2].min(), sample[:, 2].max()
    x = np.linspace(
        x_min - stretch * (x_max - x_min), x_max + stretch * (x_max - x_min), grid_n
    )
    y = np.linspace(
        y_min - stretch * (y_max - y_min), y_max + stretch * (y_max - y_min), grid_n
    )
    # FIX: the lower bound previously used x_min (copy-paste error); the z
    # grid must be anchored at z_min.
    z = np.linspace(
        z_min - stretch * (z_max - z_min), z_max + stretch * (z_max - z_min), grid_n
    )
    if cont_axis == 2:
        d0, d1 = np.meshgrid(x, y)
        c_min, c_max = z_min, z_max
    elif cont_axis == 1:
        d0, d1 = np.meshgrid(x, z)
        c_min, c_max = y_min, y_max
    elif cont_axis == 0:
        d0, d1 = np.meshgrid(y, z)
        c_min, c_max = x_min, x_max
    else:
        # Previously an invalid axis left d0/d1 unbound (NameError).
        raise ValueError("cont_axis must be 0, 1 or 2")
    d2 = np.array([d0.flatten(), d1.flatten()]).T
    # Lower surface: minimal feasible coordinate (sentinel c_max + 1 if none).
    solve_cont_d2_min = partial(
        p_norm_cont,
        axis=cont_axis,
        default_val=c_max + 1,
        n_x=n_x,
        A_val=A_val,
        b_val=b_val,
        p=p,
        minimum=True,
    )
    # Upper surface: maximal feasible coordinate (sentinel c_min - 1 if none).
    solve_cont_d2_max = partial(
        p_norm_cont,
        axis=cont_axis,
        default_val=c_min - 1,
        n_x=n_x,
        A_val=A_val,
        b_val=b_val,
        p=p,
        minimum=False,
    )
    cont_min = np.fromiter(map(solve_cont_d2_min, d2), dtype=np.float64).reshape(
        grid_n, grid_n
    )
    cont_max = np.fromiter(map(solve_cont_d2_max, d2), dtype=np.float64).reshape(
        grid_n, grid_n
    )
    return d0, d1, cont_min, cont_max, c_min, c_max
def p_compute_vals(sample, A_val, b_val, p=2, grid_n=200):
    """Compute the grid values contained in a 1-dimensional p-norm ball.

    :param sample: The samples at a given time step, an array of shape (num_samples,)
    :type sample: numpy.ndarray
    :param A_val: The (1, 1) matrix of the optimal p-norm ball
    :type A_val: numpy.ndarray
    :param b_val: The (1, 1) vector of the optimal p-norm ball
    :type b_val: numpy.ndarray
    :param p: The order of p-norm, defaults to 2
    :type p: int, optional
    :param grid_n: Number of grid points to test at the given time step, defaults to 200
    :type grid_n: int, optional
    :return: The grid values lying inside the p-norm ball
    :rtype: list
    """
    lo, hi = sample.min(), sample.max()
    # Pad the sample range by 40% on each side before gridding.
    pad = 0.4 * (hi - lo)
    grid = np.linspace(lo - pad, hi + pad, grid_n)
    inside = []
    for candidate in grid:
        try:
            if np.linalg.norm(A_val @ np.array([[candidate]]) - b_val, ord=p) <= 1:
                inside.append(candidate)
        except ValueError:
            # Best-effort: skip candidates that fail the norm computation.
            pass
    return inside
def p_get_dict(i, samples, solution_list, items, grid_n=50):
    """Build the contour dictionary for the ``i``-th selected timestep.

    :param i: Index into ``items`` selecting which timestep to process
    :type i: int
    :param samples: Array of shape (num_samples, time, 3)
    :type samples: numpy.ndarray
    :param solution_list: List of solutions, each a dict with keys ("A", "b", "status")
    :type solution_list: list
    :param items: Indices of the timesteps at which reachable set estimates are made
    :type items: list
    :param grid_n: The side length of the cube of points used for computing contours, defaults to 50
    :type grid_n: int, optional
    :return: Dict mapping labels ("xv", "yv", "z_cont", "z_cont2", "z_min", "z_max") to the p_compute_contour_3D outputs
    :rtype: dict
    """
    timestep = items[i]
    solution = solution_list[timestep]
    contour = p_compute_contour_3D(
        samples[:, timestep, :], solution["A"], solution["b"], grid_n=grid_n
    )
    return dict(zip(("xv", "yv", "z_cont", "z_cont2", "z_min", "z_max"), contour))
def p_dict_list(
    samples, solution_list, num_parts, num_indices=20, logspace=True, grid_n=50
):
    """Generates a list of dictionaries, each containing the contour information for a p-norm ball reachable set estimate at a given time

    :param samples: Array of shape (num_samples, time, 3)
    :type samples: numpy.ndarray
    :param solution_list: List of solutions where each solution is a dictionary with keys ("A", "b", "status")
    :type solution_list: list
    :param num_parts: The number of total timesteps for the samples
    :type num_parts: int
    :param num_indices: The number of indices corresponding to the number of reachable set estimates made at different times, defaults to 20
    :type num_indices: int, optional
    :param logspace: If True, a logarithmic scale is used for choosing times to compute reachable set estimates, defaults to True
    :type logspace: bool, optional
    :param grid_n: The side length of the cube of points to be used for computing contours, defaults to 50
    :type grid_n: int, optional
    :return: A list of dictionaries, each containing the contour information for a p-norm ball reachable set estimate at a given time
    :rtype: list
    """
    if logspace:
        log_ceil = np.log10(num_parts)
        # Deduplicate while preserving order (several log-spaced values can
        # round to the same index).
        items = list(
            OrderedDict.fromkeys(
                [int(i) - 1 for i in np.logspace(0, log_ceil, num_indices)]
            )
        )
    else:
        # NOTE(review): np.linspace(0, num_parts, ...) includes num_parts
        # itself, which indexes one past the last timestep -- confirm intended.
        items = [int(i) for i in np.linspace(0, num_parts, num_indices)]
    get_dict = partial(
        p_get_dict,
        samples=samples,
        solution_list=solution_list,
        items=items,
        grid_n=grid_n,
    )
    # Use a context manager so the worker processes are always cleaned up
    # (the original leaked the Pool).
    with Pool(cpu_count()) as pool:
        dict_list = [
            d for d in tqdm(pool.imap(get_dict, np.arange(len(items))), total=len(items))
        ]
    return dict_list
def p_emp_estimate(samples, A_val, b_val, n_x=3, p=2):
    """Computes the ratio of samples within the estimated reachable set for the p-norm ball reachable set estimation

    :param samples: Sample from dynamical system (num_samples, n_x)
    :type samples: numpy.ndarray
    :param A_val: The matrix of shape (n_x, n_x) corresponding to the optimal p-norm ball
    :type A_val: numpy.ndarray
    :param b_val: The vector of shape (n_x, 1) corresponding to the optimal p-norm ball
    :type b_val: numpy.ndarray
    :param n_x: The state dimension, defaults to 3
    :type n_x: int, optional
    :param p: The order of p-norm, defaults to 2
    :type p: int, optional
    :raises ValueError: If ``samples`` is empty
    :return: The ratio of samples within the estimated reachability set
    :rtype: float
    """
    num_samples = samples.shape[0]
    if num_samples == 0:
        # Previously an empty array caused an opaque ZeroDivisionError below.
        raise ValueError("samples must contain at least one sample")
    count = 0
    for sample in samples:
        vec = sample.reshape(n_x, 1)
        # A sample is inside the ball iff ||A x - b||_p <= 1.
        if np.linalg.norm(A_val @ vec - b_val, ord=p) <= 1:
            count += 1
    return count / num_samples
def p_get_reachable_2D(samples, A_val, b_val, p=2, grid_n=50):
    """Collect the grid points inside a 2-D p-norm ball reachable set estimate.

    Utility for plotting the reachable set estimate for two state variables.

    :param samples: Samples of shape (num_samples, 2)
    :type samples: numpy.ndarray
    :param A_val: The (2, 2) matrix of the reachable set estimate
    :type A_val: numpy.ndarray
    :param b_val: The (2, 1) vector of the reachable set estimate
    :type b_val: numpy.ndarray
    :param p: The order of the p-norm ball, defaults to 2
    :type p: int, optional
    :param grid_n: Side length of the grid of candidate points, defaults to 50
    :type grid_n: int, optional
    :return: The x and y values included in the p-norm ball
    :rtype: tuple
    """
    x_lo, x_hi = samples[:, 0].min(), samples[:, 0].max()
    y_lo, y_hi = samples[:, 1].min(), samples[:, 1].max()
    # Pad each axis by 40% of its sample range before gridding.
    x_pad = 0.4 * (x_hi - x_lo)
    y_pad = 0.4 * (y_hi - y_lo)
    x_grid = np.linspace(x_lo - x_pad, x_hi + x_pad, grid_n)
    y_grid = np.linspace(y_lo - y_pad, y_hi + y_pad, grid_n)
    gx, gy = np.meshgrid(x_grid, y_grid)
    candidates = np.array([gx.flatten(), gy.flatten()]).T
    xs, ys = [], []
    for point in candidates:
        # Keep the point iff ||A x - b||_p <= 1.
        if np.linalg.norm(A_val @ point.reshape((2, 1)) - b_val, ord=p) <= 1:
            xs.append(point[0])
            ys.append(point[1])
    return xs, ys
def p_get_reachable_3D(samples, A_val, b_val, p=2, grid_n=25):
    """Collect the grid points inside a 3-D p-norm ball reachable set estimate.

    Utility for plotting the reachable set estimate for three state variables.

    :param samples: Samples of shape (num_samples, 3)
    :type samples: numpy.ndarray
    :param A_val: The (3, 3) matrix of the reachable set estimate
    :type A_val: numpy.ndarray
    :param b_val: The (3, 1) vector of the reachable set estimate
    :type b_val: numpy.ndarray
    :param p: The order of the p-norm ball, defaults to 2
    :type p: int, optional
    :param grid_n: Side length of the cube of candidate points, defaults to 25
    :type grid_n: int, optional
    :return: The x, y and z values included in the p-norm ball
    :rtype: tuple
    """
    # Per-axis sample ranges, each padded by 40% before gridding.
    lows = [samples[:, d].min() for d in range(3)]
    highs = [samples[:, d].max() for d in range(3)]
    axes = [
        np.linspace(lo - 0.4 * (hi - lo), hi + 0.4 * (hi - lo), grid_n)
        for lo, hi in zip(lows, highs)
    ]
    gx, gy, gz = np.meshgrid(axes[0], axes[1], axes[2])
    candidates = np.array([gx.flatten(), gy.flatten(), gz.flatten()]).T
    xs, ys, zs = [], [], []
    for point in candidates:
        # Keep the point iff ||A x - b||_p <= 1.
        if np.linalg.norm(A_val @ point.reshape((3, 1)) - b_val, ord=p) <= 1:
            xs.append(point[0])
            ys.append(point[1])
            zs.append(point[2])
    return xs, ys, zs
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.