| id (string, length 1-265) | text (string, length 6-5.19M) | dataset_id (string, 7 classes) |
|---|---|---|
107251 | <reponame>LaudateCorpus1/llvm-project
#!/usr/bin/env python
#===- cppreference_parser.py - ------------------------------*- python -*--===#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===------------------------------------------------------------------------===#
from bs4 import BeautifulSoup, NavigableString
import collections
import multiprocessing
import os
import re
import signal
import sys
class Symbol:
def __init__(self, name, namespace, headers):
# unqualified symbol name, e.g. "move"
self.name = name
# namespace of the symbol (with trailing "::"), e.g. "std::", "" (global scope)
# None for C symbols.
self.namespace = namespace
# a list of corresponding headers
self.headers = headers
def _HasClass(tag, *classes):
for c in tag.get('class', []):
if c in classes:
return True
return False
def _ParseSymbolPage(symbol_page_html, symbol_name):
"""Parse symbol page and retrieve the include header defined in this page.
The symbol page provides the header for the symbol, specifically in the
"Defined in header <header>" section. An example:
<tr class="t-dsc-header">
<td colspan="2"> <div>Defined in header <code><ratio></code> </div>
</td></tr>
Returns a list of headers.
"""
headers = set()
all_headers = set()
soup = BeautifulSoup(symbol_page_html, "html.parser")
# Rows in table are like:
# Defined in header <foo> .t-dsc-header
# Defined in header <bar> .t-dsc-header
# decl1 .t-dcl
# Defined in header <baz> .t-dsc-header
# decl2 .t-dcl
for table in soup.select('table.t-dcl-begin, table.t-dsc-begin'):
current_headers = []
was_decl = False
for row in table.select('tr'):
if _HasClass(row, 't-dcl', 't-dsc'):
was_decl = True
# Symbols are in the first cell.
found_symbols = row.find('td').stripped_strings
if symbol_name not in found_symbols:
continue
headers.update(current_headers)
elif _HasClass(row, 't-dsc-header'):
# If we saw a decl since the last header, this is a new block of headers
# for a new block of decls.
if was_decl:
current_headers = []
was_decl = False
# There are also .t-dsc-header for "defined in namespace".
if not "Defined in header " in row.text:
continue
# The interesting header content (e.g. <cstdlib>) is wrapped in <code>.
for header_code in row.find_all("code"):
current_headers.append(header_code.text)
all_headers.add(header_code.text)
# If the symbol was never named, consider all named headers.
return headers or all_headers
def _ParseIndexPage(index_page_html):
"""Parse index page.
The index page lists all std symbols and hrefs to their detailed pages
(which contain the defined header). An example:
<a href="abs.html" title="abs"><tt>abs()</tt></a> (int) <br>
<a href="acos.html" title="acos"><tt>acos()</tt></a> <br>
Returns a list of tuple (symbol_name, relative_path_to_symbol_page, variant).
"""
symbols = []
soup = BeautifulSoup(index_page_html, "html.parser")
for symbol_href in soup.select("a[title]"):
# Ignore annotated symbols like "acos<>() (std::complex)".
# These tend to be overloads, and the primary entry is more useful.
# This accidentally accepts begin/end despite the (iterator) caption: the
# (since C++11) note is first. They are good symbols, so the bug is unfixed.
caption = symbol_href.next_sibling
variant = None
if isinstance(caption, NavigableString) and "(" in caption:
variant = caption.text.strip(" ()")
symbol_tt = symbol_href.find("tt")
if symbol_tt:
symbols.append((symbol_tt.text.rstrip("<>()"), # strip any trailing <>()
symbol_href["href"], variant))
return symbols
def _ReadSymbolPage(path, name):
with open(path) as f:
return _ParseSymbolPage(f.read(), name)
def _GetSymbols(pool, root_dir, index_page_name, namespace, variants_to_accept):
"""Get all symbols listed in the index page. All symbols should be in the
given namespace.
Returns a list of Symbols.
"""
# Workflow steps:
# 1. Parse index page which lists all symbols to get symbol
# name (unqualified name) and its href link to the symbol page which
# contains the defined header.
# 2. Parse the symbol page to get the defined header.
index_page_path = os.path.join(root_dir, index_page_name)
with open(index_page_path, "r") as f:
# Read each symbol page in parallel.
results = [] # (symbol_name, promise of [header...])
for symbol_name, symbol_page_path, variant in _ParseIndexPage(f.read()):
# Variant symbols (e.g. the std::locale version of isalpha) add ambiguity.
# FIXME: use these as a fallback rather than ignoring entirely.
variants_for_symbol = variants_to_accept.get(
(namespace or "") + symbol_name, ())
if variant and variant not in variants_for_symbol:
continue
path = os.path.join(root_dir, symbol_page_path)
results.append((symbol_name,
pool.apply_async(_ReadSymbolPage, (path, symbol_name))))
# Build map from symbol name to a set of headers.
symbol_headers = collections.defaultdict(set)
for symbol_name, lazy_headers in results:
symbol_headers[symbol_name].update(lazy_headers.get())
symbols = []
for name, headers in sorted(symbol_headers.items(), key=lambda t : t[0]):
symbols.append(Symbol(name, namespace, list(headers)))
return symbols
def GetSymbols(parse_pages):
"""Get all symbols by parsing the given pages.
Args:
parse_pages: a list of tuples (page_root_dir, index_page_name, namespace)
"""
# By default we prefer the non-variant versions, as they're more common. But
# there are some symbols whose variant is more common. This list describes
# those symbols.
variants_to_accept = {
# std::remove<> has variant algorithm.
"std::remove": ("algorithm"),
}
symbols = []
# Run many workers to process individual symbol pages under the symbol index.
# Don't allow workers to capture Ctrl-C.
pool = multiprocessing.Pool(
initializer=lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
try:
for root_dir, page_name, namespace in parse_pages:
symbols.extend(_GetSymbols(pool, root_dir, page_name, namespace,
variants_to_accept))
finally:
pool.terminate()
pool.join()
return symbols
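# Usage sketch (hypothetical paths - the real driver that calls GetSymbols lives in a
# separate generator script, so this is illustration only):
if __name__ == "__main__":
    example_symbols = GetSymbols([
        # (page_root_dir, index_page_name, namespace)
        ("cppreference/reference/en/cpp/symbol_index", "index.html", "std::"),
        ("cppreference/reference/en/c/index", "index.html", None),
    ])
    for sym in example_symbols:
        print((sym.namespace or "") + sym.name, sym.headers)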
| StarcoderdataPython |
3397489 | from ._graph import *
| StarcoderdataPython |
3229241 | <reponame>OrangeUtan/MCMetagen
import pytest
from mcanitexgen.animation.parser import Duration, State, StateAction, Timeframe, Weight
class Test_call:
@pytest.mark.parametrize(
"args, expected_action",
[
({}, StateAction(State(0), Duration(1))),
({"weight": 2}, StateAction(State(0), Weight(2))),
({"duration": 10}, StateAction(State(0), Duration(10))),
({"start": 2}, StateAction(State(0), Timeframe(start=2, end=3, duration=1))),
({"end": 2}, StateAction(State(0), Timeframe(end=2))),
({"mark": "test"}, StateAction(State(0), Duration(1), mark="test")),
],
)
def test_create_state_action(self, args, expected_action):
state = State(0)
assert state(**args) == expected_action
| StarcoderdataPython |
1608916 | import re
import pandas
LIBE_STATS_FIELDS = ["Worker", ": sim_id", ": sim Time:", "Start:", "End:", "Status:", "\n"]
DATAFRAME_COLUMNS = ["worker", "sim_id", "time", "start", "end", "status"]
LIBE_STATS_PATTERN = '(?<={})(.+?)(?={})'
def create_empty_persis_info(libE_specs):
"""
Create `persis_info` for libEnsemble if no persistent data needs to be transferred.
:param libE_specs: `libE_specs` data structure.
:return: Dictionary mapping each worker number to a dict containing its 'worker_num'.
"""
nworkers = libE_specs['nworkers']
persis_info = {i: {'worker_num': i} for i in range(1, nworkers+1)}
return persis_info
def _libe_stat_regex():
fp = []
for i in range(1, len(LIBE_STATS_FIELDS)):
p = LIBE_STATS_PATTERN.format(LIBE_STATS_FIELDS[i - 1], LIBE_STATS_FIELDS[i])
fp.append(p)
return re.compile("|".join(fp))
def parse_stat_file(filename):
"""
Parses libE_stats.txt from a libEnsemble run and formats the data into a Pandas DataFrame.
:param filename: (str) File name (usually libE_stats.txt)
:return: Returns a Pandas DataFrame
"""
parsed_lines = []
line_parse = _libe_stat_regex()
with open(filename, 'r') as ff:
for line in ff.readlines():
p = line_parse.findall(line)
f = []
for r in p:
f.append(''.join(r).strip())
parsed_lines.append(f)
df = pandas.DataFrame(parsed_lines, columns=DATAFRAME_COLUMNS)
return df
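# Usage sketch (assumes a libE_stats.txt file from a libEnsemble run is in the current
# directory; that filename is only the conventional default, nothing here enforces it):
if __name__ == "__main__":
    stats_df = parse_stat_file("libE_stats.txt")
    # Example: count completed simulations per worker.
    print(stats_df.groupby("worker")["sim_id"].count())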
| StarcoderdataPython |
1748222 | <filename>sample/usage/sample/how_to_use_init_answers.py
# Copyright 2022 Recruit Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from codableopt import Problem, Objective, IntVariable, DoubleVariable, \
CategoryVariable, OptSolver, PenaltyAdjustmentMethod
# Define the variables
x = IntVariable(name='x', lower=np.double(0.0), upper=np.double(2))
y = DoubleVariable(name='y', lower=np.double(0.0), upper=None)
z = CategoryVariable(name='z', categories=['a', 'b', 'c'])
# Function to be used as the objective
def objective_function(var_x, var_y, var_z, parameters):
obj_value = parameters['coef_x'] * var_x + parameters['coef_y'] * var_y
if var_z == 'a':
obj_value += 10.0
elif var_z == 'b':
obj_value += 8.0
else:
# var_z == 'c'
obj_value -= 3.0
return obj_value
# Set up the problem
problem = Problem(is_max_problem=True)
# Define the objective function
problem += Objective(objective=objective_function,
args_map={'var_x': x, 'var_y': y, 'var_z': z,
'parameters': {'coef_x': -3.0, 'coef_y': 4.0}})
# Define the constraint expressions
problem += 2 * x + 4 * y + 2 * (z == 'a') + 3 * (z == ('b', 'c')) <= 8
problem += 2 * x - y + 2 * (z == 'b') > 3
# Inspect the problem
print(problem)
# Create the solver
solver = OptSolver()
# Create the optimization method used inside the solver
method = PenaltyAdjustmentMethod(steps=40000)
# Specify the initial answers
init_answers = [
{'x': 0, 'y': 1, 'z': 'a'}
]
# Run the optimization
answer, is_feasible = solver.solve(problem, method, init_answers=init_answers)
print(f'answer:{answer}, answer_is_feasible:{is_feasible}')
| StarcoderdataPython |
1734703 | <filename>tests/unit/test_persistent_routes.py
import pytest
from app.models import Resource, Language, Category
from tests.conftest import module_client, module_db
from configs import PaginatorConfig
# TODO: We need negative unit tests (what happens when bad data is sent)
def test_get_resources(module_client, module_db):
client = module_client
response = client.get('api/v1/resources')
# Status should be OK
assert (response.status_code == 200)
resources = response.json
# Default page size should be specified in PaginatorConfig
assert (len(resources['data']) == PaginatorConfig.per_page)
for resource in resources['data']:
assert (isinstance(resource.get('name'), str))
assert (resource.get('name') != "")
assert (isinstance(resource.get('url'), str))
assert (resource.get('url') != "")
assert (isinstance(resource.get('category'), str))
assert (resource.get('category') != "")
assert (type(resource.get('languages')) is list)
def test_get_single_resource(module_client, module_db):
client = module_client
response = client.get('api/v1/resources/5')
# Status should be OK
assert (response.status_code == 200)
resource = response.json['data']
print(resource)
assert (isinstance(resource.get('name'), str))
assert (resource.get('name') != "")
assert (isinstance(resource.get('url'), str))
assert (resource.get('url') != "")
assert (isinstance(resource.get('category'), str))
assert (resource.get('category') != "")
assert (type(resource.get('languages')) is list)
assert (resource.get('id') == 5)
def test_paginator(module_client, module_db):
client = module_client
# Test page size
response = client.get('api/v1/resources?page_size=1')
assert (len(response.json['data']) == 1)
response = client.get('api/v1/resources?page_size=5')
assert (len(response.json['data']) == 5)
response = client.get('api/v1/resources?page_size=10')
assert (len(response.json['data']) == 10)
response = client.get('api/v1/resources?page_size=100')
assert (len(response.json['data']) == 100)
# Test pages different and sequential
first_page_resource = response.json['data'][0]
assert (first_page_resource.get('id') == 1)
response = client.get('api/v1/resources?page_size=100&page=2')
second_page_resource = response.json['data'][0]
assert (second_page_resource.get('id') == 101)
response = client.get('api/v1/resources?page_size=100&page=3')
third_page_resource = response.json['data'][0]
assert (third_page_resource.get('id') == 201)
# Test bigger than max page size
too_long = PaginatorConfig.max_page_size + 1
response = client.get(f"api/v1/resources?page_size={too_long}")
assert (len(response.json['data']) == PaginatorConfig.max_page_size)
# Test farther than last page
too_far = 99999999
response = client.get(f"api/v1/resources?page_size=100&page={too_far}")
assert (len(response.json['data']) == 0)
def test_filters(module_client, module_db):
client = module_client
# Filter by language
response = client.get('api/v1/resources?language=python')
for resource in response.json['data']:
assert (type(resource.get('languages')) is list)
assert ('Python' in resource.get('languages'))
# Filter by category
response = client.get('api/v1/resources?category=Back%20End%20Dev')
for resource in response.json['data']:
assert (resource.get('category') == "Back End Dev")
# TODO: Filter by updated_after
# (Need to figure out how to manually set last_updated and created_at)
def test_languages(module_client, module_db):
client = module_client
response = client.get('api/v1/languages')
for language in response.json['data']:
assert (isinstance(language.get('id'), int))
assert (isinstance(language.get('name'), str))
assert (len(language.get('name')) > 0)
def test_categories(module_client, module_db):
client = module_client
response = client.get('api/v1/categories')
for category in response.json['data']:
assert (isinstance(category.get('id'), int))
assert (isinstance(category.get('name'), str))
assert (len(category.get('name')) > 0)
| StarcoderdataPython |
35272 | <gh_stars>10-100
from typing import Any, Dict, Generator, List, Optional
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from probnmn.config import Config
from probnmn.utils.checkpointing import CheckpointManager
class _Trainer(object):
r"""
A base class for generic training of models. This class can have multiple models interacting
with each other, rather than a single model, which is suitable to our use-case (for example,
``module_training`` phase has two models:
:class:`~probnmn.models.program_generator.ProgramGenerator` and
:class:`~probnmn.models.nmn.NeuralModuleNetwork`). It offers full flexibility, with sensible
defaults which may be changed (or disabled) while extending this class.
Extended Summary
----------------
1. Default :class:`~torch.optim.Adam` Optimizer, updates parameters of all models in this
trainer. Learning rate and weight decay for this optimizer are picked up from the provided
config.
2. Default :class:`~torch.optim.lr_scheduler.ReduceLROnPlateau` learning rate scheduler. Gamma
and patience arguments are picked up from the provided config. Observed metric is assumed
to be of type "higher is better". For "lower is better" metrics, make sure to reciprocate.
3. Tensorboard logging of loss curves, metrics etc.
4. Serialization of models and optimizer as checkpoint (.pth) files after every validation.
The observed metric for keeping track of the best checkpoint is of type "higher is better";
follow (2) above if the observed metric is of type "lower is better".
Extend this class and override suitable methods as per requirements, some important ones are:
1. :meth:`step` provides complete customization; this is the method which comprises one
full training iteration, and internally calls (in order) - :meth:`_before_iteration`,
:meth:`_do_iteration` and :meth:`_after_iteration`. Most of the time you will not need to
override this method; instead override one of the three methods called by :meth:`step`.
2. :meth:`_do_iteration`, with core training loop - what happens every iteration, given a
``batch`` from the dataloader this class holds.
3. :meth:`_before_iteration` and :meth:`_after_iteration`, for any pre- or post-processing
steps. Default behaviour:
* :meth:`_before_iteration` - call ``optimizer.zero_grad()``
* :meth:`_after_iteration` - call ``optimizer.step()`` and do tensorboard logging.
4. :meth:`after_validation`, to specify any steps after evaluation. Default behaviour is to
do learning rate scheduling and log validation metrics on tensorboard.
Notes
-----
All models are `passed by assignment`, so they could be shared with an external evaluator.
Do not set ``self._models = ...`` anywhere while extending this class.
Parameters
----------
config: Config
A :class:`~probnmn.Config` object with all the relevant configuration parameters.
dataloader: torch.utils.data.DataLoader
A :class:`~torch.utils.data.DataLoader` which provides batches of training examples. It
wraps one of :mod:`probnmn.data.datasets` depending on the evaluation phase.
models: Dict[str, Type[nn.Module]]
All the models which interact with each other during training. These are one or more from
:mod:`probnmn.models` depending on the training phase.
serialization_dir: str
Path to a directory for tensorboard logging and serializing checkpoints.
gpu_ids: List[int], optional (default=[0])
List of GPU IDs to use, ``[-1]`` - use CPU.
"""
def __init__(
self,
config: Config,
dataloader: DataLoader,
models: Dict[str, nn.Module],
serialization_dir: str,
gpu_ids: List[int] = [0],
):
self._C = config
# Make dataloader cyclic for sampling batches perpetually.
self._dataloader = self._cycle(dataloader)
self._models = models
# Set device according to specified GPU ids.
self._device = torch.device(f"cuda:{gpu_ids[0]}" if gpu_ids[0] >= 0 else "cpu")
# Shift models to device, and wrap in DataParallel for Multi-GPU execution (if needed).
for model_name in self._models:
self._models[model_name] = self._models[model_name].to(self._device)
if len(gpu_ids) > 1 and -1 not in gpu_ids:
# Don't wrap to DataParallel if single GPU ID or -1 (CPU) is provided.
self._models[model_name] = nn.DataParallel(self._models[model_name], gpu_ids)
# Accumulate parameters of all models to construct Adam Optimizer.
all_parameters: List[Any] = []
for model_name in self._models:
all_parameters.extend(list(self._models[model_name].parameters()))
self._optimizer = optim.Adam(
all_parameters, lr=self._C.OPTIM.LR_INITIAL, weight_decay=self._C.OPTIM.WEIGHT_DECAY
)
# Default learning rate scheduler: (lr *= gamma) when observed metric plateaus for
# "patience" number of validation steps.
self._lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
self._optimizer,
mode="max",
factor=self._C.OPTIM.LR_GAMMA,
patience=self._C.OPTIM.LR_PATIENCE,
threshold=1e-3,
)
# Tensorboard summary writer for logging losses and metrics.
self._tensorboard_writer = SummaryWriter(log_dir=serialization_dir)
# Checkpoint manager to serialize model, optimizer and lr scheduler periodically.
self._checkpoint_manager = CheckpointManager(
serialization_dir=serialization_dir,
keep_recent=100,
optimizer=self._optimizer,
scheduler=self._lr_scheduler,
**models,
)
# Initialize a counter to keep track of the iteration number.
# This increments everytime ``step`` is called.
self._iteration: int = -1
def step(self, iteration: Optional[int] = None):
r"""
Perform one iteration of training.
Parameters
----------
iteration: int, optional (default = None)
Iteration number (useful to hard set to any number when loading checkpoint).
If ``None``, use the internal :attr:`self._iteration` counter.
"""
self._before_iteration()
batch = next(self._dataloader)
output_dict = self._do_iteration(batch)
self._after_iteration(output_dict)
self._iteration = iteration or self._iteration + 1
def _before_iteration(self):
r"""
Steps to do before doing the forward pass of iteration. Default behavior is to simply
call :meth:`zero_grad` for optimizer. Called inside :meth:`step`.
"""
self._optimizer.zero_grad()
def _do_iteration(self, batch: Dict[str, Any]) -> Dict[str, Any]:
r"""
Forward and backward passes on models, given a batch sampled from dataloader.
Parameters
----------
batch: Dict[str, Any]
A batch of training examples sampled from dataloader. See :func:`step` and
:meth:`_cycle` on how this batch is sampled.
Returns
-------
Dict[str, Any]
An output dictionary typically returned by the models. This would be passed to
:meth:`_after_iteration` for tensorboard logging.
"""
# What a single iteration usually would look like.
iteration_output_dict = self._models["model"](batch)
batch_loss = iteration_output_dict["loss"].mean()
batch_loss.backward()
return {"loss": batch_loss}
def _after_iteration(self, output_dict: Dict[str, Any]):
r"""
Steps to do after doing the forward pass of iteration. Default behavior is to simply
do gradient update through ``optimizer.step()``, and log metrics to tensorboard.
Parameters
----------
output_dict: Dict[str, Any]
This is exactly the object returned by :meth:`_do_iteration`, which would contain all
the required losses for tensorboard logging.
"""
self._optimizer.step()
# keys: {"loss"} + ... {other keys such as "elbo"}
for key in output_dict:
if isinstance(output_dict[key], dict):
# Use ``add_scalars`` for dicts in a nested ``output_dict``.
self._tensorboard_writer.add_scalars(
f"train/{key}", output_dict[key], self._iteration
)
else:
# Use ``add_scalar`` for floats / zero-dim tensors in ``output_dict``.
self._tensorboard_writer.add_scalar(
f"train/{key}", output_dict[key], self._iteration
)
def after_validation(self, val_metrics: Dict[str, Any], iteration: Optional[int] = None):
r"""
Steps to do after an external :class:`~probnmn.evaluators._evaluator._Evaluator` performs
evaluation. This is not called by :meth:`step`; call it from outside at the appropriate time.
Default behavior is to perform learning rate scheduling, serialize a checkpoint, and
log validation metrics to tensorboard.
Since this implementation assumes a key ``"metric"`` in ``val_metrics``, it is convenient
to set this key while overriding this method, when there are multiple models and multiple
metrics and there is one metric which decides best checkpoint.
Parameters
----------
val_metrics: Dict[str, Any]
Validation metrics for all the models. Returned by ``evaluate`` method of
:class:`~probnmn.evaluators._evaluator._Evaluator` (or its extended class).
iteration: int, optional (default = None)
Iteration number. If ``None``, use the internal :attr:`self._iteration` counter.
"""
if iteration is not None:
self._iteration = iteration
# Serialize model and optimizer and keep track of best checkpoint.
self._checkpoint_manager.step(self._iteration, val_metrics["metric"])
# Perform learning rate scheduling based on validation perplexity.
self._lr_scheduler.step(val_metrics["metric"])
# Log learning rate after scheduling.
self._tensorboard_writer.add_scalar(
"train/lr", self._optimizer.param_groups[0]["lr"], self._iteration
)
# Log all validation metrics to tensorboard (pop the "metric" key, which was only relevant
# to learning rate scheduling and checkpointing).
val_metrics.pop("metric")
for model_name in val_metrics:
for metric_name in val_metrics[model_name]:
self._tensorboard_writer.add_scalar(
f"val/metrics/{model_name}/{metric_name}",
val_metrics[model_name][metric_name],
self._iteration,
)
def load_checkpoint(self, checkpoint_path: str, iteration: Optional[int] = None):
r"""
Load a checkpoint to continue training from. The iteration when this checkpoint was
serialized is inferred from its name (so do not rename after serialization).
Parameters
----------
checkpoint_path: str
Path to a checkpoint containing models and optimizers of the phase which is being
trained on.
iteration: int, optional (default = None)
Iteration number. If ``None``, get it from the checkpoint.
"""
_iteration = self._checkpoint_manager.load(checkpoint_path)
# By default, the provided iteration overrides what is found in checkpoint.
iteration = iteration or _iteration
self._iteration = iteration
def _cycle(self, dataloader: DataLoader) -> Generator[Dict[str, torch.Tensor], None, None]:
r"""
A generator which yields a random batch from dataloader perpetually. This generator is
used in the constructor.
Extended Summary
----------------
This is done because we train for a fixed number of iterations, and do not have the
notion of 'epochs'. Using ``itertools.cycle`` with a dataloader is harmful and may cause
unexpected memory leaks.
"""
while True:
for batch in dataloader:
for key in batch:
batch[key] = batch[key].to(self._device)
yield batch
@property
def iteration(self):
return self._iteration
@property
def models(self):
return self._models
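# Extension sketch: a hypothetical phase-specific trainer that overrides only the core
# loop, as the class docstring suggests. The model key "program_generator" is illustrative;
# it is not a key guaranteed by this base class.
class _ExampleTrainer(_Trainer):
    def _do_iteration(self, batch: Dict[str, Any]) -> Dict[str, Any]:
        # Forward pass on one registered model, then backpropagate the mean loss.
        iteration_output_dict = self._models["program_generator"](batch)
        batch_loss = iteration_output_dict["loss"].mean()
        batch_loss.backward()
        return {"loss": batch_loss}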
| StarcoderdataPython |
34546 | """ Data Transfer Objects """
from pydantic import BaseModel
class WarehouseDto(BaseModel):
name: str # this is our unique identifier!
location: str
capacity: int | StarcoderdataPython |
58563 | <reponame>toddnguyen47/utility-files
import uuid
import hashlib
import getpass
import argparse
import sys
hashed_password_file = "hashedPassword.txt"
def hash_password(password: str):
# uuid is used to generate a random number
salt = uuid.uuid4().hex
return hashlib.sha256(salt.encode() + password.encode()).hexdigest() + ":" + salt
def check_password(hashed_password, user_password):
password, salt = hashed_password.split(":")
return password == hashlib.sha256(salt.encode() + user_password.encode()).hexdigest()
def handle_hash_password():
pwd = getpass.getpass(prompt="Password: ")
pwd2 = getpass.getpass(prompt="Enter password again: ")
if pwd != pwd2:
print("Password do not match. Now exiting.")
exit(1)
hashed = hash_password(pwd)
print("Exported to {}".format(hashed_password_file))
with open(hashed_password_file, "w") as file:
file.write(hashed)
def handle_check_password():
with open(hashed_password_file, "r") as file:
hashed_password = file.read()
if hashed_password.strip() == "":
raise RuntimeError("`{}` file is empty or nonexistent".format(hashed_password_file))
user_input = input("Please enter a password: ")
if check_password(hashed_password, user_input):
print('You entered the right password')
else:
print('ERROR: I am sorry but the password does not match')
# Create argparse
parser = argparse.ArgumentParser(
description="Hash password or check hashed password")
# Create subparsers
subparsers = parser.add_subparsers(title="Valid commands", dest="command_name")
# Hash password
parser_hash_pwd = subparsers.add_parser("hash", help="Hash a password")
# Check password
parser_check_pwd = subparsers.add_parser("check", help="Check a password")
# Compile all the command line parser and subparsers
args = parser.parse_args()
# If no outputs are supplied, print help
if len(sys.argv) == 1:
parser.print_help()
sys.exit(0)
if args.command_name == "hash":
handle_hash_password()
elif args.command_name == "check":
handle_check_password()
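# Note: hash_password stores the value as "sha256(salt + password):salt", and check_password
# re-derives the digest with the stored salt before comparing. Typical use (the script name
# is whatever this file is saved as): `python <script> hash`, then `python <script> check`.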
| StarcoderdataPython |
1749295 | <reponame>apcarrik/kaggle
def findDecision(obj): #obj[0]: Passanger, obj[1]: Weather, obj[2]: Time, obj[3]: Coupon, obj[4]: Coupon_validity, obj[5]: Gender, obj[6]: Age, obj[7]: Maritalstatus, obj[8]: Children, obj[9]: Education, obj[10]: Occupation, obj[11]: Income, obj[12]: Bar, obj[13]: Coffeehouse, obj[14]: Restaurant20to50, obj[15]: Direction_same, obj[16]: Distance
# {"feature": "Income", "instances": 34, "metric_value": 0.9975, "depth": 1}
if obj[11]>0:
# {"feature": "Occupation", "instances": 29, "metric_value": 0.9923, "depth": 2}
if obj[10]>1:
# {"feature": "Distance", "instances": 25, "metric_value": 0.9988, "depth": 3}
if obj[16]<=2:
# {"feature": "Gender", "instances": 20, "metric_value": 0.971, "depth": 4}
if obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 10, "metric_value": 0.7219, "depth": 5}
if obj[14]>1.0:
return 'True'
elif obj[14]<=1.0:
# {"feature": "Time", "instances": 5, "metric_value": 0.971, "depth": 6}
if obj[2]<=2:
# {"feature": "Age", "instances": 3, "metric_value": 0.9183, "depth": 7}
if obj[6]<=5:
return 'False'
elif obj[6]>5:
return 'True'
else: return 'True'
elif obj[2]>2:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Age", "instances": 10, "metric_value": 0.971, "depth": 5}
if obj[6]<=3:
# {"feature": "Time", "instances": 6, "metric_value": 0.9183, "depth": 6}
if obj[2]<=2:
# {"feature": "Maritalstatus", "instances": 5, "metric_value": 0.7219, "depth": 7}
if obj[7]<=1:
return 'True'
elif obj[7]>1:
return 'False'
else: return 'False'
elif obj[2]>2:
return 'False'
else: return 'False'
elif obj[6]>3:
return 'False'
else: return 'False'
else: return 'False'
elif obj[16]>2:
return 'False'
else: return 'False'
elif obj[10]<=1:
return 'True'
else: return 'True'
elif obj[11]<=0:
return 'False'
else: return 'False'
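# Usage sketch: findDecision expects the 17-element feature vector described in the header
# comment above; the values below are invented purely for illustration.
if __name__ == "__main__":
    example_obj = [1, 1, 2, 3, 1, 0, 4, 1, 0, 2, 5, 1, 1.0, 2.0, 1.0, 0, 1]
    print(findDecision(example_obj))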
| StarcoderdataPython |
103010 | norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
type='PoseDetDetector',
pretrained='pretrained/dla34-ba72cf86.pth',
# pretrained='open-mmlab://msra/hrnetv2_w32',
backbone=dict(
type='DLA',
return_levels=True,
levels=[1, 1, 1, 2, 2, 1],
channels=[16, 32, 64, 128, 256, 512],
ouput_indice=[3,4,5,6],
),
neck=dict(
type='FPN',
in_channels=[64, 128, 256, 512],
out_channels=128,
start_level=1,
add_extra_convs='on_input',
num_outs=4,
# num_outs=3,
norm_cfg=norm_cfg,),
bbox_head=dict(
# type='PoseDetHead',
type='PoseDetHeadHeatMapMl',
norm_cfg=norm_cfg,
num_classes=1,
in_channels=128,
feat_channels=128,
embedding_feat_channels=128,
init_convs=3,
refine_convs=2,
cls_convs=2,
gradient_mul=0.1,
dcn_kernel=(1,17),
refine_num=1,
point_strides=[8, 16, 32, 64],
point_base_scale=4,
num_keypoints=17,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_keypoints_init=dict(type='KeypointsLoss',
d_type='L2',
weight=.1,
stage='init',
normalize_factor=1,
),
loss_keypoints_refine=dict(type='KeypointsLoss',
d_type='L2',
weight=.2,
stage='refine',
normalize_factor=1,
),
loss_heatmap=dict(type='HeatmapLoss', weight=.1, with_sigmas=False),
)
)
# training and testing settings
train_cfg = dict(
init=dict(
assigner=dict(type='KeypointsAssigner',
scale=4,
pos_num=1,
number_keypoints_thr=3,
num_keypoints=17,
center_type='keypoints',
# center_type='box'
),
allowed_border=-1,
pos_weight=-1,
debug=False),
refine=dict(
assigner=dict(
type='OksAssigner',
pos_PD_thr=0.7,
neg_PD_thr=0.7,
min_pos_iou=0.52,
ignore_iof_thr=-1,
match_low_quality=True,
num_keypoints=17,
number_keypoints_thr=3, #
),
allowed_border=-1,
pos_weight=-1,
debug=False
),
cls=dict(
assigner=dict(
type='OksAssigner',
pos_PD_thr=0.6,
neg_PD_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1,
match_low_quality=False,
num_keypoints=17,
number_keypoints_thr=3,
),
allowed_border=-1,
pos_weight=-1,
debug=False
),
)
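# Note: the three assigner blocks above (init / refine / cls) set up target assignment for
# the initial keypoint regression, the refinement stage, and the classification branch,
# respectively; this description is inferred from the config keys themselves.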
test_cfg = dict(
nms_pre=500,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='keypoints_nms', iou_thr=0.2),
max_per_img=100) | StarcoderdataPython |
1726811 | <gh_stars>100-1000
#No Trig paths defined
import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD")
import FWCore.Framework.test.cmsExceptionsFatalOption_cff
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True),
Rethrow = FWCore.Framework.test.cmsExceptionsFatalOption_cff.Rethrow
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
process.source = cms.Source("EmptySource")
process.a1 = cms.EDAnalyzer("TestResultAnalyzer",
name = cms.untracked.string('a1'),
dump = cms.untracked.bool(True),
numbits = cms.untracked.int32(0),
)
process.testout1 = cms.OutputModule("TestOutputModule",
expectTriggerResults = cms.untracked.bool(False),
bitMask = cms.int32(0),
name = cms.string('testout1')
)
process.e1 = cms.EndPath(process.a1)
process.e2 = cms.EndPath(process.testout1)
| StarcoderdataPython |
1793867 | <filename>ahkpy/exceptions.py
class Error(Exception):
"""The runtime error that was raised in the AutoHotkey.
Contains the following attributes:
.. attribute:: message
The error message.
.. attribute:: what
The name of the command, function or label which was executing
or about to execute when the error occurred.
.. attribute:: extra
Additional information about the error, if available.
.. attribute:: file
The full path of the AHK script file which contains the line
where the error occurred.
.. attribute:: line
The line number in the AHK script where the error occurred.
"""
def __init__(self, message, what=None, extra=None, file=None, line=None):
super().__init__(message)
# TODO: Add AHK exception info to Python traceback?
self.message = message
self.what = what
self.extra = extra
self.file = file
self.line = line
def __setattr__(self, name, value):
if name == "message":
super().__setattr__("message", value)
super().__setattr__("args", (value,))
return
super().__setattr__(name, value)
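# Minimal illustration of the attribute layout (values are made up, not from a real AHK error):
if __name__ == "__main__":
    err = Error("Target window not found", what="WinActivate", file="script.ahk", line=42)
    assert err.args == ("Target window not found",)
    print(err.message, err.what, err.file, err.line)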
| StarcoderdataPython |
3322537 | <gh_stars>0
# -*- coding: utf-8 -*-
"""?
:copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from rssynergia.base_diagnostics import options
import math
import matplotlib.pyplot as plt
import numpy as np
import sys
import tables
coords = {}
coords['x'] = 0
coords['xp'] = 1
coords['y'] = 2
coords['yp'] = 3
coords['cdt'] = 4
coords['dpop'] = 5
coords['id'] = 6
def plotbeam(opts, particles, header):
'''Returns a 2D plot of particle distribution in the choosen coordinate space
Arguments:
opts (options.Options): A Synergia options instance
particles (ndarray): An array of particles, organized according to the coords dictionary
header (dict): A Python dictionary with metadata for the particle array
'''
h = particles[:,coords[opts.hcoord]]
v = particles[:,coords[opts.vcoord]]
#vcoords = particles[:,coords[vc]] for vc in opts.vcoord
fig = plt.figure()
ax = plt.gca()
ax.scatter(h,v, c ='b')
ax.set_aspect('equal', 'datalim')
#plt.plot(h,v, 'o')
plt.xlabel(opts.hcoord,fontsize=12)
plt.ylabel(opts.vcoord,fontsize=12)
title = 'Particle distribution at s = ' + str(header['t_len'])
if not opts.lattice_name== None:
title = title + ' for lattice ' + opts.lattice_name
plt.title(title, y=1.05, fontsize=14)
plt.show()
if opts.save:
sv_title = 'Beam_' + opts.lattice_name + '.pdf'
fig.savefig(sv_title, bbox_inches='tight')
#define an option to replicate the pltbunch.plot_bunch function?
def get_particles(opts):
'''
Reads an input file and returns a numpy array of particles and a dictionary of root values
Arguments:
opts (options.Options): A Synergia options instance
'''
f = tables.open_file(opts.inputfile, 'r')
particles = f.root.particles.read()
#get appropriate reference properties from file root
npart = particles.shape[0]
mass = f.root.mass[()]
p_ref = f.root.pz[()]
sn = f.root.s_n[()] #period length
tn = f.root.tlen[()] #cumulative tracked length for this file
f.close()
header = dict()
header['n_part'] = npart
header['mass'] = mass
header['p_ref'] = p_ref
header['s_val'] = sn
header['t_len'] = tn
return header,particles
def plot_beam(opts):
'''
Plot a beam of particles given an options object input.
Arguments:
opts (options.Options): A Synergia options instance
'''
header, particles = get_particles(opts)
if opts.plots ==1:
plotbeam(opts, particles,header)
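# Usage sketch (the field names follow how this module reads `opts`, but the exact
# rssynergia Options constructor is an assumption, so this is left as comments):
# opts = options.Options()
# opts.inputfile = 'particles_0000.h5'
# opts.hcoord, opts.vcoord = 'x', 'xp'
# opts.lattice_name, opts.plots, opts.save = 'example_lattice', 1, False
# plot_beam(opts)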
| StarcoderdataPython |
101548 | <gh_stars>0
import os
import unittest
from pynwb.form.data_utils import DataChunkIterator
from pynwb.form.backends.hdf5.h5tools import HDF5IO
from pynwb.form.build import DatasetBuilder
import h5py
import tempfile
import numpy as np
class H5IOTest(unittest.TestCase):
"""Tests for h5tools IO tools"""
def setUp(self):
self.test_temp_file = tempfile.NamedTemporaryFile()
# On Windows h5py cannot truncate an open file in write mode.
# The temp file will be closed before h5py truncates it
# and will be removed during the tearDown step.
name = self.test_temp_file.name
self.test_temp_file.close()
self.f = h5py.File(name, 'w')
self.io = HDF5IO(self.test_temp_file.name)
def tearDown(self):
path = self.f.filename
self.f.close()
os.remove(path)
del self.f
del self.test_temp_file
self.f = None
self.test_temp_file = None
##########################################
# __chunked_iter_fill__(...) tests
##########################################
def test__chunked_iter_fill_iterator_matched_buffer_size(self):
dci = DataChunkIterator(data=range(10), buffer_size=2)
my_dset = HDF5IO.__chunked_iter_fill__(self.f, 'test_dataset', dci)
self.assertListEqual(my_dset[:].tolist(), list(range(10)))
def test__chunked_iter_fill_iterator_unmatched_buffer_size(self):
dci = DataChunkIterator(data=range(10), buffer_size=3)
my_dset = HDF5IO.__chunked_iter_fill__(self.f, 'test_dataset', dci)
self.assertListEqual(my_dset[:].tolist(), list(range(10)))
def test__chunked_iter_fill_numpy_matched_buffer_size(self):
a = np.arange(30).reshape(5, 2, 3)
dci = DataChunkIterator(data=a, buffer_size=1)
my_dset = HDF5IO.__chunked_iter_fill__(self.f, 'test_dataset', dci)
self.assertTrue(np.all(my_dset[:] == a))
self.assertTupleEqual(my_dset.shape, a.shape)
def test__chunked_iter_fill_numpy_unmatched_buffer_size(self):
a = np.arange(30).reshape(5, 2, 3)
dci = DataChunkIterator(data=a, buffer_size=3)
my_dset = HDF5IO.__chunked_iter_fill__(self.f, 'test_dataset', dci)
self.assertTrue(np.all(my_dset[:] == a))
self.assertTupleEqual(my_dset.shape, a.shape)
def test__chunked_iter_fill_list_matched_buffer_size(self):
a = np.arange(30).reshape(5, 2, 3)
dci = DataChunkIterator(data=a.tolist(), buffer_size=1)
my_dset = HDF5IO.__chunked_iter_fill__(self.f, 'test_dataset', dci)
self.assertTrue(np.all(my_dset[:] == a))
self.assertTupleEqual(my_dset.shape, a.shape)
def test__chunked_iter_fill_list_unmatched_buffer_size(self):
a = np.arange(30).reshape(5, 2, 3)
dci = DataChunkIterator(data=a.tolist(), buffer_size=3)
my_dset = HDF5IO.__chunked_iter_fill__(self.f, 'test_dataset', dci)
self.assertTrue(np.all(my_dset[:] == a))
self.assertTupleEqual(my_dset.shape, a.shape)
##########################################
# write_dataset tests
##########################################
def test_write_dataset_scalar(self):
a = 10
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))
dset = self.f['test_dataset']
self.assertTupleEqual(dset.shape, ())
self.assertEqual(dset[()], a)
def test_write_dataset_string(self):
a = 'test string'
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))
dset = self.f['test_dataset']
self.assertTupleEqual(dset.shape, ())
# self.assertEqual(dset[()].decode('utf-8'), a)
self.assertEqual(dset[()], a)
def test_write_dataset_list(self):
a = np.arange(30).reshape(5, 2, 3)
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a.tolist(), attributes={}))
dset = self.f['test_dataset']
self.assertTrue(np.all(dset[:] == a))
def test_write_table(self):
cmpd_dt = np.dtype([('a', np.int32), ('b', np.float64)])
data = np.zeros(10, dtype=cmpd_dt)
data['a'][1] = 101
data['b'][1] = 0.1
dt = [{'name': 'a', 'dtype': 'int32', 'doc': 'a column'},
{'name': 'b', 'dtype': 'float64', 'doc': 'b column'}]
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', data, attributes={}, dtype=dt))
dset = self.f['test_dataset']
self.assertEqual(dset['a'].tolist(), data['a'].tolist())
self.assertEqual(dset['b'].tolist(), data['b'].tolist())
def test_write_table_nested(self):
b_cmpd_dt = np.dtype([('c', np.int32), ('d', np.float64)])
cmpd_dt = np.dtype([('a', np.int32), ('b', b_cmpd_dt)])
data = np.zeros(10, dtype=cmpd_dt)
data['a'][1] = 101
data['b']['c'] = 202
data['b']['d'] = 10.1
b_dt = [{'name': 'c', 'dtype': 'int32', 'doc': 'c column'},
{'name': 'd', 'dtype': 'float64', 'doc': 'd column'}]
dt = [{'name': 'a', 'dtype': 'int32', 'doc': 'a column'},
{'name': 'b', 'dtype': b_dt, 'doc': 'b column'}]
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', data, attributes={}, dtype=dt))
dset = self.f['test_dataset']
self.assertEqual(dset['a'].tolist(), data['a'].tolist())
self.assertEqual(dset['b'].tolist(), data['b'].tolist())
def test_write_dataset_iterable(self):
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', range(10), attributes={}))
dset = self.f['test_dataset']
self.assertListEqual(dset[:].tolist(), list(range(10)))
def test_write_dataset_iterable_multidimensional_array(self):
a = np.arange(30).reshape(5, 2, 3)
aiter = iter(a)
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', aiter, attributes={}))
dset = self.f['test_dataset']
self.assertListEqual(dset[:].tolist(), a.tolist())
def test_write_dataset_data_chunk_iterator(self):
dci = DataChunkIterator(data=np.arange(10), buffer_size=2)
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', dci, attributes={}))
dset = self.f['test_dataset']
self.assertListEqual(dset[:].tolist(), list(range(10)))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3322032 | cursor1=conn.cursor();
cursor1.execute("SELECT department_id,department_name "+
" FROM departments")
allrows=cursor1.fetchall()
for row in allrows:
print "%6d %-20s" % (row[0],row[1])
cursor1.close()
| StarcoderdataPython |
1689605 | # Generated by Django 3.1.3 on 2020-12-07 21:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("peeringdb", "0012_peerrecord_visible"),
]
def flush_peeringdb_tables(apps, schema_editor):
apps.get_model("peeringdb", "Contact").objects.all().delete()
apps.get_model("peeringdb", "Network").objects.all().delete()
apps.get_model("peeringdb", "NetworkIXLAN").objects.all().delete()
apps.get_model("peeringdb", "PeerRecord").objects.all().delete()
apps.get_model("peeringdb", "Synchronization").objects.all().delete()
operations = [migrations.RunPython(flush_peeringdb_tables)]
| StarcoderdataPython |
91235 | import unittest
from torchvision import transforms
# (Ugly) Path hack. #TODO - get rid of it
import sys, os; sys.path.insert(0, os.path.abspath('.'))
from dataprocessor import MyDatasetDoc
from dataprocessor.dataset import MyDatasetCorner, SmartDoc, SmartDocCorner
from utils import draw_circle_pil, get_concat_h
class DataDoc_TestCase(unittest.TestCase):
def test_init_MyDataset(self):
ds = MyDatasetDoc('/home/mhadar/projects/doc_scanner/data/data_generator/v1')
assert isinstance(ds.data, list)
assert ds.target.shape[1] == 8
assert len(ds.data) == len(ds.target)
def test_MyDataset_sandbox(self):
ds = MyDatasetDoc('/home/mhadar/projects/doc_scanner/data/data_generator/sandbox')
im, target = ds[0]
assert isinstance(ds.data, list)
assert ds.target.shape[1] == 8
assert len(ds.data) == 7
assert len(ds.target) == 7
def test_init_SmartDoc(self):
ds = SmartDoc()
class DataCorner_TestCase(unittest.TestCase):
def test_init_MyDataset(self):
ds = MyDatasetCorner.from_directory('/home/mhadar/projects/doc_scanner/data/data_generator/v1_corners')
assert isinstance(ds.data, list)
assert ds.target.shape[1] == 2
assert len(ds.data) == len(ds.target)
def test_MyDataset_sandbox(self):
ds = MyDatasetCorner.from_directory('/home/mhadar/projects/doc_scanner/data/data_generator/sandbox_corners')
im, target = ds[0]
assert isinstance(ds.data, list)
assert ds.target.shape[1] == 2
assert len(ds.data) == 5
assert len(ds.target) == 5
def test_rotation(self):
ds = MyDatasetCorner.from_directory('/home/mhadar/projects/doc_scanner/data/data_generator/v2_corners')
ds_rotated = MyDatasetCorner.from_directory('/home/mhadar/projects/doc_scanner/data/data_generator/v2_corners', is_rotating=True)
trans_to_pil = transforms.ToPILImage()
for i in range(4):
orig, target_orig = ds[i]
rotated, target_rotated = ds_rotated[i]
orig, rotated = trans_to_pil(orig), trans_to_pil(rotated)
draw_circle_pil(orig, (target_orig * orig.size).astype(int), radious=5, outline='yellow', width=2)
draw_circle_pil(rotated, (target_rotated * rotated.size).astype(int), radious=5, outline='yellow', width=2)
t = get_concat_h(orig, rotated)
t.save(f'results/debug/{i}_rotated.jpg')
def test_init_SmartDoc(self):
ds = SmartDocCorner()
def test_random_sample(self):
ds = MyDatasetCorner.from_directory('/home/mhadar/projects/doc_scanner/data/data_generator/v2_corners')
train_dataset1, test_dataset1 = ds.random_split(0.5)
train_dataset2, test_dataset2 = ds.random_split(0.5)
# assert (train_dataset1.__getitem__(0)[0] == train_dataset2.__getitem__(0)[0]).all() #turn off color jitter before!
# assert (train_dataset1.__getitem__(0)[1] == train_dataset2.__getitem__(0)[1]).all()
# assert (test_dataset1.__getitem__(10)[1] == test_dataset2.__getitem__(10)[1]).all()
# assert (test_dataset1.__getitem__(10)[1] == test_dataset2.__getitem__(10)[1]).all()
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3236258 | from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf.urls import url
from custom.m4change.views import update_service_status
urlpatterns = [
url(r'^update_service_status/$', update_service_status, name='update_service_status'),
]
| StarcoderdataPython |
49814 | <reponame>homeostasie/petits-pedestres
# We start on the first square.
# There is one grain of wheat on the first square.
# There is one grain of wheat on the chessboard so far.
# Number of wheat grains on the current square.
# case is an integer.
case = 1
# Total number of wheat grains on the chessboard.
# ble is an integer.
ble = 1
# Multiplier applied to the number of grains from one square to the next.
# coef is an integer
coef = 2
# A chessboard has 8*8 = 64 squares.
# 63 squares remain to be filled with wheat.
# If we want to see the values for every square.
print("Sur la première case, il y a : ", case, "grains de blé, pour un total de : ", ble , "grains sur l'échiquier.")
# Loop over the 63 remaining squares
for i in range(2,65):
# Each following square holds twice as many grains of wheat.
case = case * coef
# Each time, add this square's grains to the wheat total.
ble = ble + case
# If we want to see the values for every square.
print("Sur la case : ", i , " , il y a : ", case, "grains de blé, pour un total de : ", ble , "grains sur l'échiquier.")
print("Nombre de grain de blé sur l'échéquier", ble)
print("Écriture scientifique : ", ble*1.)
# Weight in grams. Multiply the number of grains by 0.05 g.
poids = ble * 0.05
# Weight in tonnes. Divide by 10^6, or multiply by 10^-6.
poids = ble * 0.05 * 10**(-6)
print("Poids du blé total en tonnes : ", poids, " tonnes")
# The number of years required.
# World wheat production is about 600 million tonnes per year.
prod_an = 600 * 10 **6
print("La production annuelle de blé est de ", prod_an, "tonnes de blé")
# The number of years required =
# our weight of wheat on the chessboard divided by the annual production.
nombre_an = poids / prod_an
print("Il faudra : ", nombre_an, "années à l'empereur afin de combler ça promesse.")
| StarcoderdataPython |
3233658 | <reponame>nparkstar/nauta
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from operator import itemgetter
from typing import List, Tuple
from util.k8s.kubectl import get_top_for_pod
from util.k8s.k8s_info import get_pods, sum_cpu_resources_unformatted, sum_mem_resources_unformatted, \
format_mem_resources, format_cpu_resources, PodStatus
from util.logger import initialize_logger
logger = initialize_logger(__name__)
class ResourceUsage():
def __init__(self, user_name: str, cpu_usage: int, mem_usage: int):
self.user_name = user_name
self.cpu_usage = cpu_usage
self.mem_usage = mem_usage
def get_formatted_cpu_usage(self):
return format_cpu_resources(self.cpu_usage)
def get_formatted_mem_usage(self):
return format_mem_resources(self.mem_usage)
def __str__(self):
return self.user_name+":"+self.get_formatted_cpu_usage()+":"+self.get_formatted_mem_usage()
def get_highest_usage() -> Tuple[List[ResourceUsage], List[ResourceUsage]]:
available_pods = get_pods(label_selector=None)
CPU_KEY = "cpu"
MEM_KEY = "mem"
NAME_KEY = "name"
users_data: dict = {}
summarized_usage = []
for item in available_pods:
name = item.metadata.name
namespace = item.metadata.namespace
# omit technical namespaces
if namespace not in ["nauta", "kube-system"] and item.status.phase.upper() == PodStatus.RUNNING.value:
try:
cpu, mem = get_top_for_pod(name=name, namespace=namespace)
if namespace in users_data:
users_data.get(namespace, {}).get(CPU_KEY).append(cpu)
users_data.get(namespace, {}).get(MEM_KEY).append(mem)
else:
users_data[namespace] = {CPU_KEY: [cpu], MEM_KEY: [mem]}
except Exception as exe:
logger.exception("Error during gathering pod resources usage.")
for user_name, usage in users_data.items():
summarized_usage.append({NAME_KEY: user_name,
CPU_KEY: sum_cpu_resources_unformatted(usage.get(CPU_KEY)),
MEM_KEY: sum_mem_resources_unformatted(usage.get(MEM_KEY))})
top_cpu_users = sorted(summarized_usage, key=itemgetter(CPU_KEY), reverse=True)
top_mem_users = sorted(summarized_usage, key=itemgetter(MEM_KEY), reverse=True)
return [ResourceUsage(item[NAME_KEY], item[CPU_KEY], item[MEM_KEY]) for item in top_cpu_users], \
[ResourceUsage(item[NAME_KEY], item[CPU_KEY], item[MEM_KEY]) for item in top_mem_users]
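# Usage sketch: requires a reachable Kubernetes cluster configured for these util helpers;
# prints the three namespaces with the highest CPU usage.
if __name__ == "__main__":
    top_cpu, top_mem = get_highest_usage()
    for usage in top_cpu[:3]:
        print(usage)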
| StarcoderdataPython |
3286244 | from bakery.views import BuildableTemplateView
from django.core.urlresolvers import reverse
class MooView(BuildableTemplateView):
template_name = "moo.html"
@property
def build_path(cls):
return '/'.join((reverse('moo')[1:], "index.html",))
| StarcoderdataPython |
4801862 | from collections import OrderedDict
from rest_framework import status
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from apps.common.tests import BaseTestCase, GetResponseMixin
from apps.app.models import App
from apps.auth.models import User
class GrayTasksTests(BaseTestCase):
def setUp(self):
self.normal_user = User.objects.get(username='normal_user')
self.admin_user = User.objects.get(username='admin_user')
self.app1_owner_user = User.objects.get(username='app_owner_user')
self.app2_owner_user = User.objects.get(username='test_app_owner')
self.app = App.objects.get(pk=1)
self.test_data = {"name": "任务管理测试1",
"appId": 1,
"startDate": "2017-5-20",
"endDate": "2017-5-27",
"innerStrategyList": [{"id": 1, "pushContent": "innerStrategy1"},
{"id": 1, "pushContent": "innerStrategy2"},
{"id": 1, "pushContent": "innerStrategy3"},
{"id": 1, "pushContent": "innerStrategy4"}],
"outerStrategyList": [1, 2],
"imageId": "aabbceadfdfdfdfdfdf",
"versionDesc": "11111111111111111111111",
"awardRule": "222222222222222222222",
"contact": "33333333333333333333333333"}
self.super_user = self.app1_owner_user
self.for_list_url = 'tasks-list'
self.for_detail_url = 'tasks-detail'
def test_normal_user_can_not_create_gray_task(self):
# TODO: when auth is enabled, the expected status code should be status.HTTP_403_FORBIDDEN
self.do_test_user_create_permission(self.normal_user, status.HTTP_403_FORBIDDEN)
def test_admin_user_can_create_gray_task(self):
self.do_test_user_create_permission(self.admin_user, status.HTTP_403_FORBIDDEN)
def test_app_owner_user_can_create_gray_task(self):
self.do_test_user_create_permission(self.app1_owner_user, status.HTTP_200_OK)
def test_other_app_owner_user_can_create_gray_task(self):
self.do_test_user_create_permission(self.app2_owner_user, status.HTTP_403_FORBIDDEN)
# delete test cases
def test_normal_user_can_not_delete_gray_task(self):
# TODO: when auth is enabled, the expected status code should be status.HTTP_403_FORBIDDEN
self.do_test_user_delete_permission(user=self.normal_user,
expect_status=status.HTTP_200_OK,
extra_verify=lambda res: self.assertEqual(res.data.get('msg', {}),
'1: You do not have permission to perform this action.'))
def test_admin_user_can_delete_gray_task(self):
self.do_test_user_delete_permission(user=self.admin_user,
expect_status=status.HTTP_200_OK,
extra_verify=lambda res: self.assertEqual(res.data.get('msg', {}),
'1: You do not have permission to perform this action.'))
def test_app_owner_can_delete_gray_task(self):
self.do_test_user_delete_permission(user=self.app1_owner_user,
expect_status=status.HTTP_200_OK,
extra_verify=lambda res: self.assertEqual(res.data.get('msg', {}), u'成功'))
def test_other_app_owner_can_not_delete_gray_task(self):
# TODO: when auth is enabled, the expected status code should be status.HTTP_403_FORBIDDEN
self.do_test_user_delete_permission(user=self.app2_owner_user,
expect_status=status.HTTP_200_OK,
extra_verify=lambda res: self.assertEqual(res.data.get('msg', {}),
'1: You do not have permission to perform this action.'))
# get test cases
def test_normal_user_can_get_task(self):
# TODO: when auth is enabled, the expected status code should be status.HTTP_200_OK
self.do_test_user_get_permission(self.normal_user, status.HTTP_200_OK)
class GrayTasksDetailTests(APITestCase, GetResponseMixin):
fixtures = [
"apps/app/fixtures/tests/apps.json",
"apps/common/fixtures/tests/images.json",
"apps/app/fixtures/tests/app_types.json",
"apps/auth/fixtures/tests/users.json",
"apps/auth/fixtures/tests/departments.json",
"apps/strategy/fixtures/tests/strategy_data.json",
"apps/user_group/fixtures/tests/user_groups.json",
"apps/task_manager/fixtures/tests/task_data.json",
"apps/task_manager/fixtures/tests/info_api_test_graytask.json",
]
def setUp(self):
self.normal_user = User.objects.get(username='normal_user')
self.admin_user = User.objects.get(username='admin_user')
self.app1_owner_user = User.objects.get(username='app_owner_user')
self.app2_owner_user = User.objects.get(username='test_app_owner')
self.app = App.objects.get(pk=1)
self.test_data = {"name": "任务管理测试2",
"appId": 1,
"startDate": "2017-5-20",
"endDate": "2017-5-27",
"innerStrategyList": [{"id": 1, "pushContent": "innerStrategy1"},
{"id": 1, "pushContent": "innerStrategy2"},
{"id": 1, "pushContent": "innerStrategy3"},
{"id": 1, "pushContent": "innerStrategy4"}],
"outerStrategyList": [1, 2],
"imageId": "aabbceadfdfdfdfdfdf",
"versionDesc": "11111111111111111111111",
"awardRule": "222222222222222222222",
"contact": "33333333333333333333333333"}
self.super_user = self.app1_owner_user
self.for_list_url = 'tasks-list'
self.for_detail_url = 'tasks-detail'
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url)
response = self.client.post(url, self.test_data, format='json')
self.task_id = self._get_response_id(response)
def test_create_task_no_innerstrategylist(self):
self.test_data["innerStrategyList"] = []
self.test_data["name"] = "任务管理测试3"
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url)
response = self.client.post(url, self.test_data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get("innerStrategyList"), None)
def test_create_task_no_outerstrategylist(self):
self.test_data["outerStrategyList"] = []
self.test_data["name"] = "任务管理测试3"
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url)
response = self.client.post(url, self.test_data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get("outerStrategyList"), None)
def test_create_task_success_no_strategy(self):
self.test_data["innerStrategyList"] = []
self.test_data["outerStrategyList"] = []
self.test_data["name"] = "任务管理测试3"
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url)
response = self.client.post(url, self.test_data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get("name"), "任务管理测试3")
def test_create_task_fail_by_inner_fail(self):
self.test_data["innerStrategyList"] = [{"id": ""}]
self.test_data["name"] = "任务管理测试3"
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url)
response = self.client.post(url, self.test_data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_business_status_code(response), status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get("msg"), "Create Task Fail, {'msg': 'strategy id must be int'}")
def test_create_task_no_permission(self):
self.test_data["appId"] = 2
self.test_data["name"] = "任务管理测试3"
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url)
response = self.client.post(url, self.test_data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_business_status_code(response), status.HTTP_403_FORBIDDEN)
self.assertEqual(response.data.get("msg"), "You do not have permission to perform this action.")
def test_create_task_no_image(self):
self.test_data.pop('imageId')
self.test_data["name"] = "任务管理测试3"
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url)
response = self.client.post(url, self.test_data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get("imageId"), "")
def test_get_task_list(self):
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url)
self.test_data["name"] = "任务管理测试3"
response = self.client.post(url, self.test_data, format='json')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_response_total(response), 5)
def test_get_task_list_is_join_kesutong(self):
self.client.force_authenticate(user=self.normal_user)
response = self.client.get(reverse('tasks-list'), {'isJoinKesutong': 'true'})
self.assertEqual(self._get_response_total(response), 2)
response = self.client.get(reverse('tasks-list'), {'isJoinKesutong': 'false'})
self.assertEqual(self._get_response_total(response), 2)
def test_get_task_list_isdisplay(self):
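        # Flow: a normal user filtering by isDisplay=true first sees nothing, the
        # owner then flips isDisplay on the task created in setUp, and the same
        # filter afterwards returns exactly one task.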
url = reverse(self.for_list_url)
self.client.force_authenticate(user=self.normal_user)
response = self.client.get(url + "?appId={}&isDisplay=true".format(self.app.id))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_business_status_code(response), status.HTTP_200_OK)
self.assertEqual(self._get_response_total(response), 0)
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url) + str(self.task_id) + "/isDisplay/"
is_display = {"isDisplay": "True"}
response = self.client.patch(url, is_display, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_business_status_code(response), status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('isDisplay'), True)
url = reverse(self.for_list_url)
self.client.force_authenticate(user=self.normal_user)
response = self.client.get(url + "?appId={}&isDisplay=true".format(self.app.id))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_business_status_code(response), status.HTTP_200_OK)
self.assertEqual(self._get_response_total(response), 1)
def test_get_task_by_id(self):
url = reverse(self.for_detail_url, kwargs={'pk': self.task_id})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_response_id(response), self.task_id)
def test_update_task_is_join_kesutong(self):
url = reverse(self.for_detail_url, kwargs={'pk': self.task_id})
response = self.client.patch(url, {"isJoinKesutong": "True"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['isJoinKesutong'], True)
response = self.client.patch(url, {"isJoinKesutong": "False"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['isJoinKesutong'], False)
def test_is_display_set_true(self):
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url)
self.test_data["name"] = "任务管理测试3"
response = self.client.post(url, self.test_data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_response_id(response), 5)
url = reverse(self.for_list_url) + str(self.task_id) + "/isDisplay/"
is_display = {"isDisplay": "True"}
response = self.client.patch(url, is_display, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('isDisplay'), True)
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url) + str(self.task_id) + "/"
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('isDisplay'), True)
def test_is_display_set_false(self):
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url) + str(self.task_id) + "/isDisplay/"
is_display = {"isDisplay": "false"}
response = self.client.patch(url, is_display, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('isDisplay'), False)
def test_is_display_fail_by_invalid_parameter(self):
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url) + str(self.task_id) + "/isDisplay/"
is_display = {"isDisplay": "1"}
response = self.client.patch(url, is_display, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_business_status_code(response), status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('msg'), "isDisplay must be true or false")
def test_start_test(self):
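        # Walks the task through steps 1-6 via the startTest endpoint; step 2 is
        # submitted twice, which also exercises the version-update path (see below).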
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url) + str(self.task_id) + "/startTest/"
step = 1
data = {
"currentStep": step,
"appVersion": {
"android": {
"urlDownload": "http://apphub.ffan.com/api/appdownload/ffan/0/0/develop/4.20.0.0.1898//None/None/Feifan_o2o_4_20_0_0_DEV_1898_2017_08_01_release.apk",
"createDate": "2017-08-01",
"versionId": "4.20.0.0.1898"
},
"ios": {
"urlDownload": "http://apphub.ffan.com/api/appdownload/ffan/0/1/develop-Inhouse/4.20.0.1982//None/None/FeiFan-Inhouse.ipa",
"createDate": "2017-07-31",
"versionId": "4.20.0.1982"
}
}
}
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('current_step'), step)
step = 2
data['currentStep'] = step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('current_step'), step)
step = 2
data['currentStep'] = step
        # Verify the version-update behaviour by re-submitting the same step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('current_step'), step)
step = 3
data['currentStep'] = step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('current_step'), step)
step = 4
data['currentStep'] = step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('current_step'), step)
step = 5
data['currentStep'] = step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('current_step'), step)
step = 6
data['currentStep'] = step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('current_step'), step)
def test_start_test_with_no_version(self):
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url) + str(self.task_id) + "/startTest/"
step = 1
data = {
"currentStep": step
}
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('current_step'), step)
step = 2
data['currentStep'] = step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('current_step'), step)
step = 2
data['currentStep'] = step
        # Verify the version-update behaviour by re-submitting the same step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('current_step'), step)
step = 3
data['currentStep'] = step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('current_step'), step)
step = 4
data['currentStep'] = step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('current_step'), step)
step = 5
data['currentStep'] = step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('current_step'), step)
step = 6
data['currentStep'] = step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('current_step'), step)
def test_start_test_fail(self):
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url) + str(self.task_id) + "/startTest/"
step = 0
data = {
"currentStep": step,
"appVersion": {
"android": {
"urlDownload": "http://apphub.ffan.com/api/appdownload/ffan/0/0/develop/4.20.0.0.1898//None/None/Feifan_o2o_4_20_0_0_DEV_1898_2017_08_01_release.apk",
"createDate": "2017-08-01",
"versionId": "4.20.0.0.1898"
},
"ios": {
"urlDownload": "http://apphub.ffan.com/api/appdownload/ffan/0/1/develop-Inhouse/4.20.0.1982//None/None/FeiFan-Inhouse.ipa",
"createDate": "2017-07-31",
"versionId": "4.20.0.1982"
}
}
}
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('status'), 400)
self.assertEqual(response.data.get('msg'), "current_step must be 1")
step = 1
data['currentStep'] = step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('current_step'), step)
step = 3
data['currentStep'] = step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('status'), 400)
self.assertEqual(response.data.get('msg'), "current_step mast step by step or not large max")
step = 5
data['currentStep'] = step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('status'), 400)
self.assertEqual(response.data.get('msg'), "current_step mast step by step or not large max")
def test_start_test_fail_with_no_version(self):
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url) + str(self.task_id) + "/startTest/"
step = 0
data = {
"currentStep": step
}
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('status'), 400)
self.assertEqual(response.data.get('msg'), "current_step must be 1")
step = 1
data['currentStep'] = step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get('current_step'), step)
step = 3
data['currentStep'] = step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('status'), 400)
self.assertEqual(response.data.get('msg'), "current_step mast step by step or not large max")
step = 5
data['currentStep'] = step
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('status'), 400)
self.assertEqual(response.data.get('msg'), "current_step mast step by step or not large max")
def test_start_test_fail_no_auth(self):
self.client.force_authenticate(user=self.normal_user)
url = reverse(self.for_list_url) + str(self.task_id) + "/startTest/"
step = 0
data = {
"currentStep": step,
"appVersion": {
"android": {
"urlDownload": "http://apphub.ffan.com/api/appdownload/ffan/0/0/develop/4.20.0.0.1898//None/None/Feifan_o2o_4_20_0_0_DEV_1898_2017_08_01_release.apk",
"createDate": "2017-08-01",
"versionId": "4.20.0.0.1898"
},
"ios": {
"urlDownload": "http://apphub.ffan.com/api/appdownload/ffan/0/1/develop-Inhouse/4.20.0.1982//None/None/FeiFan-Inhouse.ipa",
"createDate": "2017-07-31",
"versionId": "4.20.0.1982"
}
}
}
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_business_status_code(response), status.HTTP_403_FORBIDDEN)
def test_start_test_fail_status(self):
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url) + str(self.task_id) + "/currentStatus/"
data = {
"status": "finished",
}
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get("currentStatus"), "已结束")
url = reverse(self.for_list_url) + str(self.task_id) + "/startTest/"
step = 0
data = {
"currentStep": step,
"appVersion": {
"android": {
"urlDownload": "http://apphub.ffan.com/api/appdownload/ffan/0/0/develop/4.20.0.0.1898//None/None/Feifan_o2o_4_20_0_0_DEV_1898_2017_08_01_release.apk",
"createDate": "2017-08-01",
"versionId": "4.20.0.0.1898"
},
"ios": {
"urlDownload": "http://apphub.ffan.com/api/appdownload/ffan/0/1/develop-Inhouse/4.20.0.1982//None/None/FeiFan-Inhouse.ipa",
"createDate": "2017-07-31",
"versionId": "4.20.0.1982"
}
}
}
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('status'), 400)
self.assertEqual(response.data.get('msg'), "Task was test completed!")
def test_task_status(self):
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url) + str(self.task_id) + "/currentStatus/"
data = {
"status": "finished",
}
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get("currentStatus"), "已结束")
def test_task_status_fail(self):
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url) + str(self.task_id) + "/currentStatus/"
data = {
"status": "finished",
}
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get("currentStatus"), "已结束")
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('msg'), "Task can not be set to this status: finished")
def test_update_image(self):
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url) + str(self.task_id) + "/image/"
data = {
"imageId": "eprghperahgioaergji",
}
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('data', {}).get("imageId"), "eprghperahgioaergji")
def test_update_image_fail(self):
self.client.force_authenticate(user=self.app1_owner_user)
url = reverse(self.for_list_url) + str(self.task_id) + "/image/"
data = {
"imageId": "eprghperahgioaergji2",
}
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('msg'), "Update image Fail, No Image matches the given query. ")
class TaskStatusTests(APITestCase, GetResponseMixin):
fixtures = [
"apps/app/fixtures/tests/apps.json",
"apps/common/fixtures/tests/images.json",
"apps/app/fixtures/tests/app_types.json",
"apps/auth/fixtures/tests/users.json",
"apps/auth/fixtures/tests/departments.json",
"apps/strategy/fixtures/tests/strategy_data.json",
"apps/user_group/fixtures/tests/user_groups.json",
"apps/task_manager/fixtures/tests/task_data.json",
]
def setUp(self):
self.task_status_url = 'status-list'
self.normal_user = User.objects.get(username='normal_user')
def test_get_status(self):
self.client.force_authenticate(user=self.normal_user)
url = reverse(self.task_status_url)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_response_total(response), 3)
class TaskExtensionFieldTests(APITestCase, GetResponseMixin):
fixtures = [
"apps/app/fixtures/tests/apps.json",
"apps/common/fixtures/tests/images.json",
"apps/app/fixtures/tests/app_types.json",
"apps/auth/fixtures/tests/users.json",
"apps/auth/fixtures/tests/departments.json",
"apps/strategy/fixtures/tests/strategy_data.json",
"apps/user_group/fixtures/tests/user_groups.json",
"apps/task_manager/fixtures/tests/task_data.json",
"apps/task_manager/fixtures/tests/info_api_test_graytask.json"
]
def setUp(self):
self.client.force_authenticate(user=User.objects.get(username='app_owner_user'))
def test_task_extension_field_create(self):
url = reverse('issueextfields-list', kwargs={'task_id': 1})
response = self.client.post(url, {'name': '广场'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, OrderedDict([('status', 200),
('msg', '成功'),
('data', {'id': 1, 'isOptional': True, 'name': '广场',
'type': None, 'taskId': 1,
'default': None, 'taskName': '0711飞凡灰度发布'})]))
response = self.client.post(url, {'name': '手机型号', 'isOptional': True, 'default': 'iPhone', 'type': 'string'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
OrderedDict([('status', 200), ('msg', '成功'),
('data', {'id': 2, 'isOptional': True, 'name': '手机型号',
'type': 'string', 'taskId': 1, 'default': 'iPhone',
'taskName': '0711飞凡灰度发布'})]))
def test_task_extension_field_can_not_create_same_name_twice(self):
url = reverse('issueextfields-list', kwargs={'task_id': 1})
self.client.post(url, {'name': '手机型号'})
response = self.client.post(url, {'name': '手机型号'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, OrderedDict([('status', 400), ('msg', '此任务已经有了同名扩展字段')]))
def test_task_extension_field_patch(self):
url = reverse('issueextfields-list', kwargs={'task_id': 1})
self.client.post(url, {'name': '广场'})
response = self.client.patch(reverse('issueextfields-detail',
kwargs={'task_id': 1, 'pk': 1}), {'name': '所在万达广场'})
self.assertEqual(response.data, OrderedDict([('status', 200),
('msg', '成功'),
('data', {'id': 1, 'isOptional': True, 'name': '所在万达广场',
'type': None, 'taskId': 1,
'default': None, 'taskName': '0711飞凡灰度发布'})]))
def test_task_extension_field_get_detail(self):
url = reverse('issueextfields-list', kwargs={'task_id': 1})
self.client.post(url, {'name': '广场'})
response = self.client.get(reverse('issueextfields-detail', kwargs={'task_id': 1, 'pk': 1}))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
OrderedDict([('status', 200), ('msg', '成功'),
('data', {'isOptional': True, 'taskName': '0711飞凡灰度发布',
'type': None, 'id': 1, 'default': None, 'name': '广场', 'taskId': 1})]))
def test_task_extension_field_delete(self):
url = reverse('issueextfields-list', kwargs={'task_id': 1})
self.client.post(url, {'name': '广场'})
response = self.client.delete(reverse('issueextfields-detail', kwargs={'task_id': 1, 'pk': 1}))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, OrderedDict([('status', 200), ('msg', '成功')]))
# confirm it's really deleted
response = self.client.get(reverse('issueextfields-list', kwargs={'task_id': 1}))
self.assertEqual(response.data,
OrderedDict([('status', 200), ('msg', '成功'),
('data', OrderedDict([('total', 0), ('next', None),
('previous', None), ('results', [])]))]))
def test_task_extension_field_list_and_filter(self):
url = reverse('issueextfields-list', kwargs={'task_id': 1})
self.client.post(url, {'name': '广场'})
self.client.post(url, {'name': '手机型号', 'isOptional': True, 'default': 'iPhone', 'type': 'string'})
response = self.client.get(url)
self.assertEqual(self._get_response_total(response), 2)
response = self.client.get(url, {'name': '广场'})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(url, {'name': '手机型号'})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(url, {'name': '汪汪'})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(url, {'isOptional': True})
self.assertEqual(self._get_response_total(response), 2)
response = self.client.get(url, {'isOptional': False})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(url, {'type': 'string'})
self.assertEqual(self._get_response_total(response), 1)
        # Combined filter queries
response = self.client.get(url, {'name': '手机型号', 'isOptional': True, 'type': 'string'})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(url, {'name': '广场', 'isOptional': True, 'type': 'string'})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(url, {'name': '广场', 'isOptional': True})
self.assertEqual(self._get_response_total(response), 1)
| StarcoderdataPython |
172457 | """https://code.google.com/codejam/contest/10284486/dashboard#s=p1&a=1"""
def main():
T = int(input())
for i in range(1, T + 1):
N, K, P = (int(s) for s in input().split())
A = []
B = []
C = []
for _ in range(K):
a, b, c = (int(s) for s in input().split())
A.append(a)
B.append(b)
C.append(c)
print("Case #{}: {}".format(i, solve(N, P, A, B, C)))
def solve(N: int, P: int, A: list, B: list, C: list) -> str:
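    # Approach (as inferred from the code below; B is read in main() but unused here):
    # pin each position A[i] (1-indexed) to digit C[i], then fill the remaining
    # positions from the right with the binary digits of P - 1, left-padded with
    # zeros so the free digits total N - len(A).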
res = [None for _ in range(N)]
fill = list(bin(P - 1)[2:])
fill = ["0", ] * (N - len(fill) - len(A)) + fill
for a, c in zip(A, C):
res[a - 1] = str(c)
for i in range(len(res) - 1, -1, -1):
if res[i] is not None:
continue
if not fill:
break
res[i] = fill[-1]
fill.pop()
return "".join(res)
if __name__ == '__main__':
main()
| StarcoderdataPython |
4804996 | import random as rand
import matplotlib.pyplot as plt
#initial position
current = 0
# burst parameters
m = [1, 10, 100] #burst length
num_bursts = [1000, 100, 10] #number of bursts
#total_length = m * num_bursts
total_length = 1000
#bias parameters
walk_probabilities = [.5, .51, .6, .7, .8]
#unbiased/biased random walks
for k in walk_probabilities:
random_walk = [] #holds current position and length of walk
current = 0 #start walk at 0
for j in range(total_length):
alpha = rand.random()
if (alpha < k): #move right if probability is less than walk_probability
current += 1
else: #move left otherwise
current -= 1
random_walk.append((current, j)) #append new position and length of walk
x, y = zip(*random_walk) #extract random walk
plt.plot(x, y, label='p = {0}'.format(k))
#short burst random walk
for r in range(len(num_bursts)):
t = 0 #step counter
burst_max = 0 #overall burst max for a given walk
random_walk = []
for j in range(num_bursts[r]): #loop for the number of bursts
current = burst_max #set the current position of the walk to be the overall burst max
for i in range(m[r]): #loop for burst
t+=1
alpha = rand.random()
if (alpha < 0.5): #move to right with probability 0.5
current += 1
else: #move to left with probability 0.5
current -= 1
random_walk.append((current, t)) #append position and length of walk
burst_max = max(burst_max, current) #set new overall burst max
x, y = zip(*random_walk) #extract random walk
plt.plot(x, y, label='burst, {0} steps'.format(m[r]))
ax = plt.gca()
ax.spines['left'].set_position(('data', 0)) #center y-axis
plt.xlim([-total_length, total_length])
plt.ylim([1, total_length * 1.05])
plt.legend(loc='upper left')
plt.xlabel("Position")
plt.ylabel("Step").set_position(('data', .3))
plt.title("Random walks and unbiased short bursts")
plt.savefig("random_walks_unbiased_bursts.png")
plt.show() | StarcoderdataPython |
1657544 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
# import default models from django-edw to materialize them
from edw.models.defaults import mapping
from edw.models.defaults.customer import Customer
from edw.models.defaults.term import Term
from edw.models.defaults.data_mart import DataMart
from todos import Todo
| StarcoderdataPython |
1712334 | <filename>hotword.py
import snowboydecoder
import sys
import signal
import os.path
class hotword:
def __init__(self):
self.interrupted = False
self.model = ''.join([os.path.dirname(__file__), '/HARU.pmdl'])
def signal_handler(self, signal, frame):
self.interrupted = True
def interrupt_callback(self):
return self.interrupted
def start_detection(self, callback_func):
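        # Register SIGINT so Ctrl-C flips the interrupted flag, then block in the
        # snowboy detector loop; callback_func fires on each detection of the model.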
signal.signal(signal.SIGINT, self.signal_handler)
self.detector = snowboydecoder.HotwordDetector(self.model, sensitivity=0.5)
self.detector.start(detected_callback=callback_func, interrupt_check=self.interrupt_callback, sleep_time=5)
def terminate_detection(self):
self.detector.terminate()
| StarcoderdataPython |
1731054 | #!/usr/bin/env python3
# coding=utf-8
from __future__ import print_function
import os
import click
from importlib.machinery import SourceFileLoader
from inspect import getmembers
from absl import logging
from .test import cli as cli_test
from .workflow import cli as cli_workflow
from .server import cli as cli_server
logger = logging.get_absl_logger()
__all__ = ['create_cli']
def load_commands_from_file(path):
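    # Import the file at `path` as a throwaway module and collect every
    # click.Command object defined at its top level.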
module = SourceFileLoader('custom_commands', path).load_module()
commands = [obj for name, obj in getmembers(module) if isinstance(obj, click.core.Command)]
return commands
def create_cli(package_name, package_path, exclude=None, type_=None, config=None):
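    # Assemble the root click group: the bundled test/workflow/server commands,
    # plus any click commands found in an optional commands.py that sits one
    # directory above package_path.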
base_path = os.path.abspath(os.path.join(package_path, '..'))
@click.group('custom')
@click.option('--debug', is_flag=True, help='Enable debug mode.')
@click.pass_context
def cli(ctx, debug):
ctx.obj = {
'debug': debug,
'package_name': package_name,
'package_path': package_path,
'base_path': base_path,
'type': type_,
'config': config,
}
commands = {}
commands.update(cli_test.commands)
commands.update(cli_workflow.commands)
commands.update(cli_server.commands)
for name, command in commands.items():
cli.add_command(command, name=name)
commands_file_path = os.path.join(base_path, 'commands.py')
logger.info(commands_file_path)
if os.path.exists(commands_file_path):
commands = load_commands_from_file(commands_file_path)
for command in commands:
cli.add_command(command)
return cli
| StarcoderdataPython |
3309138 | from os import listdir
from lxml import etree
import json
import time
import numpy as np
from scipy.stats import ttest_ind
xmlPath = 'XML/'
jsonPath = 'JSON/'
def testBatchXML():
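    # Round-trip every file under XML/ (parse with lxml, serialise back) and
    # return the elapsed wall-clock time for the whole batch.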
startTime = time.time()
for file in listdir(xmlPath):
with open(xmlPath+file, 'r') as xmlFile:
xmlString = bytes(xmlFile.read(), 'utf-8')
x = etree.fromstring(xmlString)
string = etree.tostring(x)
del xmlString, x, string
return time.time() - startTime
def testBatchJSON():
startTime = time.time()
for file in listdir(jsonPath):
with open(jsonPath+file, 'r') as jsonFile:
jsonString = jsonFile.read()
j = json.loads(jsonString)
string = json.dumps(j)
del jsonString, j, string
return time.time() - startTime
nbIter = 1000
timesXML = []
timesJSON = []
for i in range(nbIter):
if i%(nbIter//100) == 0 and i != 0:
print('{}%'.format(i*100//nbIter))
timesXML.append(testBatchXML())
timesJSON.append(testBatchJSON())
print('lxml.etree, json, {} iterations, 3 sigma'.format(nbIter))
print('XML: {:.3f} s +/- {:.3f}'.format(np.mean(timesXML), 3*np.std(timesXML)))
print('JSON: {:.3f} s +/- {:.3f}'.format(np.mean(timesJSON), 3*np.std(timesJSON)))
t, p = ttest_ind(timesXML, timesJSON, equal_var=False)
print('p-value: {}'.format(p))
| StarcoderdataPython |
1784348 | {
"targets": [
{
"target_name": "Engine",
"type": "none",
"configurations": {
"Debug": {},
"Release": {}
},
"all_dependent_settings": {
"include_dirs": [ "Inc" ]
},
"direct_dependent_settings": {
"conditions": [
["OS == 'linux'", {
"libraries": [ "-lEngine" ]
}],
["OS == 'win'", {
"libraries": [ "-lEngine.lib" ],
"msvs_settings": {
"VCLinkerTool": {
"AdditionalLibraryDirectories": [
">(DEPTH)/Engine/Lib"
]
}
}
}]
]
}
}
]
}
| StarcoderdataPython |
32204 | <gh_stars>10-100
"""Views for Django Rest Framework Session Endpoint extension."""
from django.contrib.auth import login, logout
from rest_framework import parsers, renderers
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.response import Response
from rest_framework.views import APIView
class SessionAuthView(APIView):
"""Provides methods for REST-like session authentication."""
throttle_classes = ()
permission_classes = ()
parser_classes = (
parsers.FormParser,
parsers.MultiPartParser,
parsers.JSONParser
)
renderer_classes = (renderers.JSONRenderer,)
def post(self, request):
"""Login using posted username and password."""
serializer = AuthTokenSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data['user']
login(request, user)
return Response({'detail': 'Session login successful.'})
def delete(self, request):
"""Logout the current session."""
logout(request)
return Response({'detail': 'Session logout successful.'})
session_auth_view = SessionAuthView.as_view()
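# A minimal wiring sketch (the URL and import below are assumptions, not taken
# from this package):
#
#     # urls.py
#     # from django.urls import path
#     # urlpatterns = [path('auth/session/', session_auth_view)]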
| StarcoderdataPython |
113601 | <filename>scripts/extract_single_mode_races.py
# -*- coding=UTF-8 -*-
# pyright: strict
""". """
if True:
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from typing import Iterator, Text, Tuple
import sqlite3
import argparse
import os
import contextlib
from auto_derby.single_mode.race import Race, DATA_PATH
import json
import pathlib
def _get_fan_set(db: sqlite3.Connection, fan_set_id: int) -> Tuple[int, ...]:
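    # Fan counts for one fan_set_id, ordered by the "order" column.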
with contextlib.closing(
db.execute(
"""
SELECT fan_count
FROM single_mode_fan_count
WHERE fan_set_id = ?
ORDER BY "order"
;
""",
(fan_set_id,),
)
) as cur:
return tuple(i[0] for i in cur.fetchall())
def _read_master_mdb(path: Text) -> Iterator[Race]:
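    # Join the single-mode program, race, course and text tables in master.mdb
    # and yield one populated Race per row; fan counts come from _get_fan_set.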
db = sqlite3.connect(path)
cur = db.execute(
"""
SELECT
t4.text,
t7.text AS stadium,
t1.month,
t1.half,
t3.grade,
t1.race_permission,
t1.need_fan_count,
t1.fan_set_id,
t3.entry_num,
t5.distance,
t5.ground,
t5.inout,
t5.turn,
t6.target_status_1,
t6.target_status_2
FROM single_mode_program AS t1
LEFT JOIN race_instance AS t2 ON t1.race_instance_id = t2.id
LEFT JOIN race AS t3 ON t2.race_id = t3.id
LEFT JOIN text_data AS t4 ON t4.category = 28 AND t2.id = t4."index"
LEFT JOIN race_course_set AS t5 ON t5.id = t3.course_set
LEFT JOIN race_course_set_status AS t6 ON t6.course_set_status_id = t5.course_set_status_id
LEFT JOIN text_data AS t7 ON t7.category = 35 AND t7."index" = t5.race_track_id
WHERE t1.base_program_id = 0
ORDER BY t1.race_permission, t1.month, t1.half, t3.grade DESC
;
"""
)
with contextlib.closing(cur):
for i in cur:
assert len(i) == 15, i
v = Race()
(
v.name,
v.stadium,
v.month,
v.half,
v.grade,
v.permission,
v.min_fan_count,
fan_set_id,
v.entry_count,
v.distance,
v.ground,
v.track,
v.turn,
) = i[:-2]
v.target_statuses = tuple(j for j in i[-2:] if j)
v.fan_counts = _get_fan_set(db, fan_set_id)
yield v
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"path",
nargs="?",
default=os.getenv("LocalAppData", "")
+ "Low/cygames/umamusume/master/master.mdb",
)
args = parser.parse_args()
path: Text = args.path
data = [i.to_dict() for i in _read_master_mdb(path)]
with pathlib.Path(DATA_PATH).open("w", encoding="utf-8") as f:
json.dump(data, f, ensure_ascii=False, indent=2)
if __name__ == "__main__":
main()
| StarcoderdataPython |
1710079 | <gh_stars>1-10
import datetime
import json
import os
import time
import snap
from db_interface import get_edge_number, get_all
id_pkg_dict = {}
def get_id_from_package(graph, package):
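    # Return the cached node id for a package, creating the node on first sight
    # and storing the package name in its "pkg" attribute.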
node_id = id_pkg_dict.get(package, -1)
if node_id == -1:
node_id = graph.AddNode(-1)
id_pkg_dict[package] = node_id
graph.AddStrAttrDatN(node_id, package, "pkg")
return node_id
def create_play_store_graph(output_graph_path):
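    # Build a TNEANet with one node per app and one edge per "similarTo" link,
    # then save it as a SNAP binary, an edge list, and a package-to-id JSON map.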
start = time.time()
docs = get_all()
n = docs.count()
print("# Nodes: {0}".format(n))
m = get_edge_number()
print("# Edges: {0}".format(m))
graph = snap.TNEANet.New(n, m)
i = 0
for doc in docs:
if i % 10000 == 0:
print("{0} {1:.2f}% completed".format(datetime.datetime.now(), float(i) / n * 100.0))
package = doc.get('docid')
node_id = get_id_from_package(graph, package)
similar_packages = doc.get('similarTo')
for p in similar_packages:
target_id = get_id_from_package(graph, p)
graph.AddEdge(node_id, target_id)
i += 1
end = time.time()
print("Saving to binary")
graph_path = os.path.abspath(output_graph_path + ".graph")
fout = snap.TFOut(graph_path)
graph.Save(fout)
fout.Flush()
print("Saving Edge List")
edgelist_path = os.path.abspath(output_graph_path + ".edgelist.txt")
snap.SaveEdgeList(graph, edgelist_path, "Google Play Store snapshot graph, period 10/08/2017 - 07/09/2017")
print("Saving dictionary")
dict_path = os.path.abspath(output_graph_path + ".pkg_to_id_dict.json")
with open(dict_path, 'w') as f:
f.write(json.dumps(id_pkg_dict, indent=4))
print("Total time: {0}".format(end - start))
| StarcoderdataPython |
3304032 | <reponame>cetinibs/vercel
from flask import Flask, Response, request
app = Flask(__name__)
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
return Response(request.full_path, mimetype="text/plain")
if __name__ == '__main__':
app.run(debug=True, port=8002)
| StarcoderdataPython |
4804123 | <gh_stars>10-100
import json
import os
from urllib import request, parse
THRESHOLD = 3000000000000000000 # 3 ETH
FAUCET_ADDRESS = os.environ['FAUCET_ADDRESS']
INFURA_API_TOKEN = os.environ['INFURA_API_TOKEN']
GITHUB_BOT_TOKEN = os.environ['GITHUB_BOT_TOKEN']
def get_faucet_balance():
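    # Ask the Infura Rinkeby endpoint for eth_getBalance of the faucet address
    # and convert the hex JSON-RPC result to an int number of wei.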
url = f'https://rinkeby.infura.io/v3/{INFURA_API_TOKEN}'
data_dict = {
'jsonrpc': '2.0',
'id': 4,
'method': 'eth_getBalance',
'params': [FAUCET_ADDRESS, 'latest']
}
headers = {'Content-Type': 'application/json'}
data = json.dumps(data_dict)
req = request.Request(url, data.encode(), headers)
with request.urlopen(req) as resp:
result_decoded = (resp.read()).decode()
result_json = json.loads(result_decoded)
# hex string to int
balance = int(result_json['result'], 16)
return balance
def submit_github_issue(balance):
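    # File a GitHub issue on the plasma-contracts repo asking for a top-up,
    # authenticated with the bot token from the environment.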
url = 'https://api.github.com/repos/omgnetwork/plasma-contracts/issues'
data_dict = {
'title': 'Faucet address of CI is in low balance!',
'body': f'Please send some fund to the poor faucet: `{FAUCET_ADDRESS}`. '\
f'Current balance is: `{balance}` wei',
}
headers = {
'Accept': 'application/vnd.github.v3+json',
'authorization': f'token {GITHUB_BOT_TOKEN}'
}
data = json.dumps(data_dict)
req = request.Request(url, data.encode(), headers)
request.urlopen(req)
with request.urlopen(req) as resp:
result = (resp.read()).decode()
print('GH submission result:', result)
if __name__ == "__main__":
balance = get_faucet_balance()
print('Faucet balance:', balance, '(wei)')
if balance < THRESHOLD:
print('balance is low, submitting GH issue...')
submit_github_issue(balance)
| StarcoderdataPython |
3360226 | <reponame>zoho/zohocrm-python-sdk-2.0
from abc import ABC, abstractmethod
class DeletedRecordsHandler(ABC):
def __init__(self):
"""Creates an instance of DeletedRecordsHandler"""
pass
| StarcoderdataPython |
147138 | <reponame>lorenanda/Supermarket_MCMC_simulation<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Customer class that simulates the paths
of new customers in the supermarket.
"""
import time
import numpy as np
import pandas as pd
import cv2
import astar as at
from animation_template import SupermarketMap, MARKET
# from transition_matrix import transition_matrix
transition_matrix = pd.read_csv("./data/transition_matrix.csv")
transition_matrix.set_index("location", inplace=True)
tiles = cv2.imread('./images/tiles.png')
TILE_SIZE = 32
OFS = 50
# dest = pd.read_csv("./data/destinations.csv")
# dest.set_index("location", inplace=True)
dest = {
"entrance_x": [9, 10],
"entrance_y": [14],
"dairy_x": [2, 4, 6],
"dairy_y": [3],
"fruit_x": [5, 6, 3],
"fruit_y": [6],
"spices_x": [6, 4, 2],
"spices_y": [10],
"drinks_x": [6, 4, 2],
"drinks_y": [11],
"checkout_x": [8, 9],
"checkout_y": [7, 3]}
class Customer:
''' a single customer that moves through the supermarket in a MCMC simulation'''
# constructor
def __init__(self, id, state, transition_mat, terrain_map, image, x, y):
self.id = id
self.state = state
self.transition_mat = transition_mat
self.path = ["entrance"]
self.terrain_map = terrain_map
self.image = image
self.x = x
self.y = y
self.path_to_dest = 0
# repr
def __repr__(self):
'''returns a csv string for that customer
'''
return f'{self.id}, {self.state}, {self.path}, {self.x},{self.y}'
def is_active(self):
'''
Returns True if the customer has not reached the checkout for
the second time yet, False otherwise.
'''
if self.state == 'checkout':
return False
else:
return True
# set state
def next_state(self):
'''Propagates the customer to the next state using a weighted
        random choice from the transition probabilities conditional on the
current state.
Returns nothing.
'''
        # weighted choice over all sections, using the transition-matrix row for the current state
next_location = np.random.choice(
self.transition_mat.columns.values, p=self.transition_mat.loc[self.state])
self.state = next_location
self.path.append(self.state)
def draw(self, frame):
        # overlay the customer sprite onto the frame at its current tile position
        xpos = OFS + self.x * TILE_SIZE
        ypos = OFS + self.y * TILE_SIZE
        # shape[1] is the sprite width; identical to shape[0] for the square tiles used here
        frame[ypos:ypos + self.image.shape[0],
              xpos:xpos + self.image.shape[1]] = self.image
def get_shortest_path(self, grid, dest):
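        # Pick a random concrete tile for the previous section and the new one
        # from the dest lookup, then compute an A* path between them with
        # at.find_path.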
start = self.path[len(self.path) - 2]
end = self.state
print(start)
print(end)
# if start == end:
# self.path_to_dest = 0
# else:
start_loc_x = np.random.choice(dest[start+"_x"])
start_loc_y = np.random.choice(dest[start+"_y"])
end_loc_x = np.random.choice(dest[end+"_x"])
end_loc_y = np.random.choice(dest[end + "_y"])
print(start_loc_x)
print(start_loc_y)
print(end_loc_x)
print(end_loc_y)
path_to_dest = at.find_path(grid, (start_loc_x, start_loc_y),
(end_loc_x, end_loc_y), at.possible_moves)
self.path_to_dest = path_to_dest
print(self.path_to_dest)
def move(self, next_p):
if next_p != 0:
self.x = next_p[1]
self.y = next_p[0]
else:
self.x = self.x
self.y = self.y
if __name__ == '__main__':
marketMap = SupermarketMap(MARKET, tiles)
cust1 = Customer(5, "entrance", transition_matrix, marketMap,
tiles[3 * 32:4 * 32, 0 * 32:1 * 32], 15, 10)
cust1.next_state()
print(cust1)
cust1.get_shortest_path(grid=at.grid, dest=dest)
print(cust1.path_to_dest)
if cust1.path_to_dest != 0:
for next_p in cust1.path_to_dest:
cust1.move(next_p)
print(cust1)
print(cust1)
| StarcoderdataPython |
3226953 | import sqlite3
import logging
from weight_unit import WeightUnit
class Database:
weight_units = []
def __init__(self, path):
self.db = sqlite3.connect(path)
cursor = self.db.cursor()
for row in cursor.execute('SELECT id, name, language_id FROM nutrition_weightunit'):
self.weight_units.append(WeightUnit(
row[1],
row[0],
row[2]
))
cursor.close()
def get_last_ingredient_id(self):
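        # Highest ingredient id currently in nutrition_ingredient, or 0 if empty.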
cursor = self.db.cursor()
cursor.execute('SELECT id FROM nutrition_ingredient ORDER BY id DESC LIMIT 1')
result = cursor.fetchone()
        if result is None:
result = 0
else:
result = result[0]
cursor.close()
return result
def insert_ingredient(self, ingredient):
logging.info('Inserting ingredient {}'.format(ingredient.name))
query = ('INSERT INTO nutrition_ingredient (id, license_author, status, creation_date, update_date, '
'name, energy, protein, carbohydrates, carbohydrates_sugar, fat, fat_saturated, fibres, '
'sodium, language_id, license_id) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)')
parameters = (ingredient.id, ingredient.license_author, ingredient.status, ingredient.creation_date,
ingredient.update_date, ingredient.name, ingredient.energy, ingredient.protein,
ingredient.carbohydrates, ingredient.carbohydrates_sugar, ingredient.fat, ingredient.fat_saturated,
ingredient.fibres, ingredient.sodium, ingredient.language_id, ingredient.license_id)
cursor = self.db.cursor()
cursor.execute(query, parameters)
self.db.commit()
cursor.close()
def insert_weight_unit(self, weight_unit):
logging.info('Inserting weight unit {}'.format(weight_unit.name))
query = 'INSERT INTO nutrition_weightunit (name, language_id) VALUES (?, ?)'
cursor = self.db.cursor()
cursor.execute(query, (weight_unit.name, weight_unit.language_id))
weight_unit.id = cursor.lastrowid
self.weight_units.append(weight_unit)
self.db.commit()
cursor.close()
return weight_unit.id
def insert_ingredient_weight_unit(self, ingredient_weight_unit):
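        # Reuse a cached weight unit with the same name when possible, otherwise
        # insert it first, then link it to the ingredient.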
found = False
for weight_unit in self.weight_units:
if weight_unit.name == ingredient_weight_unit.weight_unit.name:
found = True
ingredient_weight_unit.unit_id = weight_unit.id
break
if not found:
ingredient_weight_unit.unit_id = self.insert_weight_unit(ingredient_weight_unit.weight_unit)
logging.info('Inserting ingredient weight unit {}, {}'.format(ingredient_weight_unit.unit_id, ingredient_weight_unit.ingredient_id))
query = 'INSERT INTO nutrition_ingredientweightunit (gram, amount, ingredient_id, unit_id) VALUES (?, ?, ?, ?)'
cursor = self.db.cursor()
cursor.execute(query, (ingredient_weight_unit.gram, ingredient_weight_unit.amount,
ingredient_weight_unit.ingredient_id, ingredient_weight_unit.unit_id))
self.db.commit()
cursor.close()
def close(self):
self.db.commit()
self.db.close()
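# A minimal usage sketch (assumes the wger-style nutrition tables already exist
# in the target SQLite file and that the ingredient objects are built elsewhere):
#
#     db = Database('database.sqlite')
#     ingredient.id = db.get_last_ingredient_id() + 1
#     db.insert_ingredient(ingredient)
#     db.insert_ingredient_weight_unit(ingredient_weight_unit)
#     db.close()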
| StarcoderdataPython |
1601115 | <filename>lexer_token_map.py
# Map generic token type string to STC lexer constants.
# The entry for STC_LEX_NULL is a template containing all possible token types.
from wx import stc
lexer_token_description = {
"assembly": "Assembly Code",
"character": "Character",
"comment": "Block Comment",
"commentline": "Line Comment",
"decorator": "Decorator",
"default": "Default",
"defclass": "Defined Class Name",
"defun": "Defined Function Name",
"defmodule": "Defined Module Name",
"diffadded": "Diff Added",
"diffchanged": "Diff Changed",
"diffdeleted": "Diff Deleted",
"diffheader": "Diff Header",
"diffposition": "Diff Position",
"error": "Error",
"identifier": "Identifier",
"keyword": "Keyword",
"label": "Label",
"math": "Inline Math",
"mathblock": "Math Block",
"number": "Number",
"operator": "Operator",
"preprocessor": "Preprocessor",
"reference": "Variable Reference",
"regex": "Regular Expression",
"string": "String",
"stringblock": "Block String",
"stringeol": "Unterminated String",
"stringref": "Variable Reference In String",
"symbol": "Symbol",
"tag": "Tag",
"tagattribute": "Tag Attribute",
}
lexer_tokens_html = {
"comment": [stc.STC_H_COMMENT, stc.STC_H_XCCOMMENT, stc.STC_H_SGML_1ST_PARAM_COMMENT, stc.STC_H_SGML_COMMENT, stc.STC_HPHP_COMMENT, stc.STC_HJ_COMMENT, stc.STC_HJ_COMMENTDOC],
"commentline": [stc.STC_HPHP_COMMENTLINE, stc.STC_HJ_COMMENTLINE],
"default": [stc.STC_H_DEFAULT, stc.STC_H_SGML_1ST_PARAM, stc.STC_H_SGML_DEFAULT, stc.STC_H_SGML_BLOCK_DEFAULT, stc.STC_H_SGML_SPECIAL, stc.STC_HPHP_DEFAULT, stc.STC_HJ_DEFAULT],
"error": [stc.STC_H_SGML_ERROR],
"keyword": [stc.STC_HPHP_WORD, stc.STC_HJ_KEYWORD, stc.STC_HJ_WORD],
"number": [stc.STC_H_NUMBER, stc.STC_H_VALUE, stc.STC_HPHP_NUMBER, stc.STC_HPHP_OPERATOR, stc.STC_HJ_NUMBER],
"operator": [stc.STC_HJ_SYMBOLS],
"reference": [stc.STC_HPHP_VARIABLE],
"regex": [stc.STC_HJ_REGEX],
"string": [stc.STC_H_DOUBLESTRING, stc.STC_H_SINGLESTRING, stc.STC_H_SGML_DOUBLESTRING, stc.STC_H_SGML_SIMPLESTRING,stc.STC_HPHP_HSTRING, stc.STC_HPHP_SIMPLESTRING, stc.STC_HJ_DOUBLESTRING, stc.STC_HJ_SINGLESTRING],
"stringblock": [stc.STC_H_CDATA],
"stringeol": [stc.STC_HJ_STRINGEOL],
"stringref": [stc.STC_HPHP_COMPLEX_VARIABLE, stc.STC_HPHP_HSTRING_VARIABLE],
"symbol": [stc.STC_H_ENTITY, stc.STC_H_SGML_ENTITY],
"tag": [stc.STC_H_ASP, stc.STC_H_ASPAT, stc.STC_H_OTHER, stc.STC_H_QUESTION, stc.STC_H_TAG, stc.STC_H_TAGEND, stc.STC_H_TAGUNKNOWN, stc.STC_H_SCRIPT, stc.STC_H_XMLSTART, stc.STC_H_XMLEND, stc.STC_H_SGML_COMMAND],
"tagattribute": [stc.STC_H_ATTRIBUTE, stc.STC_H_ATTRIBUTEUNKNOWN],
}
lexer_tokens_basic = {
"assembly": [stc.STC_B_ASM],
"comment": [stc.STC_B_COMMENT],
"default": [stc.STC_B_DEFAULT],
"identifier": [stc.STC_B_IDENTIFIER, stc.STC_B_CONSTANT],
"keyword": [stc.STC_B_KEYWORD, stc.STC_B_KEYWORD2, stc.STC_B_KEYWORD3, stc.STC_B_KEYWORD4],
"label": [stc.STC_B_LABEL],
"number": [stc.STC_B_NUMBER, stc.STC_B_BINNUMBER, stc.STC_B_HEXNUMBER, stc.STC_B_DATE],
"operator": [stc.STC_B_OPERATOR],
"preprocessor": [stc.STC_B_PREPROCESSOR],
"string": [stc.STC_B_STRING],
"stringeol": [stc.STC_B_STRINGEOL, stc.STC_B_ERROR],
}
lexer_tokens_eiffel = {
"character": [stc.STC_EIFFEL_CHARACTER],
"commentline": [stc.STC_EIFFEL_COMMENTLINE],
"default": [stc.STC_EIFFEL_DEFAULT],
"identifier": [stc.STC_EIFFEL_IDENTIFIER],
"keyword": [stc.STC_EIFFEL_WORD],
"number": [stc.STC_EIFFEL_NUMBER],
"operator": [stc.STC_EIFFEL_OPERATOR],
"string": [stc.STC_EIFFEL_STRING],
"stringeol": [stc.STC_EIFFEL_STRINGEOL],
}
lexer_tokens_matlab = {
"comment": [stc.STC_MATLAB_COMMENT],
"default": [stc.STC_MATLAB_DEFAULT],
"identifier": [stc.STC_MATLAB_IDENTIFIER],
"keyword": [stc.STC_MATLAB_KEYWORD],
"number": [stc.STC_MATLAB_NUMBER],
"operator": [stc.STC_MATLAB_OPERATOR],
"preprocessor": [stc.STC_MATLAB_COMMAND],
"string": [stc.STC_MATLAB_DOUBLEQUOTESTRING, stc.STC_MATLAB_STRING],
}
lexer_tokens_fortran = {
"comment": [stc.STC_F_COMMENT],
"default": [stc.STC_F_DEFAULT, stc.STC_F_CONTINUATION],
"identifier": [stc.STC_F_IDENTIFIER],
"keyword": [stc.STC_F_WORD, stc.STC_F_WORD2, stc.STC_F_WORD3],
"label": [stc.STC_F_LABEL],
"number": [stc.STC_F_NUMBER],
"operator": [stc.STC_F_OPERATOR, stc.STC_F_OPERATOR2],
"preprocessor": [stc.STC_F_PREPROCESSOR],
"string": [stc.STC_F_STRING1, stc.STC_F_STRING2],
"stringeol": [stc.STC_F_STRINGEOL],
}
lexer_token_map = {
stc.STC_LEX_NULL: {
"assembly": [],
"character": [],
"comment": [],
"commentline": [],
"decorator": [],
"default": [],
"defclass": [],
"defun": [],
"defmodule": [],
"diffadded": [],
"diffchanged": [],
"diffdeleted": [],
"diffheader": [],
"diffposition": [],
"error": [],
"identifier": [],
"keyword": [],
"label": [],
"math": [],
"mathblock": [],
"number": [],
"operator": [],
"preprocessor": [],
"reference": [],
"regex": [],
"string": [],
"stringblock": [],
"stringeol": [],
"stringref": [],
"symbol": [],
"tag": [],
"tagattribute": [],
},
stc.STC_LEX_PYTHON: {
"character": [stc.STC_P_CHARACTER],
"comment": [stc.STC_P_COMMENTBLOCK],
"commentline": [stc.STC_P_COMMENTLINE],
"decorator": [stc.STC_P_DECORATOR],
"default": [stc.STC_P_DEFAULT],
"defclass": [stc.STC_P_CLASSNAME],
"defun": [stc.STC_P_DEFNAME],
"identifier": [stc.STC_P_IDENTIFIER],
"keyword": [stc.STC_P_WORD, stc.STC_P_WORD2],
"number": [stc.STC_P_NUMBER],
"operator": [stc.STC_P_OPERATOR],
"string": [stc.STC_P_STRING],
"stringblock": [stc.STC_P_TRIPLE, stc.STC_P_TRIPLEDOUBLE],
"stringeol": [stc.STC_P_STRINGEOL],
},
stc.STC_LEX_CPP: {
"character": [stc.STC_C_CHARACTER],
"comment": [stc.STC_C_COMMENT, stc.STC_C_COMMENTDOC, stc.STC_C_COMMENTDOCKEYWORD, stc.STC_C_COMMENTDOCKEYWORDERROR, stc.STC_C_PREPROCESSORCOMMENT],
"commentline": [stc.STC_C_COMMENTLINE, stc.STC_C_COMMENTLINEDOC],
"default": [stc.STC_C_DEFAULT],
"identifier": [stc.STC_C_GLOBALCLASS, stc.STC_C_IDENTIFIER],
"keyword": [stc.STC_C_WORD, stc.STC_C_WORD2],
"number": [stc.STC_C_NUMBER, stc.STC_C_UUID],
"operator": [stc.STC_C_OPERATOR],
"preprocessor": [stc.STC_C_PREPROCESSOR],
"regex": [stc.STC_C_REGEX],
"string": [stc.STC_C_HASHQUOTEDSTRING, stc.STC_C_STRING, stc.STC_C_STRINGRAW],
"stringblock": [stc.STC_C_TRIPLEVERBATIM, stc.STC_C_VERBATIM],
"stringeol": [stc.STC_C_STRINGEOL],
},
stc.STC_LEX_HTML: lexer_tokens_html,
stc.STC_LEX_XML: lexer_tokens_html,
stc.STC_LEX_PHPSCRIPT: lexer_tokens_html,
stc.STC_LEX_PERL: {
"character": [stc.STC_PL_CHARACTER],
"commentline": [stc.STC_PL_COMMENTLINE],
"default": [stc.STC_PL_DEFAULT],
"defun": [stc.STC_PL_SUB_PROTOTYPE],
"identifier": [stc.STC_PL_IDENTIFIER],
"keyword": [stc.STC_PL_WORD],
"number": [stc.STC_PL_NUMBER],
"operator": [stc.STC_PL_OPERATOR, stc.STC_PL_PUNCTUATION],
"preprocessor": [stc.STC_PL_PREPROCESSOR],
"reference": [stc.STC_PL_ARRAY, stc.STC_PL_HASH, stc.STC_PL_SCALAR, stc.STC_PL_SYMBOLTABLE],
"regex": [stc.STC_PL_REGEX, stc.STC_PL_REGEX_VAR],
"string": [stc.STC_PL_BACKTICKS, stc.STC_PL_STRING, stc.STC_PL_REGSUBST, stc.STC_PL_FORMAT, stc.STC_PL_FORMAT_IDENT, stc.STC_PL_LONGQUOTE, stc.STC_PL_STRING_Q, stc.STC_PL_STRING_QQ, stc.STC_PL_STRING_QR, stc.STC_PL_STRING_QW, stc.STC_PL_STRING_QX, stc.STC_PL_XLAT],
"stringblock": [stc.STC_PL_DATASECTION, stc.STC_PL_HERE_DELIM, stc.STC_PL_HERE_Q, stc.STC_PL_HERE_QQ, stc.STC_PL_HERE_QX, stc.STC_PL_POD, stc.STC_PL_POD_VERB],
"stringeol": [stc.STC_PL_ERROR],
"stringref": [stc.STC_PL_BACKTICKS_VAR, stc.STC_PL_STRING_QQ_VAR, stc.STC_PL_STRING_QR_VAR, stc.STC_PL_STRING_QX_VAR, stc.STC_PL_REGSUBST_VAR, stc.STC_PL_STRING_VAR, stc.STC_PL_HERE_QQ_VAR, stc.STC_PL_HERE_QX_VAR],
},
stc.STC_LEX_SQL: {
"character": [stc.STC_SQL_CHARACTER],
"comment": [stc.STC_SQL_COMMENT, stc.STC_SQL_COMMENTDOC, stc.STC_SQL_COMMENTDOCKEYWORD, stc.STC_SQL_COMMENTDOCKEYWORDERROR, stc.STC_SQL_SQLPLUS_COMMENT],
"commentline": [stc.STC_SQL_COMMENTLINE, stc.STC_SQL_COMMENTLINEDOC],
"default": [stc.STC_SQL_DEFAULT],
"identifier": [stc.STC_SQL_IDENTIFIER],
"keyword": [stc.STC_SQL_USER1, stc.STC_SQL_USER2, stc.STC_SQL_USER3, stc.STC_SQL_USER4, stc.STC_SQL_WORD, stc.STC_SQL_WORD2],
"number": [stc.STC_SQL_NUMBER],
"operator": [stc.STC_SQL_OPERATOR],
"preprocessor": [stc.STC_SQL_SQLPLUS, stc.STC_SQL_SQLPLUS_PROMPT],
"string": [stc.STC_SQL_QUOTEDIDENTIFIER, stc.STC_SQL_STRING],
},
stc.STC_LEX_BLITZBASIC: lexer_tokens_basic,
stc.STC_LEX_PUREBASIC: lexer_tokens_basic,
stc.STC_LEX_FREEBASIC: lexer_tokens_basic,
stc.STC_LEX_POWERBASIC: lexer_tokens_basic,
stc.STC_LEX_VB: lexer_tokens_basic,
stc.STC_LEX_VBSCRIPT: lexer_tokens_basic,
stc.STC_LEX_MAKEFILE: {
"comment": [stc.STC_MAKE_COMMENT],
"default": [stc.STC_MAKE_DEFAULT],
"identifier": [stc.STC_MAKE_IDENTIFIER],
"label": [stc.STC_MAKE_TARGET],
"operator": [stc.STC_MAKE_OPERATOR],
"preprocessor": [stc.STC_MAKE_PREPROCESSOR],
"stringeol": [stc.STC_MAKE_IDEOL],
},
stc.STC_LEX_BATCH: {
"comment": [stc.STC_BAT_COMMENT],
"default": [stc.STC_BAT_DEFAULT],
"identifier": [stc.STC_BAT_IDENTIFIER],
"keyword": [stc.STC_BAT_COMMAND, stc.STC_BAT_WORD, stc.STC_BAT_HIDE],
"label": [stc.STC_BAT_LABEL],
"operator": [stc.STC_BAT_OPERATOR],
},
stc.STC_LEX_LATEX: {
"comment": [stc.STC_L_COMMENT2],
"commentline": [stc.STC_L_COMMENT],
"default": [stc.STC_L_DEFAULT],
"error": [stc.STC_L_ERROR],
"math": [stc.STC_L_MATH],
"mathblock": [stc.STC_L_MATH2],
"reference": [stc.STC_L_SPECIAL],
"stringblock": [stc.STC_L_VERBATIM],
"tag": [stc.STC_L_COMMAND, stc.STC_L_SHORTCMD, stc.STC_L_TAG, stc.STC_L_TAG2],
"tagattribute": [stc.STC_L_CMDOPT],
},
stc.STC_LEX_LUA: {
"character": [stc.STC_LUA_CHARACTER],
"comment": [stc.STC_LUA_COMMENT, stc.STC_LUA_COMMENTDOC],
"commentline": [stc.STC_LUA_COMMENTLINE],
"default": [stc.STC_LUA_DEFAULT],
"identifier": [stc.STC_LUA_IDENTIFIER],
"keyword": [stc.STC_LUA_WORD, stc.STC_LUA_WORD2, stc.STC_LUA_WORD3, stc.STC_LUA_WORD4, stc.STC_LUA_WORD5, stc.STC_LUA_WORD6, stc.STC_LUA_WORD7, stc.STC_LUA_WORD8],
"label": [stc.STC_LUA_LABEL],
"number": [stc.STC_LUA_NUMBER],
"operator": [stc.STC_LUA_OPERATOR],
"preprocessor": [stc.STC_LUA_PREPROCESSOR],
"string": [stc.STC_LUA_LITERALSTRING, stc.STC_LUA_STRING],
"stringeol": [stc.STC_LUA_STRINGEOL],
},
stc.STC_LEX_DIFF: {
"comment": [stc.STC_DIFF_COMMENT],
"default": [stc.STC_DIFF_DEFAULT],
"diffadded": [stc.STC_DIFF_ADDED],
"diffchanged": [stc.STC_DIFF_CHANGED],
"diffdeleted": [stc.STC_DIFF_DELETED],
"diffheader": [stc.STC_DIFF_HEADER, stc.STC_DIFF_COMMAND],
"diffposition": [stc.STC_DIFF_POSITION],
},
stc.STC_LEX_PASCAL: {
"assembly": [stc.STC_PAS_ASM],
"character": [stc.STC_PAS_CHARACTER],
"comment": [stc.STC_PAS_COMMENT, stc.STC_PAS_COMMENT2],
"commentline": [stc.STC_PAS_COMMENTLINE],
"default": [stc.STC_PAS_DEFAULT],
"identifier": [stc.STC_PAS_IDENTIFIER],
"keyword": [stc.STC_PAS_WORD],
"number": [stc.STC_PAS_HEXNUMBER, stc.STC_PAS_NUMBER],
"operator": [stc.STC_PAS_OPERATOR],
"preprocessor": [stc.STC_PAS_PREPROCESSOR, stc.STC_PAS_PREPROCESSOR2],
"string": [stc.STC_PAS_STRING],
"stringeol": [stc.STC_PAS_STRINGEOL],
},
stc.STC_LEX_ADA: {
"character": [stc.STC_ADA_CHARACTER],
"commentline": [stc.STC_ADA_COMMENTLINE],
"default": [stc.STC_ADA_DEFAULT],
"error": [stc.STC_ADA_ILLEGAL],
"identifier": [stc.STC_ADA_IDENTIFIER],
"keyword": [stc.STC_ADA_WORD],
"label": [stc.STC_ADA_LABEL],
"number": [stc.STC_ADA_NUMBER],
"operator": [stc.STC_ADA_DELIMITER],
"stringblock": [stc.STC_ADA_STRING],
"stringeol": [stc.STC_ADA_CHARACTEREOL, stc.STC_ADA_STRINGEOL],
},
stc.STC_LEX_LISP: {
"commentline": [stc.STC_LISP_COMMENT],
"default": [stc.STC_LISP_DEFAULT],
"identifier": [stc.STC_LISP_IDENTIFIER],
"keyword": [stc.STC_LISP_KEYWORD, stc.STC_LISP_KEYWORD_KW],
"number": [stc.STC_LISP_NUMBER],
"operator": [stc.STC_LISP_OPERATOR],
"string": [stc.STC_LISP_STRING],
"stringeol": [stc.STC_LISP_STRINGEOL],
"symbol": [stc.STC_LISP_SYMBOL],
},
stc.STC_LEX_RUBY: {
"character": [stc.STC_RB_CHARACTER],
"commentline": [stc.STC_RB_COMMENTLINE],
"default": [stc.STC_RB_DEFAULT],
"defclass": [stc.STC_RB_CLASSNAME],
"defun": [stc.STC_RB_DEFNAME],
"defmodule": [stc.STC_RB_MODULE_NAME],
"error": [stc.STC_RB_ERROR],
"identifier": [stc.STC_RB_GLOBAL, stc.STC_RB_IDENTIFIER],
"keyword": [stc.STC_RB_STDERR, stc.STC_RB_STDIN, stc.STC_RB_STDOUT, stc.STC_RB_WORD, stc.STC_RB_WORD_DEMOTED],
"number": [stc.STC_RB_NUMBER],
"operator": [stc.STC_RB_OPERATOR],
"reference": [stc.STC_RB_CLASS_VAR, stc.STC_RB_INSTANCE_VAR],
"regex": [stc.STC_RB_REGEX],
"string": [stc.STC_RB_BACKTICKS, stc.STC_RB_STRING, stc.STC_RB_STRING_Q, stc.STC_RB_STRING_QQ, stc.STC_RB_STRING_QR, stc.STC_RB_STRING_QW, stc.STC_RB_STRING_QX],
"stringblock": [stc.STC_RB_DATASECTION, stc.STC_RB_HERE_DELIM, stc.STC_RB_HERE_Q, stc.STC_RB_HERE_QQ, stc.STC_RB_HERE_QX, stc.STC_RB_POD],
"symbol": [stc.STC_RB_SYMBOL],
},
stc.STC_LEX_EIFFEL: lexer_tokens_eiffel,
stc.STC_LEX_EIFFELKW: lexer_tokens_eiffel,
stc.STC_LEX_TCL: {
"comment": [stc.STC_TCL_BLOCK_COMMENT, stc.STC_TCL_COMMENT, stc.STC_TCL_COMMENT_BOX],
"commentline": [stc.STC_TCL_COMMENTLINE],
"default": [stc.STC_TCL_DEFAULT, stc.STC_TCL_EXPAND, stc.STC_TCL_MODIFIER],
"identifier": [stc.STC_TCL_IDENTIFIER],
"keyword": [stc.STC_TCL_WORD, stc.STC_TCL_WORD2, stc.STC_TCL_WORD3, stc.STC_TCL_WORD4, stc.STC_TCL_WORD5, stc.STC_TCL_WORD6, stc.STC_TCL_WORD7, stc.STC_TCL_WORD8],
"number": [stc.STC_TCL_NUMBER],
"operator": [stc.STC_TCL_OPERATOR],
"string": [stc.STC_TCL_IN_QUOTE, stc.STC_TCL_WORD_IN_QUOTE],
"stringref": [stc.STC_TCL_SUBSTITUTION, stc.STC_TCL_SUB_BRACE],
},
stc.STC_LEX_MATLAB: lexer_tokens_matlab,
stc.STC_LEX_OCTAVE: lexer_tokens_matlab,
stc.STC_LEX_ASM: {
"character": [stc.STC_ASM_CHARACTER],
"comment": [stc.STC_ASM_COMMENT, stc.STC_ASM_COMMENTDIRECTIVE],
"commentline": [stc.STC_ASM_COMMENTBLOCK],
"default": [stc.STC_ASM_DEFAULT],
"identifier": [stc.STC_ASM_IDENTIFIER],
"keyword": [stc.STC_ASM_CPUINSTRUCTION, stc.STC_ASM_EXTINSTRUCTION, stc.STC_ASM_MATHINSTRUCTION],
"number": [stc.STC_ASM_NUMBER],
"operator": [stc.STC_ASM_OPERATOR],
"preprocessor": [stc.STC_ASM_DIRECTIVE, stc.STC_ASM_DIRECTIVEOPERAND],
"reference": [stc.STC_ASM_REGISTER],
"string": [stc.STC_ASM_STRING],
"stringeol": [stc.STC_ASM_STRINGEOL],
},
stc.STC_LEX_FORTRAN: lexer_tokens_fortran,
stc.STC_LEX_F77: lexer_tokens_fortran,
stc.STC_LEX_CSS: {
"comment": [stc.STC_CSS_COMMENT, stc.STC_CSS_IMPORTANT],
"default": [stc.STC_CSS_DEFAULT],
"identifier": [stc.STC_CSS_IDENTIFIER2, stc.STC_CSS_IDENTIFIER3, stc.STC_CSS_TAG, stc.STC_CSS_UNKNOWN_IDENTIFIER, stc.STC_CSS_PSEUDOCLASS, stc.STC_CSS_PSEUDOELEMENT, stc.STC_CSS_EXTENDED_IDENTIFIER, stc.STC_CSS_EXTENDED_PSEUDOCLASS, stc.STC_CSS_EXTENDED_PSEUDOELEMENT, stc.STC_CSS_UNKNOWN_PSEUDOCLASS, stc.STC_CSS_VARIABLE],
"keyword": [stc.STC_CSS_IDENTIFIER],
"number": [stc.STC_CSS_VALUE],
"operator": [stc.STC_CSS_OPERATOR],
"preprocessor": [stc.STC_CSS_DIRECTIVE, stc.STC_CSS_MEDIA],
"reference": [stc.STC_CSS_CLASS, stc.STC_CSS_ID],
"string": [stc.STC_CSS_SINGLESTRING, stc.STC_CSS_DOUBLESTRING],
"tagattribute": [stc.STC_CSS_ATTRIBUTE],
},
stc.STC_LEX_PS: {
"commentline": [stc.STC_PS_COMMENT, stc.STC_PS_DSC_COMMENT, stc.STC_PS_DSC_VALUE],
"default": [stc.STC_PS_DEFAULT],
"error": [stc.STC_PS_BADSTRINGCHAR],
"identifier": [stc.STC_PS_NAME, stc.STC_PS_LITERAL, stc.STC_PS_IMMEVAL],
"keyword": [stc.STC_PS_KEYWORD],
"number": [stc.STC_PS_NUMBER],
"operator": [stc.STC_PS_PAREN_ARRAY, stc.STC_PS_PAREN_DICT, stc.STC_PS_PAREN_PROC],
"string": [stc.STC_PS_BASE85STRING, stc.STC_PS_HEXSTRING, stc.STC_PS_TEXT],
},
stc.STC_LEX_YAML: {
"commentline": [stc.STC_YAML_COMMENT, stc.STC_YAML_DOCUMENT],
"default": [stc.STC_YAML_DEFAULT],
"error": [stc.STC_YAML_ERROR],
"identifier": [stc.STC_YAML_IDENTIFIER],
"keyword": [stc.STC_YAML_KEYWORD],
"number": [stc.STC_YAML_NUMBER],
"operator": [stc.STC_YAML_OPERATOR],
"reference": [stc.STC_YAML_REFERENCE],
"string": [stc.STC_YAML_TEXT],
},
stc.STC_LEX_ERLANG: {
"character": [stc.STC_ERLANG_CHARACTER],
"commentline": [stc.STC_ERLANG_COMMENT, stc.STC_ERLANG_COMMENT_DOC, stc.STC_ERLANG_COMMENT_DOC_MACRO, stc.STC_ERLANG_COMMENT_FUNCTION, stc.STC_ERLANG_COMMENT_MODULE],
"default": [stc.STC_ERLANG_DEFAULT],
"identifier": [stc.STC_ERLANG_ATOM, stc.STC_ERLANG_BIFS, stc.STC_ERLANG_FUNCTION_NAME, stc.STC_ERLANG_MACRO, stc.STC_ERLANG_NODE_NAME, stc.STC_ERLANG_RECORD, stc.STC_ERLANG_VARIABLE, stc.STC_ERLANG_UNKNOWN],
"keyword": [stc.STC_ERLANG_KEYWORD],
"number": [stc.STC_ERLANG_NUMBER],
"operator": [stc.STC_ERLANG_OPERATOR],
"preprocessor": [stc.STC_ERLANG_MODULES, stc.STC_ERLANG_MODULES_ATT, stc.STC_ERLANG_PREPROC],
"string": [stc.STC_ERLANG_ATOM_QUOTED, stc.STC_ERLANG_MACRO_QUOTED, stc.STC_ERLANG_NODE_NAME_QUOTED, stc.STC_ERLANG_RECORD_QUOTED, stc.STC_ERLANG_STRING],
},
stc.STC_LEX_VERILOG: {
"comment": [stc.STC_V_COMMENT],
"commentline": [stc.STC_V_COMMENTLINE, stc.STC_V_COMMENTLINEBANG],
"default": [stc.STC_V_DEFAULT],
"identifier": [stc.STC_V_IDENTIFIER, stc.STC_V_USER],
"keyword": [stc.STC_V_WORD, stc.STC_V_WORD2, stc.STC_V_WORD3],
"number": [stc.STC_V_NUMBER],
"operator": [stc.STC_V_OPERATOR],
"preprocessor": [stc.STC_V_PREPROCESSOR],
"string": [stc.STC_V_STRING],
"stringeol": [stc.STC_V_STRINGEOL],
},
stc.STC_LEX_BASH: {
"character": [stc.STC_SH_CHARACTER],
"commentline": [stc.STC_SH_COMMENTLINE],
"default": [stc.STC_SH_DEFAULT],
"error": [stc.STC_SH_ERROR],
"identifier": [stc.STC_SH_IDENTIFIER, stc.STC_SH_PARAM],
"keyword": [stc.STC_SH_WORD],
"number": [stc.STC_SH_NUMBER],
"operator": [stc.STC_SH_OPERATOR],
"reference": [stc.STC_SH_SCALAR],
"string": [stc.STC_SH_BACKTICKS, stc.STC_SH_STRING],
"stringblock": [stc.STC_SH_HERE_DELIM, stc.STC_SH_HERE_Q],
},
stc.STC_LEX_VHDL: {
"comment": [stc.STC_VHDL_COMMENT],
"commentline": [stc.STC_VHDL_COMMENTLINEBANG],
"default": [stc.STC_VHDL_DEFAULT],
"identifier": [stc.STC_VHDL_ATTRIBUTE, stc.STC_VHDL_IDENTIFIER, stc.STC_VHDL_STDFUNCTION, stc.STC_VHDL_STDPACKAGE, stc.STC_VHDL_STDTYPE, stc.STC_VHDL_USERWORD],
"keyword": [stc.STC_VHDL_KEYWORD],
"number": [stc.STC_VHDL_NUMBER],
"operator": [stc.STC_VHDL_OPERATOR, stc.STC_VHDL_STDOPERATOR],
"string": [stc.STC_VHDL_STRING],
"stringeol": [stc.STC_VHDL_STRINGEOL],
},
stc.STC_LEX_CAML: {
"character": [stc.STC_CAML_CHAR],
"comment": [stc.STC_CAML_COMMENT, stc.STC_CAML_COMMENT1, stc.STC_CAML_COMMENT2, stc.STC_CAML_COMMENT3],
"default": [stc.STC_CAML_DEFAULT],
"identifier": [stc.STC_CAML_IDENTIFIER, stc.STC_CAML_TAGNAME],
"keyword": [stc.STC_CAML_KEYWORD, stc.STC_CAML_KEYWORD2, stc.STC_CAML_KEYWORD3],
"number": [stc.STC_CAML_NUMBER],
"operator": [stc.STC_CAML_OPERATOR],
"preprocessor": [stc.STC_CAML_LINENUM],
"string": [stc.STC_CAML_STRING, stc.STC_CAML_WHITE],
},
stc.STC_LEX_HASKELL: {
"character": [stc.STC_HA_CHARACTER],
"comment": [stc.STC_HA_COMMENTBLOCK, stc.STC_HA_COMMENTBLOCK2, stc.STC_HA_COMMENTBLOCK3],
"commentline": [stc.STC_HA_COMMENTLINE],
"default": [stc.STC_HA_CAPITAL, stc.STC_HA_DEFAULT, stc.STC_HA_IMPORT],
"defmodule": [stc.STC_HA_CLASS, stc.STC_HA_DATA, stc.STC_HA_INSTANCE, stc.STC_HA_MODULE],
"identifier": [stc.STC_HA_IDENTIFIER],
"keyword": [stc.STC_HA_KEYWORD],
"number": [stc.STC_HA_NUMBER],
"operator": [stc.STC_HA_OPERATOR],
"string": [stc.STC_HA_STRING],
},
stc.STC_LEX_SMALLTALK: {
"character": [stc.STC_ST_CHARACTER],
"commentline": [stc.STC_ST_COMMENT],
"default": [stc.STC_ST_DEFAULT],
"identifier": [stc.STC_ST_GLOBAL, stc.STC_ST_KWSEND, stc.STC_ST_SPECIAL, stc.STC_ST_SPEC_SEL],
"keyword": [stc.STC_ST_BOOL, stc.STC_ST_NIL, stc.STC_ST_RETURN, stc.STC_ST_SELF, stc.STC_ST_SUPER],
"number": [stc.STC_ST_BINARY, stc.STC_ST_NUMBER],
"operator": [stc.STC_ST_ASSIGN],
"string": [stc.STC_ST_STRING],
"symbol": [stc.STC_ST_SYMBOL],
},
stc.STC_LEX_D: {
"character": [stc.STC_D_CHARACTER],
"comment": [stc.STC_D_COMMENT, stc.STC_D_COMMENTDOC, stc.STC_D_COMMENTDOCKEYWORD, stc.STC_D_COMMENTDOCKEYWORDERROR, stc.STC_D_COMMENTNESTED],
"commentline": [stc.STC_D_COMMENTLINE, stc.STC_D_COMMENTLINEDOC],
"default": [stc.STC_D_DEFAULT],
"defclass": [stc.STC_D_TYPEDEF],
"identifier": [stc.STC_D_IDENTIFIER],
"keyword": [stc.STC_D_WORD, stc.STC_D_WORD2, stc.STC_D_WORD3, stc.STC_D_WORD5, stc.STC_D_WORD6, stc.STC_D_WORD7],
"number": [stc.STC_D_NUMBER],
"operator": [stc.STC_D_OPERATOR],
"string": [stc.STC_D_STRING, stc.STC_D_STRINGB, stc.STC_D_STRINGR],
"stringeol": [stc.STC_D_STRINGEOL],
},
stc.STC_LEX_CMAKE: {
"comment": [stc.STC_CMAKE_COMMENT],
"default": [stc.STC_CMAKE_DEFAULT],
"identifier": [stc.STC_CMAKE_USERDEFINED],
"keyword": [stc.STC_CMAKE_COMMANDS, stc.STC_CMAKE_FOREACHDEF, stc.STC_CMAKE_IFDEFINEDEF, stc.STC_CMAKE_MACRODEF, stc.STC_CMAKE_WHILEDEF],
"number": [stc.STC_CMAKE_NUMBER],
"reference": [stc.STC_CMAKE_PARAMETERS, stc.STC_CMAKE_VARIABLE],
"string": [stc.STC_CMAKE_STRINGDQ, stc.STC_CMAKE_STRINGLQ, stc.STC_CMAKE_STRINGRQ],
"stringref": [stc.STC_CMAKE_STRINGVAR],
},
stc.STC_LEX_R: {
"comment": [stc.STC_R_COMMENT],
"default": [stc.STC_R_DEFAULT],
"identifier": [stc.STC_R_IDENTIFIER],
"keyword": [stc.STC_R_BASEKWORD, stc.STC_R_KWORD, stc.STC_R_OTHERKWORD],
"number": [stc.STC_R_NUMBER],
"operator": [stc.STC_R_INFIX, stc.STC_R_OPERATOR],
"string": [stc.STC_R_STRING, stc.STC_R_STRING2],
"stringeol": [stc.STC_R_INFIXEOL],
},
stc.STC_LEX_SML: {
"character": [stc.STC_SML_CHAR],
"comment": [stc.STC_SML_COMMENT, stc.STC_SML_COMMENT1, stc.STC_SML_COMMENT2, stc.STC_SML_COMMENT3],
"default": [stc.STC_SML_DEFAULT],
"identifier": [stc.STC_SML_IDENTIFIER, stc.STC_SML_TAGNAME],
"keyword": [stc.STC_SML_KEYWORD, stc.STC_SML_KEYWORD2, stc.STC_SML_KEYWORD3],
"number": [stc.STC_SML_NUMBER],
"operator": [stc.STC_SML_OPERATOR],
"preprocessor": [stc.STC_SML_LINENUM],
"string": [stc.STC_SML_STRING],
},
stc.STC_LEX_MODULA: {
"character": [stc.STC_MODULA_CHAR, stc.STC_MODULA_CHARSPEC],
"comment": [stc.STC_MODULA_COMMENT, stc.STC_MODULA_DOXYCOMM, stc.STC_MODULA_DOXYKEY],
"default": [stc.STC_MODULA_DEFAULT],
"defun": [stc.STC_MODULA_PROC],
"keyword": [stc.STC_MODULA_KEYWORD, stc.STC_MODULA_RESERVED],
"number": [stc.STC_MODULA_BASENUM, stc.STC_MODULA_FLOAT, stc.STC_MODULA_NUMBER],
"operator": [stc.STC_MODULA_OPERATOR],
"preprocessor": [stc.STC_MODULA_PRAGMA, stc.STC_MODULA_PRGKEY],
"string": [stc.STC_MODULA_STRING, stc.STC_MODULA_STRSPEC],
"stringeol": [stc.STC_MODULA_BADSTR],
},
stc.STC_LEX_COFFEESCRIPT: {
"character": [stc.STC_COFFEESCRIPT_CHARACTER],
"comment": [stc.STC_COFFEESCRIPT_COMMENT, stc.STC_COFFEESCRIPT_COMMENTBLOCK, stc.STC_COFFEESCRIPT_COMMENTDOC, stc.STC_COFFEESCRIPT_COMMENTDOCKEYWORD, stc.STC_COFFEESCRIPT_COMMENTDOCKEYWORDERROR],
"commentline": [stc.STC_COFFEESCRIPT_COMMENTLINE, stc.STC_COFFEESCRIPT_COMMENTLINEDOC, stc.STC_COFFEESCRIPT_VERBOSE_REGEX_COMMENT],
"default": [stc.STC_COFFEESCRIPT_DEFAULT],
"identifier": [stc.STC_COFFEESCRIPT_GLOBALCLASS, stc.STC_COFFEESCRIPT_IDENTIFIER],
"keyword": [stc.STC_COFFEESCRIPT_WORD, stc.STC_COFFEESCRIPT_WORD2],
"number": [stc.STC_COFFEESCRIPT_NUMBER, stc.STC_COFFEESCRIPT_UUID],
"operator": [stc.STC_COFFEESCRIPT_OPERATOR],
"preprocessor": [stc.STC_COFFEESCRIPT_PREPROCESSOR],
"regex": [stc.STC_COFFEESCRIPT_REGEX, stc.STC_COFFEESCRIPT_VERBOSE_REGEX],
"string": [stc.STC_COFFEESCRIPT_HASHQUOTEDSTRING, stc.STC_COFFEESCRIPT_STRING, stc.STC_COFFEESCRIPT_STRINGRAW],
"stringblock": [stc.STC_COFFEESCRIPT_TRIPLEVERBATIM, stc.STC_COFFEESCRIPT_VERBATIM],
"stringeol": [stc.STC_COFFEESCRIPT_STRINGEOL],
},
}
| StarcoderdataPython |
4821615 | """
The main functionality for the Telegram bot
"""
from libdev.cfg import cfg
from libdev.gen import generate
from libdev.aws import upload_file
from lib._variables import (
languages, languages_chosen, tokens,
user_ids, user_logins, user_statuses, user_names, user_titles,
)
from lib._api import auth, api
from lib.reports import report
__all__ = (
'cfg',
'generate',
'languages', 'languages_chosen', 'tokens',
'user_ids', 'user_logins', 'user_statuses', 'user_names', 'user_titles',
'auth', 'api',
'upload_file',
'report',
)
| StarcoderdataPython |
1684392 | <filename>plotting.py
from matplotlib import pyplot as plt
from matplotlib.patches import Circle
import matplotlib.lines as lines
from math import sin
from math import cos
from math import radians
#--- FUNCTIONS ----------------------------------------------------------------+
def plot_organism(x1, y1, theta, ax):
circle = Circle([x1,y1], 0.05, edgecolor = 'g', facecolor = 'lightgreen', zorder=8)
ax.add_artist(circle)
edge = Circle([x1,y1], 0.05, facecolor='None', edgecolor = 'darkgreen', zorder=8)
ax.add_artist(edge)
tail_len = 0.075
x2 = cos(radians(theta)) * tail_len + x1
y2 = sin(radians(theta)) * tail_len + y1
ax.add_line(lines.Line2D([x1,x2],[y1,y2], color='darkgreen', linewidth=1, zorder=10))
pass
def plot_food(x1, y1, ax):
circle = Circle([x1,y1], 0.03, edgecolor = 'darkslateblue', facecolor = 'mediumslateblue', zorder=5)
ax.add_artist(circle)
pass
#--- END ----------------------------------------------------------------------+
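#--- EXAMPLE ------------------------------------------------------------------+
# Minimal usage sketch (not part of the original module): draw one organism and
# one food pellet on a unit square and display the figure.
if __name__ == '__main__':
    fig, ax = plt.subplots()
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_aspect('equal')
    plot_organism(0.5, 0.5, 45, ax)   # organism at (0.5, 0.5), heading 45 degrees
    plot_food(0.2, 0.8, ax)           # food pellet at (0.2, 0.8)
    plt.show()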
| StarcoderdataPython |
1676984 | <reponame>crappyoats/eclipse_vision
# <NAME>
# Illinois State Geological Survey, University of Illinois
# 2015-05-31
from __future__ import print_function
from PRTEntry import PRTEntry
from collections import defaultdict
from tqdm import tqdm
import sys
class PRTController(object):
"""class for parsing PRT file output from Eclipse/Petrel
:self.prt_file: PRT file string
:self.runs: defaultdict of lists containing the VTK grids for each
timestep, referrenced by search term/poro/perms
:self.f_len: len of PRT file. Used to get better count for progress bar.
:self.x, y, z: dimensions of simulation grid"""
def __init__(self, prt):
"""initializes all variables and runs self._get_len. requires prt string
as arg"""
self.prt_file = prt
self.runs = defaultdict(list)
self.f_len = self._get_len(prt)
def _get_len(self, fname):
"""gets number of lines in prt. prints periods to show progress."""
with open(fname) as f:
sys.stdout.write('Opening files...')
sys.stdout.flush()
for i, l in enumerate(f):
if i % 1000000 == 0:
sys.stdout.write('.')
sys.stdout.flush()
pass
print("")
return i + 1
def set_dims(self, dim_tup):
self.x, self.y, self.z = dim_tup
def add_runs(self, terms):
"""searches for term and creates PRTEntry for all data in term"""
opened = open(self.prt_file)
with tqdm(total=self.f_len) as pbar:
pbar.set_description("Searching PRT")
for line in opened:
if line.strip().startswith(terms):
term = self._det_term(line, terms)
entry = PRTEntry(self.x, self.y, self.z)
entry.read_type_info(line)
self._skip_lines(opened, term, pbar)
entry.read_cell_info(opened, pbar)
self.runs[term] += [entry]
pbar.update(1)
opened.close()
def _skip_lines(self, f, term, pbar):
for line in f:
pbar.update(1)
if line.strip().startswith(term):
return
def _det_term(self, line, terms):
"""determines term that section is referring to"""
for each in terms:
if line.strip().startswith(each):
return each
def write_timesteps(self, txt_file):
        for term, runs in self.runs.items():
txt_file.write("The sequence for " + term + '\n')
for run in runs:
txt_file.write(str(run.time) + " ")
txt_file.write('\n')
if __name__ == '__main__':
from sys import argv
test = PRTController(argv[1])
test.set_dims((100, 100, 100))
test.add_runs(("SGAS", "PRESSURE"))
txt = open("timesteps.txt", 'w')
test.write_timesteps(txt)
| StarcoderdataPython |
101614 | '''
prep_dev_notebook:
pred_newshapes_dev: Runs against new_shapes
'''
import os
import sys
import random
import math
import re
import gc
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
import keras.backend as KB
import mrcnn.model_mod as modellib
import mrcnn.visualize as visualize
from mrcnn.config import Config
from mrcnn.dataset import Dataset
from mrcnn.utils import stack_tensors, stack_tensors_3d, log
from mrcnn.datagen import data_generator, load_image_gt
import platform
syst = platform.system()
if syst == 'Windows':
# Root directory of the project
print(' windows ' , syst)
# WINDOWS MACHINE ------------------------------------------------------------------
ROOT_DIR = "E:\\"
MODEL_PATH = os.path.join(ROOT_DIR, "models")
DATASET_PATH = os.path.join(ROOT_DIR, 'MLDatasets')
#### MODEL_DIR = os.path.join(MODEL_PATH, "mrcnn_logs")
COCO_MODEL_PATH = os.path.join(MODEL_PATH, "mask_rcnn_coco.h5")
DEFAULT_LOGS_DIR = os.path.join(MODEL_PATH, "mrcnn_coco_logs")
COCO_DATASET_PATH = os.path.join(DATASET_PATH,"coco2014")
RESNET_MODEL_PATH = os.path.join(MODEL_PATH, "resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5")
elif syst == 'Linux':
    print(' Linux ', syst)
# LINUX MACHINE ------------------------------------------------------------------
ROOT_DIR = os.getcwd()
MODEL_PATH = os.path.expanduser('~/models')
DATASET_PATH = os.path.expanduser('~/MLDatasets')
#### MODEL_DIR = os.path.join(MODEL_PATH, "mrcnn_development_logs")
COCO_MODEL_PATH = os.path.join(MODEL_PATH, "mask_rcnn_coco.h5")
COCO_DATASET_PATH = os.path.join(DATASET_PATH,"coco2014")
DEFAULT_LOGS_DIR = os.path.join(MODEL_PATH, "mrcnn_coco_logs")
RESNET_MODEL_PATH = os.path.join(MODEL_PATH, "resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5")
else:
    raise RuntimeError('unrecognized system: {}'.format(syst))
print("Tensorflow Version: {} Keras Version : {} ".format(tf.__version__,keras.__version__))
import pprint
pp = pprint.PrettyPrinter(indent=2, width=100)
np.set_printoptions(linewidth=100,precision=4,threshold=1000, suppress = True)
##------------------------------------------------------------------------------------
## Old Shapes TRAINING
##------------------------------------------------------------------------------------
def prep_oldshapes_train(init_with = None, FCN_layers = False, batch_sz = 5, epoch_steps = 4, folder_name= "mrcnn_oldshape_training_logs"):
import mrcnn.shapes as shapes
MODEL_DIR = os.path.join(MODEL_PATH, folder_name)
# Build configuration object -----------------------------------------------
config = shapes.ShapesConfig()
config.BATCH_SIZE = batch_sz # Batch size is 2 (# GPUs * images/GPU).
config.IMAGES_PER_GPU = batch_sz # Must match BATCH_SIZE
config.STEPS_PER_EPOCH = epoch_steps
config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2]
# Build shape dataset -----------------------------------------------
dataset_train = shapes.ShapesDataset()
dataset_train.load_shapes(3000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_train.prepare()
# Validation dataset
dataset_val = shapes.ShapesDataset()
dataset_val.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_val.prepare()
try :
del model
print('delete model is successful')
gc.collect()
except:
pass
KB.clear_session()
model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR, FCN_layers = FCN_layers)
print(' COCO Model Path : ', COCO_MODEL_PATH)
print(' Checkpoint folder Path: ', MODEL_DIR)
print(' Model Parent Path : ', MODEL_PATH)
print(' Resent Model Path : ', RESNET_MODEL_PATH)
load_model(model, init_with = init_with)
train_generator = data_generator(dataset_train, model.config, shuffle=True,
batch_size=model.config.BATCH_SIZE,
augment = False)
val_generator = data_generator(dataset_val, model.config, shuffle=True,
batch_size=model.config.BATCH_SIZE,
augment=False)
model.config.display()
return [model, dataset_train, dataset_val, train_generator, val_generator, config]
##------------------------------------------------------------------------------------
## Old Shapes TESTING
##------------------------------------------------------------------------------------
def prep_oldshapes_test(init_with = None, FCN_layers = False, batch_sz = 5, epoch_steps = 4, folder_name= "mrcnn_oldshape_test_logs"):
import mrcnn.shapes as shapes
MODEL_DIR = os.path.join(MODEL_PATH, folder_name)
# MODEL_DIR = os.path.join(MODEL_PATH, "mrcnn_development_logs")
# Build configuration object -----------------------------------------------
config = shapes.ShapesConfig()
config.BATCH_SIZE = batch_sz # Batch size is 2 (# GPUs * images/GPU).
config.IMAGES_PER_GPU = batch_sz # Must match BATCH_SIZE
config.STEPS_PER_EPOCH = epoch_steps
config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2]
# Build shape dataset -----------------------------------------------
dataset_test = shapes.ShapesDataset()
dataset_test.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_test.prepare()
# Recreate the model in inference mode
try :
del model
print('delete model is successful')
gc.collect()
except:
pass
KB.clear_session()
model = modellib.MaskRCNN(mode="inference",
config=config,
model_dir=MODEL_DIR,
FCN_layers = FCN_layers )
print(' COCO Model Path : ', COCO_MODEL_PATH)
print(' Checkpoint folder Path: ', MODEL_DIR)
print(' Model Parent Path : ', MODEL_PATH)
print(' Resent Model Path : ', RESNET_MODEL_PATH)
load_model(model, init_with = init_with)
test_generator = data_generator(dataset_test, model.config, shuffle=True,
batch_size=model.config.BATCH_SIZE,
augment = False)
model.config.display()
return [model, dataset_test, test_generator, config]
##------------------------------------------------------------------------------------
## New Shapes TESTING
##------------------------------------------------------------------------------------
def prep_newshapes_test(init_with = 'last', FCN_layers = False, batch_sz = 5, epoch_steps = 4,folder_name= "mrcnn_newshape_test_logs"):
import mrcnn.new_shapes as new_shapes
MODEL_DIR = os.path.join(MODEL_PATH, folder_name)
# Build configuration object -----------------------------------------------
config = new_shapes.NewShapesConfig()
config.BATCH_SIZE = batch_sz # Batch size is 2 (# GPUs * images/GPU).
config.IMAGES_PER_GPU = batch_sz # Must match BATCH_SIZE
config.STEPS_PER_EPOCH = epoch_steps
config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2]
# Build shape dataset -----------------------------------------------
# Training dataset
dataset_test = new_shapes.NewShapesDataset()
dataset_test.load_shapes(3000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_test.prepare()
# Recreate the model in inference mode
try :
del model
print('delete model is successful')
gc.collect()
except:
pass
KB.clear_session()
model = modellib.MaskRCNN(mode="inference",
config=config,
model_dir=MODEL_DIR,
FCN_layers = FCN_layers )
print(' COCO Model Path : ', COCO_MODEL_PATH)
print(' Checkpoint folder Path: ', MODEL_DIR)
print(' Model Parent Path : ', MODEL_PATH)
print(' Resent Model Path : ', RESNET_MODEL_PATH)
load_model(model, init_with = init_with)
test_generator = data_generator(dataset_test, model.config, shuffle=True,
batch_size=model.config.BATCH_SIZE,
augment = False)
model.config.display()
return [model, dataset_test, test_generator, config]
##------------------------------------------------------------------------------------
## New Shapes TRAINING
##------------------------------------------------------------------------------------
def prep_newshapes_train(init_with = "last", FCN_layers= False, batch_sz =5, epoch_steps = 4, folder_name= "mrcnn_newshape_training_logs"):
import mrcnn.new_shapes as new_shapes
MODEL_DIR = os.path.join(MODEL_PATH, folder_name)
# Build configuration object -----------------------------------------------
config = new_shapes.NewShapesConfig()
config.BATCH_SIZE = batch_sz # Batch size is 2 (# GPUs * images/GPU).
config.IMAGES_PER_GPU = batch_sz # Must match BATCH_SIZE
config.STEPS_PER_EPOCH = epoch_steps
config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2]
# Build shape dataset -----------------------------------------------
# Training dataset
dataset_train = new_shapes.NewShapesDataset()
dataset_train.load_shapes(3000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_train.prepare()
# Validation dataset
dataset_val = new_shapes.NewShapesDataset()
dataset_val.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_val.prepare()
try :
del model
print('delete model is successful')
gc.collect()
except:
pass
KB.clear_session()
model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR,FCN_layers = FCN_layers)
print('MODEL_PATH : ', MODEL_PATH)
print('COCO_MODEL_PATH : ', COCO_MODEL_PATH)
print('RESNET_MODEL_PATH : ', RESNET_MODEL_PATH)
print('MODEL_DIR : ', MODEL_DIR)
print('Last Saved Model : ', model.find_last())
    load_model(model, init_with=init_with)
train_generator = data_generator(dataset_train, model.config, shuffle=True,
batch_size=model.config.BATCH_SIZE,
augment = False)
config.display()
return [model, dataset_train, train_generator, config]
##------------------------------------------------------------------------------------
## LOAD MODEL
##------------------------------------------------------------------------------------
def load_model(model, init_with = None):
'''
methods to load weights
1 - load a specific file
2 - find a last checkpoint in a specific folder
3 - use init_with keyword
'''
# Which weights to start with?
print('-----------------------------------------------')
print(' Load model with init parm: ', init_with)
# print(' find last chkpt :', model.find_last())
# print(' n)
print('-----------------------------------------------')
## 1- look for a specific weights file
## Load trained weights (fill in path to trained weights here)
# model_path = 'E:\\Models\\mrcnn_logs\\shapes20180428T1819\\mask_rcnn_shapes_5784.h5'
# print(' model_path : ', model_path )
# print("Loading weights from ", model_path)
# model.load_weights(model_path, by_name=True)
# print('Load weights complete')
# ## 2- look for last checkpoint file in a specific folder (not working correctly)
# model.config.LAST_EPOCH_RAN = 5784
# model.model_dir = 'E:\\Models\\mrcnn_logs\\shapes20180428T1819'
# last_model_found = model.find_last()
# print(' last model in MODEL_DIR: ', last_model_found)
# # loc= model.load_weights(model.find_last()[1], by_name=True)
# # print('Load weights complete :', loc)
## 3- Use init_with keyword
## Which weights to start with?
# init_with = "last" # imagenet, coco, or last
if init_with == "imagenet":
# loc=model.load_weights(model.get_imagenet_weights(), by_name=True)
loc=model.load_weights(RESNET_MODEL_PATH, by_name=True)
elif init_with == "coco":
# Load weights trained on MS COCO, but skip layers that
# are different due to the different number of classes
# See README for instructions to download the COCO weights
loc=model.load_weights(COCO_MODEL_PATH, by_name=True,
exclude=["mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
# Load the last model you trained and continue training, placing checkpouints in same folder
loc= model.load_weights(model.find_last()[1], by_name=True)
else:
assert init_with != "", "Provide path to trained weights"
print("Loading weights from ", init_with)
loc = model.load_weights(init_with, by_name=True)
print('Load weights complete', loc)
"""
##------------------------------------------------------------------------------------
## Old Shapes DEVELOPMENT
##------------------------------------------------------------------------------------
def prep_oldshapes_dev(init_with = None, FCN_layers = False, batch_sz = 5):
import mrcnn.shapes as shapes
MODEL_DIR = os.path.join(MODEL_PATH, "mrcnn_oldshape_dev_logs")
config = build_config(batch_sz = batch_sz)
dataset_train = shapes.ShapesDataset()
dataset_train.load_shapes(150, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_train.prepare()
try :
del model
print('delete model is successful')
gc.collect()
except:
pass
KB.clear_session()
model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR, FCN_layers = FCN_layers)
print(' COCO Model Path : ', COCO_MODEL_PATH)
print(' Checkpoint folder Path: ', MODEL_DIR)
print(' Model Parent Path : ', MODEL_PATH)
print(' Resent Model Path : ', RESNET_MODEL_PATH)
load_model(model, init_with = init_with)
train_generator = data_generator(dataset_train, model.config, shuffle=True,
batch_size=model.config.BATCH_SIZE,
augment = False)
model.config.display()
return [model, dataset_train, train_generator, config]
##------------------------------------------------------------------------------------
## New Shapes DEVELOPMENT
##------------------------------------------------------------------------------------
def prep_newshapes_dev(init_with = "last", FCN_layers= False, batch_sz = 5):
import mrcnn.new_shapes as new_shapes
MODEL_DIR = os.path.join(MODEL_PATH, "mrcnn_newshape_dev_logs")
config = build_config(batch_sz = batch_sz, newshapes=True)
# Build shape dataset -----------------------------------------------
# Training dataset
dataset_train = new_shapes.NewShapesDataset()
dataset_train.load_shapes(3000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_train.prepare()
# Validation dataset
dataset_val = new_shapes.NewShapesDataset()
dataset_val.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_val.prepare()
try :
del model, train_generator, val_generator, mm
gc.collect()
except:
pass
KB.clear_session()
model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR,FCN_layers = FCN_layers)
print('MODEL_PATH : ', MODEL_PATH)
print('COCO_MODEL_PATH : ', COCO_MODEL_PATH)
print('RESNET_MODEL_PATH : ', RESNET_MODEL_PATH)
print('MODEL_DIR : ', MODEL_DIR)
print('Last Saved Model : ', model.find_last())
load_model(model, init_with = 'last')
train_generator = data_generator(dataset_train, model.config, shuffle=True,
batch_size=model.config.BATCH_SIZE,
augment = False)
config.display()
return [model, dataset_train, train_generator, config]
"""
| StarcoderdataPython |
4046 | # Generated by Django 3.0.3 on 2020-03-24 09:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('exercises', '0018_photo_file'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=80)),
('description', models.TextField(blank=True)),
('exercises', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='exercises.Exercise')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('image_url', models.URLField()),
('image_caption', models.CharField(blank=True, max_length=80)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photos', to='categories.Category')),
],
options={
'abstract': False,
},
),
]
| StarcoderdataPython |
1693417 | # Uses Python3
# Greatest divisor common
import sys
def GCDNaive(a, b):
best = 0
if(b == 0 or a == 0):
return 0
if(b > a):
[a, b] = [b, a]
for i in range(1, a + 1):
if(a % i == 0 and b % i == 0):
best = i
return best
# Euclidean Algorithm
def EuclideanGCD(a, b):
if b == 0:
return a
rem = a % b
return EuclideanGCD(b, rem)
# Least common multiple (the smallest number divisible by both "a" and "b")
def LeastCommonMultiple(a, b):
gcd = EuclideanGCD(a, b)
value1 = a // gcd
value2 = b // gcd
return value1 * value2 * gcd
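# Quick sanity check of the helpers above (illustrative only):
# gcd(12, 18) = 6, so lcm(12, 18) = (12 // 6) * (18 // 6) * 6 = 36.
assert EuclideanGCD(12, 18) == 6
assert LeastCommonMultiple(12, 18) == 36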
input = sys.stdin.read().split()
a = int(input[0])
b = int(input[1])
print(LeastCommonMultiple(a, b)) | StarcoderdataPython |
18957 | """
Code for working with data.
In-memory format (as a list):
- board: Tensor (8, 8, 2) [bool; one-hot]
- move: Tensor (64,) [bool; one-hot]
- value: Tensor () [float32]
On-disk format (to save space and quicken loading):
- board: int64
- move: int64
- value: float32
"""
from typing import Dict, Tuple
import tensorflow as tf # type: ignore
from board import BOARD_SHAPE, BOARD_SQUARES, Board, Loc
EXAMPLE_SPEC = {
"board": tf.io.FixedLenFeature([2], tf.int64),
"move": tf.io.FixedLenFeature([], tf.int64),
"value": tf.io.FixedLenFeature([], tf.float32),
}
# Hack to allow storing bitboards efficiently as tf.Int64.
# Necessary because boards are all valid uint64 but not necessarily valid int64.
# Taken from: https://stackoverflow.com/questions/20766813/how-to-convert-signed-to-
# unsigned-integer-in-python
def _signed_representation(unsigned: int) -> int:
"""Convert an "unsigned" int to its equivalent C "signed" representation."""
return (unsigned & ((1 << 63) - 1)) - (unsigned & (1 << 63))
def _unsigned_representation(signed: int) -> int:
"""Convert a "signed" int to its equivalent C "unsigned" representation."""
return signed & 0xFFFFFFFFFFFFFFFF
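# Illustrative round trip (the two helpers are inverses on 64-bit values), e.g. with
# only the sign bit set: _signed_representation(2**63) == -(2**63) and
# _unsigned_representation(-(2**63)) == 2**63.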
# See: https://stackoverflow.com/questions/48333210/tensorflow-how-to-convert-an-
# integer-tensor-to-the-corresponding-binary-tensor
def decode_bitboard(encoded: tf.Tensor) -> tf.Tensor:
"""
Convert from uint64 board representation to a tf.Tensor board.
"""
flat = tf.math.mod(
tf.bitwise.right_shift(encoded, tf.range(BOARD_SQUARES, dtype=tf.int64)), 2
)
board = tf.reshape(flat, BOARD_SHAPE)
# Hack to allow using rot90 on a 2D tensor
return tf.image.rot90(tf.expand_dims(board, axis=-1), k=2)[:, :, 0]
def serialize_example(board: Board, move: Loc, value: float) -> str:
"""
Serialize a single training example into a string.
"""
black = _signed_representation(int(board.black))
white = _signed_representation(int(board.white))
features = {
"board": tf.train.Feature(int64_list=tf.train.Int64List(value=[black, white])),
"move": tf.train.Feature(int64_list=tf.train.Int64List(value=[move.as_int])),
"value": tf.train.Feature(float_list=tf.train.FloatList(value=[value])),
}
ex = tf.train.Example(features=tf.train.Features(feature=features))
return ex.SerializeToString()
def preprocess_example(
serialized: str
) -> Tuple[Dict[str, tf.Tensor], Dict[str, tf.Tensor]]:
"""
Turn a serialized example into the training-ready format.
"""
example = tf.io.parse_single_example(serialized, EXAMPLE_SPEC)
bitboards = example["board"]
black_bb = bitboards[0]
white_bb = bitboards[1]
black = decode_bitboard(black_bb)
white = decode_bitboard(white_bb)
board = tf.stack([black, white], axis=-1)
move = tf.one_hot(example["move"], BOARD_SQUARES)
# TODO: better solution to multi-input Keras model training
return (
{"board": board},
{"policy_softmax": move, "tf_op_layer_Tanh": example["value"]},
)
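if __name__ == "__main__":
    # Minimal smoke test (sketch): it exercises decode_bitboard only, so no Board/Loc
    # instance is needed. It assumes eager execution (the TF 2.x default) and that
    # BOARD_SHAPE is a single 8x8 plane, since decode_bitboard reshapes 64 bits into it.
    bits = tf.constant(0b101, dtype=tf.int64)  # two occupied squares in the low bits
    plane = decode_bitboard(bits)
    print(plane.shape)                 # should equal BOARD_SHAPE
    print(int(tf.reduce_sum(plane)))   # 2 squares set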
| StarcoderdataPython |
3263889 | palavras = ('Curso', 'Video', 'Internet', 'Gratis', 'Futuro', 'Eeeeduardooooooo')
for p in palavras:
    print(f'\nIn the word {p.upper():_^12} the vowels are:', end=' ')
for letra in p:
if letra.lower() in 'aeiou':
print(letra.lower(), end=' ')
| StarcoderdataPython |
1718679 | import logging
from types import MethodType
from assembly_line.expections import AssemblyLineDataHelperNotReturnedError
from assembly_line.data_helper import DataHelper
logger = logging.getLogger(__name__)
def _do_pre_process(this, data_helper):
if hasattr(this, 'pre_process'):
res = this.pre_process(data_helper)
if not isinstance(res, DataHelper):
raise AssemblyLineDataHelperNotReturnedError(
'{!r}.pre_process did not return a DataHelperInstance'.format(this)
)
return res
return data_helper
def _do_process(this, data_helper):
if not hasattr(this, 'process'):
raise NotImplementedError('process method should be implemented for every Task class/instance')
res = this.process(data_helper)
if not isinstance(res, DataHelper):
raise AssemblyLineDataHelperNotReturnedError(
'{!r}.process did not return a DataHelperInstance'.format(this)
)
return res
def _do_post_process(this, data_helper):
if hasattr(this, 'post_process'):
res = this.post_process(data_helper)
if not isinstance(res, DataHelper):
raise AssemblyLineDataHelperNotReturnedError(
'{!r}.post_process did not return a DataHelperInstance'.format(this)
)
return res
return data_helper
def _do_setup(this, data_helper):
if hasattr(this, 'setup'):
res = this.setup(data_helper)
if not isinstance(res, DataHelper):
raise AssemblyLineDataHelperNotReturnedError(
'{!r}.setup did not return a DataHelperInstance'.format(this)
)
return res
return data_helper
def _do_teardown(this, data_helper):
if hasattr(this, 'teardown'):
res = this.teardown(data_helper)
if not isinstance(res, DataHelper):
raise AssemblyLineDataHelperNotReturnedError(
'{!r}.teardown did not return a DataHelperInstance'.format(this)
)
return res
return data_helper
class ReprMeta(type):
def __call__(cls, *args, **kwargs):
label = kwargs.pop('label', None)
inst = super(ReprMeta, cls).__call__(*args, **kwargs)
if not label:
if not getattr(inst, 'label', None):
if args and kwargs:
inst.label = f'{cls.__name__} args={args!r}, kwargs={kwargs!r}'
elif args:
inst.label = f'{cls.__name__} args={args!r}'
elif kwargs:
inst.label = f'{cls.__name__} kwargs={kwargs!r}'
else:
inst.label = cls.__name__
else:
inst.label = label
return inst
class Task(object, metaclass=ReprMeta):
log = logger
def __init__(self, *args, **kwargs):
self.do_setup = MethodType(_do_setup, self)
self.do_pre_process = MethodType(_do_pre_process, self)
self.do_process = MethodType(_do_process, self)
self.do_post_process = MethodType(_do_post_process, self)
self.do_teardown = MethodType(_do_teardown, self)
do_setup = classmethod(_do_setup)
do_pre_process = classmethod(_do_pre_process)
do_process = classmethod(_do_process)
do_post_process = classmethod(_do_post_process)
do_teardown = classmethod(_do_teardown)
def __repr__(self):
return getattr(self, 'label', super().__repr__())
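# Usage sketch (illustrative, not part of the original module). It assumes DataHelper
# can be constructed without arguments; the real constructor may differ.
#
#     class UppercaseTask(Task):
#         def process(self, data_helper):
#             # transform/augment the helper here, then hand it back
#             return data_helper
#
#     task = UppercaseTask()
#     helper = task.do_setup(DataHelper())
#     helper = task.do_process(helper)   # raises if process() does not return a DataHelper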
| StarcoderdataPython |
3279359 | <reponame>rahulkmr/cookiecutter-flask<filename>{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/settings.py<gh_stars>0
"""
Application configuration.
Most configuration are set via environment variables.
Use a .env file in the project root to change configuration.
"""
from environs import Env
env = Env()
env.read_env()
TESTING = env.bool('TESTING', False)
SECRET_KEY = env.str('SECRET_KEY')
CACHE_TYPE = env.str('CACHE_TYPE', 'simple')
# flask-sqlalchemy configs
database_url = env.str('SQLALCHEMY_DATABASE_URI', None) or \
env.str('DATABASE_URL', None) or \
'postgresql:///{{cookiecutter.project_slug}}'
SQLALCHEMY_DATABASE_URI = database_url
in_development = env.str('FLASK_ENV', '') == 'development'
SQLALCHEMY_TRACK_MODIFICATIONS = in_development
# flask-security configs
SECURITY_URL_PREFIX = env.str('SECURITY_URL_PREFIX', '/account')
SECURITY_CONFIRMABLE = env.bool('SECURITY_CONFIRMABLE', True)
SECURITY_REGISTERABLE = env.bool('SECURITY_REGISTERABLE', True)
SECURITY_RECOVERABLE = env.bool('SECURITY_RECOVERABLE', True)
SECURITY_TRACKABLE = env.bool('SECURITY_TRACKABLE', True)
SECURITY_CHANGEABLE = env.bool('SECURITY_CHANGEABLE', True)
SECURITY_PASSWORD_SALT = env.str('SECURITY_PASSWORD_SALT')
# flask-mail configs
MAIL_SERVER = env.str('MAIL_SERVER', 'localhost')
MAIL_PORT = env.int('MAIL_PORT', 25)
MAIL_USE_TLS = env.bool('MAIL_USE_TLS', True)
MAIL_USERNAME = env.str('MAIL_USERNAME', '')
MAIL_PASSWORD = env.str('MAIL_PASSWORD', '')
MAIL_DEFAULT_SENDER = env.list('MAIL_DEFAULT_SENDER',
['{{ cookiecutter.name }}', '{{ cookiecutter.email }}'])
# webpack configs
WEBPACK_MANIFEST_PATH = env.str('WEBPACK_MANIFEST_PATH',
'./static/build/manifest.json')
WEBPACK_ASSETS_BASE_URL = env.str('WEBPACK_ASSETS_BASE_URL', None)
APISPEC_TITLE = env.str('APISPEC_TITLE', '{{ cookiecutter.project_slug }}')
{%- if cookiecutter.use_celery == 'y' %}
CELERY_BROKER_URL = env.str('CELERY_BROKER_URL',
'redis://localhost:6379')
CELERY_RESULT_BACKEND = env.str('CELERY_RESULT_BACKEND',
'redis://localhost:6379')
{%- endif %}
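# Example .env (illustrative values only; adjust for your deployment):
#
#   SECRET_KEY=change-me
#   SECURITY_PASSWORD_SALT=change-me-too
#   DATABASE_URL=postgresql:///{{cookiecutter.project_slug}}
#   MAIL_SERVER=smtp.example.com
#   MAIL_PORT=587
#   FLASK_ENV=development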
| StarcoderdataPython |
1655908 | <reponame>cfc603/coming-soon<gh_stars>0
from unittest.mock import Mock, patch
from django.test import TestCase
from model_bakery import baker
from .models import Entry
from .views import EntryCreate
class EntryTest(TestCase):
def test_str(self):
# setup
entry = baker.make(Entry, email="<EMAIL>")
# asserts
self.assertEqual(entry.__str__(), "<EMAIL>")
class EntryCreateTest(TestCase):
@patch("registration.views.redirect")
@patch("registration.views.EntryCreate.get_success_url")
def test_form_invalid(self, get_success_url, redirect):
# setup
get_success_url.return_value = "/success/path/"
view = EntryCreate()
view.form_invalid(Mock())
# asserts
redirect.assert_called_once_with("/success/path/?error=true")
@patch("registration.views.redirect")
@patch("registration.views.EntryCreate.get_success_url")
def test_form_valid(self, get_success_url, redirect):
# setup
get_success_url.return_value = "/success/path/"
view = EntryCreate()
view.form_valid(Mock())
# asserts
redirect.assert_called_once_with("/success/path/?success=true")
| StarcoderdataPython |
49910 | <reponame>mgbin088/vaex<gh_stars>1-10
__version__ = '0.7.0'
__version_tuple__ = (0, 7, 0)
| StarcoderdataPython |
3338970 | <reponame>lxl0928/learning_python<filename>flask/19_user_authentication_1/app/auth/__init__.py
#! usr/bin/python3
# -*- coding: utf-8 -*-
from flask import Blueprint
auth = Blueprint('auth', __name__)
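# views is imported at the bottom so the blueprint object above exists before the
# views module registers routes on it (avoids a circular import).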
from . import views
| StarcoderdataPython |
1606406 | """Support for Rointe Climate."""
from __future__ import annotations
from datetime import timedelta
import logging
import async_timeout
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_COMFORT,
PRESET_ECO,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.components.rointe.api.device_manager import (
RointeDevice,
RointeDeviceManager,
)
from homeassistant.components.rointe.const import (
DOMAIN,
ROINTE_API_REFRESH_SECONDS,
ROINTE_DEVICE_MANAGER,
ROINTE_HA_DEVICES,
)
from homeassistant.components.rointe.rointe_entity import RointeHAEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
PRESET_ROINTE_ICE = "ice_preset"
PRESET_ROINTE_MANUAL = "man_preset"
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Initialize all the rointe heaters found via API."""
device_manager: RointeDeviceManager = hass.data[DOMAIN][entry.entry_id][
ROINTE_DEVICE_MANAGER
]
device_ids = []
async def async_update_data():
"""Fetch data from Rointe's API."""
try:
_LOGGER.info(">> Updating data.... ")
async with async_timeout.timeout(10):
if not await device_manager.sync_devices():
raise UpdateFailed(">>> Unable to sync devices.")
# Data stored in device manager.
return None
except Exception as ex:
_LOGGER("Failed async_update_data: %s", ex)
raise UpdateFailed(f"Error communicating with API: {ex}")
@callback
def async_discover_device(
dev_ids: list[str], coordinator: DataUpdateCoordinator
) -> None:
"""Discover and add a discovered rointe climate devices."""
if not dev_ids:
return
async_add_entities(_setup_entities(hass, entry, dev_ids, coordinator))
entry.async_on_unload(
# Dispatches a signal when the device unloads.
async_dispatcher_connect(
hass, "rointe_discovery_new_climate", async_discover_device
)
)
# Coordinator initialization
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="Rointe",
update_method=async_update_data,
update_interval=timedelta(seconds=ROINTE_API_REFRESH_SECONDS),
)
await coordinator.async_config_entry_first_refresh()
for device in device_manager.rointe_devices.values():
_LOGGER.info("Rointe discovering ID: %s", device.id)
device_ids.append(device.id)
async_discover_device(device_ids, coordinator)
def _setup_entities(
hass: HomeAssistant,
entry: ConfigEntry,
device_ids: list[str],
coordinator: DataUpdateCoordinator,
) -> list[Entity]:
"""Initialize Rointe Heaters."""
device_manager: RointeDeviceManager = hass.data[DOMAIN][entry.entry_id][
ROINTE_DEVICE_MANAGER
]
entities: list[Entity] = []
for device_id in device_ids:
device = device_manager.rointe_devices[device_id]
if device is None:
continue
entities.append(RointeHaClimate(coordinator, device_manager, device))
hass.data[DOMAIN][entry.entry_id][ROINTE_HA_DEVICES].add(device_id)
return entities
class RointeHaClimate(CoordinatorEntity, RointeHAEntity, ClimateEntity):
"""Rointe radiator device."""
def __init__(
self,
coordinator: DataUpdateCoordinator,
device_manager: RointeDeviceManager,
device: RointeDevice,
) -> None:
"""Initialize coordinator and Rointe super class."""
CoordinatorEntity.__init__(self, coordinator)
RointeHAEntity.__init__(self, device=device, device_manager=device_manager)
@property
def is_on(self):
"""Return entity state."""
return self.rointe_device.power
@property
def temperature_unit(self) -> str:
"""Temperature unit."""
return TEMP_CELSIUS
@property
def current_temperature(self) -> float | None:
"""Return the current temperature."""
return self.rointe_device.temp
@property
def max_temp(self) -> float:
"""Max selectable temperature."""
return 30.0
@property
def min_temp(self) -> float:
"""Minimum selectable temperature."""
return 7.0
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
@property
def target_temperature_step(self) -> float | None:
"""Temperature step."""
return 0.5
@property
def hvac_mode(self) -> str:
"""Return the current HVAC mode."""
if self.rointe_device.power:
return HVAC_MODE_HEAT
else:
return HVAC_MODE_OFF
@property
def hvac_modes(self) -> list[str]:
"""Return hvac modes available."""
return [HVAC_MODE_OFF, HVAC_MODE_HEAT, HVAC_MODE_AUTO]
@property
def preset_modes(self) -> list[str]:
"""Return the available preset modes."""
return [
PRESET_COMFORT,
PRESET_ECO,
PRESET_ROINTE_ICE,
PRESET_NONE,
]
@property
def preset_mode(self) -> str:
"""Convert the device's preset to HA preset modes."""
if self.rointe_device.mode == "auto":
# auto modes.
if self.rointe_device.preset == "eco":
return PRESET_ECO
elif self.rointe_device.preset == "comfort":
return PRESET_COMFORT
elif self.rointe_device.preset == "ice":
return PRESET_ROINTE_ICE
else:
return PRESET_NONE
else:
# Manual mode.
return PRESET_NONE
async def async_update(self) -> None:
"""Update entity."""
_LOGGER.info("Entity async_update (coordinator)")
def set_temperature(self, **kwargs):
"""Set new target temperature."""
# target_temperature = float(kwargs["temperature"])
# self._send_command()
pass
def set_hvac_mode(self, hvac_mode):
"""Set new target hvac mode."""
_LOGGER.debug("Setting hvac mode: %s", hvac_mode)
async def async_set_hvac_mode(self, hvac_mode):
"""Set new target hvac mode."""
_LOGGER.debug("Setting async hvac mode: %s", hvac_mode)
| StarcoderdataPython |
3234955 | from os import mkdir, walk, remove
from os.path import exists, join as joinpath
from pickle import PicklingError, UnpicklingError
from collections import namedtuple
from redlib.api.py23 import pickledump, pickleload
from . import const
AutocompInfo = namedtuple('AutocompInfo', ['command', 'access', 'version'])
class DataStoreError(Exception):
FILE_NOT_FOUND = 0
def __init__(self, msg, reason=None):
super(DataStoreError, self).__init__(msg)
self.reason = reason
class DataStore:
pickle_protocol = 2
def __init__(self):
self.check_dir()
def check_dir(self, data=True, autocomp=False, script=False, create=True):
if not exists(const.data_dir):
self.create_dir(const.data_dir, create=create)
if autocomp and not exists(const.autocomp_dir):
self.create_dir(const.autocomp_dir, create=create)
if script and not exists(const.script_dir):
self.create_dir(const.script_dir, create=create)
def create_dir(self, path, create=True):
if create:
mkdir(path)
else:
raise DataStoreError('%s does not exist'%path)
def save_optiontree(self, ot, cmdname):
self.check_dir(data=False, autocomp=True)
with open(joinpath(const.autocomp_dir, cmdname), 'wb') as f:
try:
pickledump(ot, f, protocol=self.pickle_protocol, fix_imports=True)
except PicklingError as e:
print(e)
raise DataStoreError('unable to save option tree')
def load_optiontree(self, cmdname, filepath=None):
filepath = filepath or joinpath(const.autocomp_dir, cmdname)
if not exists(filepath):
filepath = joinpath(const.root_autocomp_dir, cmdname)
if not exists(filepath):
raise DataStoreError('unable to load option tree')
try:
with open(filepath, 'rb') as f:
try:
data = pickleload(f, fix_imports=True)
except UnpicklingError as e:
					raise DataStoreError('unable to unpickle option tree: {}'.format(e))
#ot_version = data[0]
#if ot_version > self.ot_version:
# raise DataStoreError('cannot load greater ot_version, %s > %s'%(version, self.version))
if type(data) == list: # older version (1.0)
return data[1]
else:
return data
except IOError as e:
raise DataStoreError(e)
def remove_optiontree(self, cmdname, exc=False):
filepath = joinpath(const.autocomp_dir, cmdname)
return self.remove_file(filepath, exc=exc)
def remove_all_optiontrees(self):
for name in self.list_optiontree():
self.remove_optiontree(name)
def remove_file(self, filepath, exc=False):
if exists(filepath):
try:
remove(filepath)
return True
except OSError as e:
if exc:
raise DataStoreError(e)
else:
return False
else:
raise DataStoreError('%s not found'%filepath, reason=DataStoreError.FILE_NOT_FOUND)
def list_autocomp_commands(self):
autocomp_list = []
def add_to_list(cmd, access, dirpath):
version = self.load_optiontree(cmd, filepath=joinpath(dirpath, cmd)).prog_version # exc
i = (filter(lambda i : i.command == cmd, autocomp_list) or [None])[0]
if i is None:
autocomp_list.append(AutocompInfo(cmd, [access], [version]))
else:
i.access.append(access)
i.version.append(version)
for _, _, files in walk(const.autocomp_dir):
for f in files:
add_to_list(f, 'user', const.autocomp_dir)
for _, _, files in walk(const.root_autocomp_dir):
for f in files:
add_to_list(f, 'all', const.root_autocomp_dir)
return autocomp_list
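# Usage sketch (illustrative): persist and reload an option tree for a command.
# The object passed to save_optiontree is whatever option-tree instance the caller
# builds elsewhere; its type is not defined in this module.
#
#     store = DataStore()
#     store.save_optiontree(tree, 'mycmd')
#     tree = store.load_optiontree('mycmd')
#     store.remove_optiontree('mycmd')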
| StarcoderdataPython |
55809 | from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, Http404
from django.core.exceptions import PermissionDenied
from django.db import transaction
from django.db.models import Count, Sum, F, Func
from datetime import datetime
import json
from postgresqleu.util.db import exec_to_dict
from postgresqleu.util.request import get_int_or_error
from .models import ConferenceRegistration
from .models import VolunteerSlot, VolunteerAssignment
from .util import send_conference_notification_template, get_conference_or_404
def _check_admin(request, conference):
if request.user.is_superuser:
return True
else:
return conference.administrators.filter(pk=request.user.id).exists() or conference.series.administrators.filter(pk=request.user.id).exists()
def _get_conference_and_reg(request, urlname):
conference = get_conference_or_404(urlname)
is_admin = _check_admin(request, conference)
if is_admin:
reg = ConferenceRegistration.objects.get(conference=conference, attendee=request.user)
else:
try:
reg = conference.volunteers.get(attendee=request.user)
except ConferenceRegistration.DoesNotExist:
raise Http404("Volunteer entry not found")
return (conference, is_admin, reg)
def send_volunteer_notification(conference, assignment, subject, template):
if not conference.notifyvolunteerstatus:
return
# No filter aggregates in our version of Django, so direct SQL it is
pending = exec_to_dict("SELECT count(*) FILTER (WHERE NOT org_confirmed) AS admin, count(*) FILTER (WHERE NOT vol_confirmed) AS volunteer FROM confreg_volunteerassignment a INNER JOIN confreg_volunteerslot s ON s.id=a.slot_id WHERE s.conference_id=%(confid)s", {
'confid': conference.id,
})[0]
send_conference_notification_template(
conference,
subject,
'confreg/mail/{}'.format(template), {
'conference': conference,
'assignment': assignment,
'pending': pending,
},
)
def _get_volunteer_stats(conference):
stats = ConferenceRegistration.objects.filter(conference=conference) \
.filter(volunteers_set=conference) \
.annotate(num_assignments=Count('volunteerassignment')) \
.annotate(total_time=Sum(Func(
Func(F('volunteerassignment__slot__timerange'), function='upper'),
Func(F('volunteerassignment__slot__timerange'), function='lower'),
function='age'))) \
.order_by('lastname', 'firstname')
return [{
'name': r.fullname,
'count': r.num_assignments,
'time': str(r.total_time or '0:00:00'),
} for r in stats]
def _slot_return_data(slot):
return {
'id': slot.id,
'max_staff': slot.max_staff,
'min_staff': slot.min_staff,
'assignments': [{
'id': a.id,
'volid': a.reg.id,
'volunteer': a.reg.fullname,
'vol_confirmed': a.vol_confirmed,
'org_confirmed': a.org_confirmed,
} for a in slot.volunteerassignment_set.all()],
}
@login_required
@transaction.atomic
def volunteerschedule_api(request, urlname, adm=False):
try:
(conference, can_admin, reg) = _get_conference_and_reg(request, urlname)
except ConferenceRegistration.DoesNotExist:
raise PermissionDenied()
is_admin = can_admin and adm
if request.method == 'GET':
# GET just always returns the complete volunteer schedule
slots = VolunteerSlot.objects.prefetch_related('volunteerassignment_set', 'volunteerassignment_set__reg').filter(conference=conference)
return HttpResponse(json.dumps({
'slots': [_slot_return_data(slot) for slot in slots],
'volunteers': [{
'id': vol.id,
'name': vol.fullname,
} for vol in conference.volunteers.all().order_by('firstname', 'lastname')],
'meta': {
'isadmin': is_admin,
'regid': reg.id,
},
'stats': _get_volunteer_stats(conference),
}), content_type='application/json')
if request.method != 'POST':
raise Http404()
if 'op' not in request.POST:
raise Http404()
slotid = get_int_or_error(request.POST, 'slotid')
volid = get_int_or_error(request.POST, 'volid')
# We should always have a valid slot
slot = get_object_or_404(VolunteerSlot, conference=conference, pk=slotid)
err = None
if request.POST['op'] == 'signup':
if volid != 0:
raise PermissionDenied("Invalid post data")
err = _signup(request, conference, reg, is_admin, slot)
elif request.POST['op'] == 'remove':
err = _remove(request, conference, reg, is_admin, slot, volid)
elif request.POST['op'] == 'confirm':
err = _confirm(request, conference, reg, is_admin, slot, volid)
elif request.POST['op'] == 'add':
err = _add(request, conference, reg, is_admin, slot, volid)
else:
raise Http404()
if err:
return HttpResponse(
json.dumps({'err': err}),
content_type='application/json',
status=500,
)
# Req-query the database to pick up any changes, and return the complete object
slot = VolunteerSlot.objects.prefetch_related('volunteerassignment_set', 'volunteerassignment_set__reg').filter(conference=conference, pk=slot.pk)[0]
return HttpResponse(json.dumps({
'err': None,
'slot': _slot_return_data(slot),
'stats': _get_volunteer_stats(conference),
}), content_type='application/json')
@login_required
def volunteerschedule(request, urlname, adm=False):
try:
(conference, can_admin, reg) = _get_conference_and_reg(request, urlname)
except ConferenceRegistration.DoesNotExist:
return HttpResponse("Must be registered for conference to view volunteer schedule")
is_admin = can_admin and adm
slots = VolunteerSlot.objects.filter(conference=conference).order_by('timerange', 'title')
return render(request, 'confreg/volunteer_schedule.html', {
'basetemplate': is_admin and 'confreg/confadmin_base.html' or 'confreg/volunteer_base.html',
'conference': conference,
'admin': is_admin,
'can_admin': can_admin,
'reg': reg,
'slots': slots,
'helplink': 'volunteers',
})
def _signup(request, conference, reg, adm, slot):
if VolunteerAssignment.objects.filter(slot=slot, reg=reg).exists():
return "Already a volunteer for selected slot"
elif slot.countvols >= slot.max_staff:
return "Volunteer slot is already full"
elif VolunteerAssignment.objects.filter(reg=reg, slot__timerange__overlap=slot.timerange).exists():
return "Cannot sign up for an overlapping slot"
else:
a = VolunteerAssignment(slot=slot, reg=reg, vol_confirmed=True, org_confirmed=False)
a.save()
send_volunteer_notification(conference, a, 'Volunteer signed up', 'admin_notify_volunteer_signup.txt')
def _add(request, conference, reg, adm, slot, volid):
addreg = get_object_or_404(ConferenceRegistration, conference=conference, id=volid)
if VolunteerAssignment.objects.filter(slot=slot, reg=addreg).exists():
return "Already a volunteer for selected slot"
elif slot.countvols >= slot.max_staff:
return "Volunteer slot is already full"
elif VolunteerAssignment.objects.filter(reg=addreg, slot__timerange__overlap=slot.timerange).exists():
return "Cannot add to an overlapping slot"
else:
VolunteerAssignment(slot=slot, reg=addreg, vol_confirmed=False, org_confirmed=True).save()
def _remove(request, conference, reg, is_admin, slot, aid):
if is_admin:
a = get_object_or_404(VolunteerAssignment, slot=slot, id=aid)
else:
a = get_object_or_404(VolunteerAssignment, slot=slot, reg=reg, id=aid)
if a.org_confirmed and not is_admin:
return "Cannot remove a confirmed assignment. Please contact the volunteer schedule coordinator for manual processing."
else:
a.delete()
def _confirm(request, conference, reg, is_admin, slot, aid):
if is_admin:
# Admins can make organization confirms
a = get_object_or_404(VolunteerAssignment, slot=slot, id=aid)
if a.org_confirmed:
return "Assignment already confirmed"
else:
a.org_confirmed = True
a.save()
else:
# Regular users can confirm their own sessions only
a = get_object_or_404(VolunteerAssignment, slot=slot, reg=reg, id=aid)
if a.vol_confirmed:
return "Assignment already confirmed"
else:
a.vol_confirmed = True
a.save()
send_volunteer_notification(conference, a, 'Volunteer slot confirmed', 'admin_notify_volunteer_confirmed.txt')
def ical(request, urlname, token):
conference = get_conference_or_404(urlname)
reg = get_object_or_404(ConferenceRegistration, regtoken=token)
assignments = VolunteerAssignment.objects.filter(reg=reg).order_by('slot__timerange')
resp = render(request, 'confreg/volunteer_schedule.ical', {
'conference': conference,
'assignments': assignments,
'now': datetime.utcnow(),
}, content_type='text/calendar')
resp['Content-Disposition'] = 'attachment; filename="{}_volunteer.ical"'.format(conference.urlname)
return resp
| StarcoderdataPython |
1626762 | <filename>Ex_loops.py<gh_stars>0
import random
print("program for user enter names and print random name")
people = []
for x in range(0,8):
person = input("Please enter a name: ")
people.append(person)
index = random.randint(0,7)
random_person = people[index]
print("Picked random person is:" , random_person) | StarcoderdataPython |
1702399 | <filename>meerk40t/balor/sender.py
# Balor Galvo Laser Control Module
# Copyright (C) 2021-2022 Gnostic Instruments, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
import time
import usb.core
import usb.util
from meerk40t.balor.command_list import CommandList, CommandSource
class BalorException(Exception):
pass
class BalorConfigException(BalorException):
pass
class BalorMachineException(BalorException):
pass
class BalorCommunicationException(BalorException):
pass
class BalorDataValidityException(BalorException):
pass
# Marked with ? - currently not seen in the wild
DISABLE_LASER = 0x0002
RESET = 0x0003
ENABLE_LASER = 0x0004
EXECUTE_LIST = 0x0005
SET_PWM_PULSE_WIDTH = 0x0006 # ?
GET_REGISTER = 0x0007
GET_SERIAL_NUMBER = 0x0009 # In EzCAD mine is 32012LI43405B, Version 4.02, LMC V4 FIB
GET_LIST_STATUS = 0x000A
GET_XY_POSITION = 0x000C # Get current galvo position
SET_XY_POSITION = 0x000D # Travel the galvo xy to specified position
LASER_SIGNAL_OFF = 0x000E # ?
LASER_SIGNAL_ON = 0x000F # ?
WRITE_CORRECTION_LINE = 0x0010 # ?
RESET_LIST = 0x0012
RESTART_LIST = 0x0013
WRITE_CORRECTION_TABLE = 0x0015
SET_CONTROL_MODE = 0x0016
SET_DELAY_MODE = 0x0017
SET_MAX_POLY_DELAY = 0x0018
SET_END_OF_LIST = 0x0019
SET_FIRST_PULSE_KILLER = 0x001A
SET_LASER_MODE = 0x001B
SET_TIMING = 0x001C
SET_STANDBY = 0x001D
SET_PWM_HALF_PERIOD = 0x001E
STOP_EXECUTE = 0x001F # Since observed in the wild
STOP_LIST = 0x0020 # ?
WRITE_PORT = 0x0021
WRITE_ANALOG_PORT_1 = 0x0022 # At end of cut, seen writing 0x07FF
WRITE_ANALOG_PORT_2 = 0x0023 # ?
WRITE_ANALOG_PORT_X = 0x0024 # ?
READ_PORT = 0x0025
SET_AXIS_MOTION_PARAM = 0x0026
SET_AXIS_ORIGIN_PARAM = 0x0027
GO_TO_AXIS_ORIGIN = 0x0028
MOVE_AXIS_TO = 0x0029
GET_AXIS_POSITION = 0x002A
GET_FLY_WAIT_COUNT = 0x002B # ?
GET_MARK_COUNT = 0x002D # ?
SET_FPK_2E = 0x002E # First pulse killer related, SetFpkParam2
# My ezcad lists 40 microseconds as FirstPulseKiller
# EzCad sets it 0x0FFB, 1, 0x199, 0x64
FIBER_CONFIG_1 = 0x002F #
FIBER_CONFIG_2 = 0x0030 #
LOCK_INPUT_PORT = 0x0031 # ?
SET_FLY_RES = 0x0032 # Unknown fiber laser parameter being set
# EzCad sets it: 0x0000, 0x0063, 0x03E8, 0x0019
FIBER_OPEN_MO = 0x0033 # "IPG (i.e. fiber) Open MO" - MO is probably Master Oscillator
# (In BJJCZ documentation, the pin 18 on the IPG connector is
# called "main oscillator"; on the raycus docs it is "emission enable.")
# Seen at end of marking operation with all
# zero parameters. My Ezcad has an "open MO delay"
# of 8 ms
FIBER_GET_StMO_AP = 0x0034 # Unclear what this means; there is no
# corresponding list command. It might be to
# get a status register related to the source.
# It is called IPG_GETStMO_AP in the dll, and the abbreviations
# MO and AP are used for the master oscillator and power amplifier
# signal lines in BJJCZ documentation for the board; LASERST is
# the name given to the error code lines on the IPG connector.
GET_USER_DATA = 0x0036 # ?
GET_FLY_PULSE_COUNT = 0x0037 # ?
GET_FLY_SPEED = 0x0038 # ?
ENABLE_Z_2 = 0x0039 # ? AutoFocus on/off
ENABLE_Z = 0x003A # AutoFocus on/off
SET_Z_DATA = 0x003B # ?
SET_SPI_SIMMER_CURRENT = 0x003C # ?
IS_LITE_VERSION = 0x0040 # Tell laser to nerf itself for ezcad lite apparently
GET_MARK_TIME = (
0x0041 # Seen at end of cutting, only and always called with param 0x0003
)
SET_FPK_PARAM = 0x0062 # Probably "first pulse killer" = fpk
class Sender:
"""This is a simplified control class for the BJJCZ (Golden Orange,
Beijing JCZ) LMCV4-FIBER-M and compatible boards. All operations are blocking
so, it should probably run in its own thread for nontrivial applications.
It does have an .abort() method that it is expected will be called
asynchronously from another thread."""
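    # A minimal usage sketch (hypothetical sequence; the attribute names on
    # `service` are the ones read in _init_machine below):
    #   sender = Sender(service)
    #   sender.open()                       # connects and runs _init_machine()
    #   job = sender.job()                  # CommandList bound to this sender
    #   ...fill `job` with list commands...
    #   sender.execute(job, loop_count=1)   # blocks until the job has run
    #   sender.close()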
sleep_time = 0.001
# We include this "blob" here (the contents of which are all well-understood) to
# avoid introducing a dependency on job generation from within the sender.
# It just consists of the new job command followed by a bunch of NOPs.
_abort_list_chunk = bytearray([0x51, 0x80] + [0x00] * 10) + bytearray( # New job
([0x02, 0x80] + [0x00] * 10) * 255
) # NOP
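    # Each list entry is 12 bytes (16-bit little-endian opcode + 10 parameter bytes),
    # so the 1 + 255 entries above make one full 3072-byte chunk, matching _packet_size below.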
_packet_size = 256 * 12
def get_packet_size(self):
return (
self._packet_size
        )  # TODO: maybe this should come from the usb connection class
        # (n.b. the class, not the instance, which will not necessarily exist when this is needed)
def __init__(self, service, debug=False):
self.service = service
self._lock = threading.Lock()
self._terminate_execution = False
self._footswitch_callback = None
self._usb_connection = None
self._write_port = 0x0000
self._debug = debug
def is_open(self):
return self._usb_connection is not None
def open(self):
mock = self.service.mock
machine_index = self.service.machine_index
if self._usb_connection is not None:
raise BalorCommunicationException("Attempting to open an open connection.")
if not mock:
connection = UsbConnection(machine_index, debug=self._debug)
else:
connection = MockConnection(machine_index, debug=self._debug)
connection.open()
self._usb_connection = connection
self._init_machine()
time.sleep(
0.05
) # We sacrifice this time at the altar of the unknown race condition
return True
def close(self):
self.abort()
if self._usb_connection is not None:
self._usb_connection.close()
self._usb_connection = None
def job(self, *args, **kwargs):
return CommandList(*args, **kwargs, sender=self)
def command(self, *args, **kwargs):
self._send_command(*args, **kwargs)
def _send_command(self, *args, **kwargs):
if self._usb_connection is None:
self.open()
return self._usb_connection.send_command(*args, **kwargs)
def _send_correction_entry(self, *args):
if self._usb_connection is None:
self.open()
self._usb_connection.send_correction_entry(*args)
def _send_list_chunk(self, *args):
if self._usb_connection is None:
self.open()
self._usb_connection.send_list_chunk(*args)
def _init_machine(self):
"""Initialize the machine."""
self.serial_number = self.raw_get_serial_no()
self.version = self.raw_get_version()
self.raw_get_st_mo_ap()
cor_file = self.service.corfile if self.service.corfile_enabled else None
first_pulse_killer = self.service.first_pulse_killer
pwm_pulse_width = self.service.pwm_pulse_width
pwm_half_period = self.service.pwm_half_period
standby_param_1 = self.service.standby_param_1
standby_param_2 = self.service.standby_param_2
timing_mode = self.service.timing_mode
delay_mode = self.service.delay_mode
laser_mode = self.service.laser_mode
control_mode = self.service.control_mode
fpk2_p1 = self.service.fpk2_p1
fpk2_p2 = self.service.fpk2_p2
fpk2_p3 = self.service.fpk2_p3
        fpk2_p4 = self.service.fpk2_p4
fly_res_p1 = self.service.fly_res_p1
fly_res_p2 = self.service.fly_res_p2
fly_res_p3 = self.service.fly_res_p3
fly_res_p4 = self.service.fly_res_p4
# Unknown function
self.raw_reset()
# Load in-machine correction table
cor_table = None
if cor_file is not None:
try:
cor_table = self._read_correction_file(cor_file)
except FileNotFoundError:
raise BalorConfigException(".cor file location did not exist")
self._send_correction_table(cor_table)
self.raw_enable_laser()
self.raw_set_control_mode(control_mode, 0)
self.raw_set_laser_mode(laser_mode, 0)
self.raw_set_delay_mode(delay_mode, 0)
self.raw_set_timing(timing_mode, 0)
self.raw_set_standby(standby_param_1, standby_param_2, 0, 0)
self.raw_set_first_pulse_killer(first_pulse_killer, 0)
self.raw_set_pwm_half_period(pwm_half_period, 0)
# unknown function
self.raw_set_pwm_pulse_width(pwm_pulse_width, 0)
# "IPG_OpenMO" (main oscillator?)
self.raw_fiber_open_mo(0, 0)
# Unclear if used for anything
self._send_command(GET_REGISTER, 0)
# 0x0FFB is probably a 12 bit rendering of int12 -5
# Apparently some parameters for the first pulse killer
self.raw_set_fpk_param_2(fpk2_p1, fpk2_p2, fpk2_p3, fpk2_p4)
# Unknown fiber laser related command
self.raw_set_fly_res(fly_res_p1, fly_res_p2, fly_res_p3, fly_res_p4)
# Is this appropriate for all laser engraver machines?
self.raw_write_port(self._write_port)
# Conjecture is that this puts the output port out of a
# high impedance state (based on the name in the DLL,
# ENABLEZ)
# Based on how it's used, it could also be about latching out
# of the data that has been set up.
self.raw_enable_z()
# We don't know what this does, since this laser's power is set
# digitally
self.raw_write_analog_port_1(0x07FF, 0)
self.raw_enable_z()
def _read_correction_file(self, filename):
table = []
with open(filename, "rb") as f:
f.seek(0x24)
for j in range(65):
for k in range(65):
dx = int.from_bytes(f.read(4), "little", signed=True)
dx = dx if dx >= 0 else -dx + 0x8000
dy = int.from_bytes(f.read(4), "little", signed=True)
dy = dy if dy >= 0 else -dy + 0x8000
table.append([dx & 0xFFFF, dy & 0xFFFF])
return table
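    # The .cor file layout assumed above: a 0x24-byte header, then a 65x65 grid of
    # (dx, dy) pairs stored as signed little-endian 32-bit ints; negative values are
    # re-encoded as magnitude + 0x8000 so each component fits the 16-bit wire format.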
def _send_correction_table(self, table=None):
"""Send the onboard correction table to the machine."""
self.raw_write_correction_table(True)
if table is None:
for n in range(65**2):
self.raw_write_correction_line(0, 0, 0 if n == 0 else 1)
else:
for n in range(65**2):
self.raw_write_correction_line(
table[n][0], table[n][1], 0 if n == 0 else 1
)
def is_ready(self):
"""Returns true if the laser is ready for more data, false otherwise."""
self._send_command(GET_REGISTER, 0x0001)
return bool(self._usb_connection.status & 0x20)
def is_busy(self):
"""Returns true if the machine is busy, false otherwise;
Note that running a lighting job counts as being busy."""
self._send_command(GET_REGISTER, 0x0001)
return bool(self._usb_connection.status & 0x04)
def is_ready_and_not_busy(self):
self._send_command(GET_REGISTER, 0x0001)
return bool(self._usb_connection.status & 0x20) and not bool(
self._usb_connection.status & 0x04
)
def wait_finished(self):
while not self.is_ready_and_not_busy():
time.sleep(self.sleep_time)
if self._terminate_execution:
return
def execute(
self, command_list: CommandSource, loop_count=1, callback_finished=None
):
"""Run a job. loop_count is the number of times to repeat the
job; if it is inf, it repeats until aborted. If there is a job
already running, it will be aborted and replaced. Optionally,
calls a callback function when the job is finished.
The loop job can either be regular data in multiples of 3072 bytes, or
it can be a callable that provides data as above on command."""
self._terminate_execution = False
with self._lock:
self.wait_finished()
self.raw_reset_list()
self.port_on(bit=0)
if command_list.movement:
self.raw_fiber_open_mo(1, 0)
loop_index = 0
execute_list = False
while loop_index < loop_count:
packet_count = 0
if command_list.tick is not None:
command_list.tick(command_list, loop_index)
for packet in command_list.packet_generator():
if self._terminate_execution:
return False
ready = False
while not ready:
# Wait until ready.
if self._terminate_execution:
return False
self._send_command(
GET_REGISTER, 0x0001 if not execute_list else 0x0000
) # 0x0007
ready = bool(self._usb_connection.status & 0x20)
self._usb_connection.send_list_chunk(packet)
self.raw_set_end_of_list(
0x0001 if not execute_list else 0x0000
) # 0x00019
if not execute_list and packet_count >= 1:
self.raw_execute_list()
execute_list = True
packet_count += 1
if not execute_list:
self.raw_execute_list()
execute_list = True
# when done, SET_END_OF_LIST(0), SET_CONTROL_MODE(1), 7(1)
self.raw_set_control_mode(1, 0)
busy = True
while busy:
# Wait until no longer busy.
if self._terminate_execution:
return False
self._send_command(GET_REGISTER, 0x0001) # 0x0007
busy = bool(self._usb_connection.status & 0x04)
loop_index += 1
self.port_on(bit=0)
# self.raw_set_standby(0x70D0, 0x0014)
if command_list.movement:
self.raw_fiber_open_mo(0, 0)
if callback_finished is not None:
callback_finished()
return True
loop_job = execute
def abort(self):
"""Aborts any job in progress and puts the machine back into an
idle ready condition."""
self._terminate_execution = True
with self._lock:
self.raw_stop_execute()
self.raw_fiber_open_mo(0, 0)
self.raw_reset_list()
self._send_list_chunk(self._abort_list_chunk)
self.raw_set_end_of_list()
self.raw_execute_list()
while self.is_busy():
time.sleep(self.sleep_time)
self.set_xy(0x8000, 0x8000)
def set_footswitch_callback(self, callback_footswitch):
"""Sets the callback function for the footswitch."""
self._footswitch_callback = callback_footswitch
def get_condition(self):
"""Returns the 16-bit condition register value (from whatever
command was run last.)"""
return self._usb_connection.status
def port_toggle(self, bit):
self._write_port ^= 1 << bit
self.raw_write_port(self._write_port)
def port_on(self, bit):
self._write_port |= 1 << bit
self.raw_write_port(self._write_port)
def port_off(self, bit):
self._write_port = ~((~self._write_port) | (1 << bit))
self.raw_write_port(self._write_port)
def get_port(self, bit=None):
if bit is None:
return self._write_port
return (self._write_port >> bit) & 1
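    # Port bits used elsewhere in this module: bit 0 is raised around job execution
    # (see execute()), and bit 8 (0x100) drives the light, as in light_on()/light_off() below.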
def light_on(self):
self.port_on(bit=8) # 0x100
def light_off(self):
self.port_off(bit=8)
def read_port(self):
port = self.raw_read_port()
if port[0] & 0x8000 and self._footswitch_callback:
callback = self._footswitch_callback
self._footswitch_callback = None
callback(port)
return port
def set_xy(self, x, y):
"""Change the galvo position. If the machine is running a job,
this will abort the job."""
self.raw_set_xy_position(x, y)
def get_xy(self):
"""Returns the galvo position."""
return self.raw_get_xy_position()
#############################
# Raw LMC Interface Commands.
#############################
def raw_disable_laser(self):
"""
No parameters.
@return:
"""
return self._send_command(DISABLE_LASER)
def raw_reset(self):
self._send_command(RESET)
def raw_enable_laser(self):
"""
No parameters.
@return:
"""
return self._send_command(ENABLE_LASER)
def raw_execute_list(self):
"""
No parameters.
@return: value response
"""
return self._send_command(EXECUTE_LIST)
def raw_set_pwm_pulse_width(self, s1: int, value: int):
"""
2 Param: Stack, Value
@param s1:
@param value:
@return: value response
"""
return self._send_command(SET_PWM_PULSE_WIDTH, s1, value)
def raw_get_version(self):
"""
No set parameters but 1 is always sent.
@return: value response
"""
return self._send_command(GET_REGISTER, 1)
def raw_get_serial_no(self):
"""
No parameters
Reply is presumably a serial number.
@return: value response
"""
return self._send_command(GET_SERIAL_NUMBER)
def raw_get_list_status(self):
"""
No parameters
@return: value response
"""
return self._send_command(GET_LIST_STATUS)
def raw_get_xy_position(self):
"""
No parameters
The reply to this is the x, y coords and should be parsed.
@return: value response
"""
return self._send_command(GET_XY_POSITION)
def raw_set_xy_position(self, x, y):
"""
Move to X Y location
@param x:
@param y:
@return: value response
"""
return self._send_command(SET_XY_POSITION, int(x), int(y))
def raw_laser_signal_off(self):
"""
No parameters
@return: value response
"""
return self._send_command(LASER_SIGNAL_OFF)
def raw_laser_signal_on(self):
"""
No parameters
@return: value response
"""
return self._send_command(LASER_SIGNAL_ON)
def raw_write_correction_line(self, dx, dy, nonfirst):
"""
3 parameters
        Writes a single line (one entry) of the correction table.
dx, dy, first, 0.
Does not read reply.
@param dx:
@param dy:
@param nonfirst: either 0x0000 for first entry or 0x0100 for non-first.
@return:
"""
self._send_command(WRITE_CORRECTION_LINE, dx, dy, nonfirst, read=False)
def raw_reset_list(self):
"""
No parameters.
@return: value response
"""
return self._send_command(RESET_LIST)
def raw_restart_list(self):
"""
No parameters.
@return: value response
"""
return self._send_command(RESTART_LIST)
def raw_write_correction_table(self, has_table: bool):
"""
1 parameter
If the parameter is true, no table needs to be sent.
@param has_table:
@return: value response
"""
return self._send_command(WRITE_CORRECTION_TABLE, int(has_table))
def raw_set_control_mode(self, s1: int, value: int):
"""
2 parameters.
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(SET_CONTROL_MODE, int(s1), int(value))
def raw_set_delay_mode(self, s1: int, value: int):
"""
2 parameters.
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(SET_DELAY_MODE, int(s1), int(value))
def raw_set_max_poly_delay(self, s1: int, value: int):
"""
2 parameters.
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(SET_MAX_POLY_DELAY, int(s1), int(value))
def raw_set_end_of_list(self, a=0, b=0):
"""
        2 parameters (a, b), both defaulting to 0
@return: value response
"""
        # Parameters for this command are observed in the pcap captures.
return self._send_command(SET_END_OF_LIST, a, b)
def raw_set_first_pulse_killer(self, s1: int, value: int):
"""
2 parameters.
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(SET_FIRST_PULSE_KILLER, s1, value)
def raw_set_laser_mode(self, s1: int, value: int):
"""
2 parameters.
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(SET_LASER_MODE, s1, value)
def raw_set_timing(self, s1: int, value: int):
"""
2 parameters.
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(SET_TIMING, s1, value)
def raw_set_standby(self, v1: int, v2: int, v3: int, value: int):
"""
4 parameters
variable, variable, variable, value
@param v1:
@param v2:
@param v3:
@param value:
@return: value response
"""
return self._send_command(SET_STANDBY, v1, v2, v3, value)
def raw_set_pwm_half_period(self, s1: int, value: int):
"""
2 parameters
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(SET_PWM_HALF_PERIOD, s1, value)
def raw_stop_execute(self):
"""
No parameters.
@return: value response
"""
return self._send_command(STOP_EXECUTE)
def raw_stop_list(self):
"""
No parameters
@return: value response
"""
return self._send_command(STOP_LIST)
def raw_write_port(self, v1: int = 0, s1: int = 0, value: int = 0):
"""
3 parameters.
variable, stack, value
@param v1:
@param s1:
@param value:
@return: value response
"""
return self._send_command(WRITE_PORT, v1, s1, value)
def raw_write_analog_port_1(self, s1: int, value: int):
"""
2 parameters.
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(WRITE_ANALOG_PORT_1, s1, value)
def raw_write_analog_port_2(self, s1: int, value: int):
"""
3 parameters.
0, stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(WRITE_ANALOG_PORT_2, 0, s1, value)
def raw_write_analog_port_x(self, v1: int, s1: int, value: int):
"""
3 parameters.
variable, stack, value
@param v1:
@param s1:
@param value:
@return: value response
"""
return self._send_command(WRITE_ANALOG_PORT_X, v1, s1, value)
def raw_read_port(self):
"""
No parameters
@return: Status Information
"""
return self._send_command(READ_PORT)
def raw_set_axis_motion_param(self, v1: int, s1: int, value: int):
"""
3 parameters.
variable, stack, value
@return: value response
"""
return self._send_command(SET_AXIS_MOTION_PARAM, v1, s1, value)
def raw_set_axis_origin_param(self, v1: int, s1: int, value: int):
"""
3 parameters.
variable, stack, value
@return: value response
"""
return self._send_command(SET_AXIS_ORIGIN_PARAM, v1, s1, value)
def raw_goto_axis_origin(self, v0: int):
"""
1 parameter
variable
@param v0:
@return: value response
"""
return self._send_command(GO_TO_AXIS_ORIGIN, v0)
def raw_move_axis_to(self, axis, coord):
"""
        This typically accepted a single 32-bit int and used bits 1:8 and 16:24 as the two parameters.
@param axis: axis being moved
@param coord: coordinate being matched
@return: value response
"""
return self._send_command(MOVE_AXIS_TO, axis, coord)
def raw_get_axis_pos(self, s1: int, value: int):
"""
2 parameters
stack, value
@param s1:
@param value:
@return: axis position?
"""
return self._send_command(GET_AXIS_POSITION, s1, value)
def raw_get_fly_wait_count(self, b1: bool):
"""
1 parameter
bool
@param b1:
@return: flywaitcount?
"""
return self._send_command(GET_FLY_WAIT_COUNT, int(b1))
def raw_get_mark_count(self, p1: bool):
"""
1 parameter
bool
@param p1:
@return: markcount?
"""
return self._send_command(GET_MARK_COUNT, int(p1))
def raw_set_fpk_param_2(self, v1, v2, v3, s1):
"""
4 parameters
        variable, variable, variable, stack
@param v1:
@param v2:
@param v3:
@param s1:
@return: value response
"""
return self._send_command(SET_FPK_2E, v1, v2, v3, s1)
def raw_set_fiber_config(self, p1, p2):
"""
Calls fiber_config_2 with setting parameters
@param p1:
@param p2:
@return:
"""
self.raw_fiber_config_1(0, p1, p2)
def raw_get_fiber_config(self):
"""
Calls fiber_config_1 with getting parameters.
@return:
"""
self.raw_fiber_config_1(1, 0, 0)
def raw_fiber_config_1(self, p1, p2, p3):
"""
Seen to call both a get and set config value.
@param p1:
@param p2:
@param p3:
@return:
"""
return self._send_command(FIBER_CONFIG_1, p1, p2, p3)
def raw_fiber_config_2(self, v1, v2, v3, s1):
return self._send_command(FIBER_CONFIG_2, v1, v2, v3, s1)
def raw_lock_input_port(self, p1):
"""
        p1 varies based on the call: 1 for get, 2 for enable, 4 for clear.
@param p1:
@return:
"""
self._send_command(LOCK_INPUT_PORT, p1)
def raw_clear_lock_input_port(self):
self.raw_lock_input_port(0x04)
def raw_enable_lock_input_port(self):
self.raw_lock_input_port(0x02)
def raw_get_lock_input_port(self):
self.raw_lock_input_port(0x01)
def raw_set_fly_res(self, p1, p2, p3, p4):
"""
On-the-fly settings.
@param p1:
@param p2:
@param p3:
@param p4:
@return:
"""
return self._send_command(SET_FLY_RES, p1, p2, p3, p4)
def raw_fiber_open_mo(self, s1: int, value: int):
"""
2 parameters
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(FIBER_OPEN_MO, s1, value)
def raw_get_st_mo_ap(self):
"""
No parameters
@return: value response
"""
return self._send_command(FIBER_GET_StMO_AP)
def raw_get_user_data(self):
"""
No parameters
@return: user_parameters
"""
return self._send_command(GET_USER_DATA)
def raw_get_fly_pulse_count(self):
"""
@return: fly pulse count
"""
return self._send_command(GET_FLY_PULSE_COUNT)
def raw_get_fly_speed(self, p1, p2, p3, p4):
"""
@param p1:
@param p2:
@param p3:
@param p4:
@return:
"""
self._send_command(GET_FLY_SPEED, p1, p2, p3, p4)
def raw_enable_z(self):
"""
No parameters. Autofocus on/off
@return: value response
"""
return self._send_command(ENABLE_Z)
def raw_enable_z_2(self):
"""
No parameters
Alternate command. Autofocus on/off
@return: value response
"""
return self._send_command(ENABLE_Z_2)
def raw_set_z_data(self, v1, s1, v2):
"""
3 parameters
variable, stack, variable
@param v1:
@param s1:
@param v2:
@return: value response
"""
return self._send_command(SET_Z_DATA, v1, s1, v2)
def raw_set_spi_simmer_current(self, v1, s1):
"""
2 parameters
variable, stack
@param v1:
@param s1:
@return: value response
"""
return self._send_command(SET_SPI_SIMMER_CURRENT, v1, s1)
def raw_is_lite_version(self):
"""
        No set parameters, but 1 is always sent (only called for true).
@return: value response
"""
return self._send_command(IS_LITE_VERSION, 1)
def raw_get_mark_time(self):
"""
Parameter is always set to 3.
@return:
"""
self._send_command(GET_MARK_TIME, 3)
def raw_set_fpk_param(self, v1, v2, v3, s1):
"""
Probably "first pulse killer" = fpk
4 parameters
variable, variable, variable, stack
@param v1:
@param v2:
@param v3:
@param s1:
@return: value response
"""
return self._send_command(SET_FPK_PARAM, v1, v2, v3, s1)
class UsbConnection:
chunk_size = 12 * 256
ep_hodi = 0x01 # endpoint for the "dog," i.e. dongle.
ep_hido = 0x81 # fortunately it turns out that we can ignore it completely.
ep_homi = 0x02 # endpoint for host out, machine in. (query status, send ops)
ep_himo = 0x88 # endpoint for host in, machine out. (receive status reports)
def __init__(self, machine_index=0, debug=None):
self.machine_index = machine_index
self.device = None
self.status = None
self._debug = debug
def open(self):
devices = list(usb.core.find(find_all=True, idVendor=0x9588, idProduct=0x9899))
if len(devices) == 0:
raise BalorMachineException("No compatible engraver machine was found.")
try:
device = list(devices)[self.machine_index]
except IndexError:
# Can't find device
raise BalorMachineException("Invalid machine index %d" % self.machine_index)
# if the permissions are wrong, these will throw usb.core.USBError
device.set_configuration()
try:
device.reset()
except usb.core.USBError:
pass
self.device = device
if self._debug:
self._debug("Connected.")
def close(self):
self.status = None
if self._debug:
self._debug("Disconnected.")
def is_ready(self):
self.send_command(READ_PORT, 0)
return self.status & 0x20
def send_correction_entry(self, correction):
"""Send an individual correction table entry to the machine."""
# This is really a command and should just be issued without reading.
query = bytearray([0x10] + [0] * 11)
query[2 : 2 + 5] = correction
if self.device.write(self.ep_homi, query, 100) != 12:
raise BalorCommunicationException("Failed to write correction entry")
def send_command(self, code, *parameters, read=True):
"""Send a command to the machine and return the response.
Updates the host condition register as a side effect."""
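        # Wire format used below: a 12-byte query whose first two bytes are the command
        # code (little-endian), followed by up to five 16-bit little-endian parameters.
        # The 8-byte reply carries two 16-bit values (bytes 2-3 and 4-5) and the 16-bit
        # status/condition register (bytes 6-7).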
query = bytearray([0] * 12)
query[0] = code & 0x00FF
query[1] = (code >> 8) & 0x00FF
for n, parameter in enumerate(parameters):
query[2 * n + 2] = parameter & 0x00FF
query[2 * n + 3] = (parameter >> 8) & 0x00FF
if self.device.write(self.ep_homi, query, 100) != 12:
raise BalorCommunicationException("Failed to write command")
if self._debug:
self._debug("---> " + str(query))
if read:
response = self.device.read(self.ep_himo, 8, 100)
if len(response) != 8:
raise BalorCommunicationException("Invalid response")
if self._debug:
self._debug("<--- " + str(response))
self.status = response[6] | (response[7] << 8)
return response[2] | (response[3] << 8), response[4] | (response[5] << 8)
else:
return 0, 0
def send_list_chunk(self, data):
"""Send a command list chunk to the machine."""
if len(data) != self.chunk_size:
raise BalorDataValidityException("Invalid chunk size %d" % len(data))
sent = self.device.write(self.ep_homi, data, 100)
if sent != len(data):
raise BalorCommunicationException("Could not send list chunk")
if self._debug:
self._debug("---> " + str(data))
class MockConnection:
def __init__(self, machine_index=0, debug=None):
self.machine_index = machine_index
self._debug = debug
self.device = True
@property
def status(self):
import random
return random.randint(0, 255)
def open(self):
self.device = True
if self._debug:
self._debug("Connected.")
def close(self):
if self._debug:
self._debug("Disconnected.")
def send_correction_entry(self, correction):
"""Send an individual correction table entry to the machine."""
pass
def send_command(self, code, *parameters, read=True):
"""Send a command to the machine and return the response.
Updates the host condition register as a side effect."""
if self._debug:
self._debug("---> " + str(code) + " " + str(parameters))
time.sleep(0.005)
# This should be replaced with a robust connection to the simulation code
        # so the fake laser can give sensible responses
if read:
import random
return random.randint(0, 255), random.randint(0, 255)
else:
return 0, 0
def send_list_chunk(self, data):
"""Send a command list chunk to the machine."""
if len(data) != 0xC00:
raise BalorDataValidityException("Invalid chunk size %d" % len(data))
if self._debug:
self._debug("---> " + str(data))
| StarcoderdataPython |
129154 | <reponame>ziransun/wpt<filename>websockets/cookies/support/set-cookie.py
import urllib
def main(request, response):
response.headers.set('Set-Cookie', urllib.unquote(request.url_parts.query))
return [("Content-Type", "text/plain")], ""
| StarcoderdataPython |
from os.path import isdir
import multiprocessing
from os import listdir
from os.path import isfile, join
from typing import Any, Dict, List
from analyzer.src.statistics import Statistics
from analyzer.src.utils import get_analyzer_res_path, get_collector_res_path, load_json_file, remove_keys, save_json_file
from analyzer.src.metrics import Metrics
from analyzer.src.features import Features
from analyzer.src.experiments import Experiment, Experiments
from tqdm import tqdm
class Analyzer:
"""This class contains methods for analyzing the collected metrics on the repositories."""
@staticmethod
def get_repos(repo_count: int, skip_repos: int) -> Dict[str, List[str]]:
"""
Returns a dict mapping the repository paths to a list of result files.
:param repo_count: Number of repositories to get
:param skip_repos: Number of repositories to skip
:return: Dict mapping repository paths to result files
"""
owners: List[str] = listdir(get_collector_res_path())
repos: Dict[str, List[str]] = dict()
for owner in owners:
owner_path = get_collector_res_path(owner=owner)
if isdir(owner_path):
owned_repos: List[str] = listdir(owner_path)
for owned_repo in owned_repos:
repo_path: str = get_collector_res_path(owner=owner, repo=owned_repo)
if isdir(repo_path):
repos[repo_path] = [f for f in listdir(
repo_path) if isfile(join(repo_path, f))]
return {k: repos[k] for k in list(repos)[skip_repos:repo_count + skip_repos]}
@staticmethod
def analyze(
repo_count: int,
skip_repos: int,
analyze_repos: bool,
statistic_tests: bool,
experiment_names: List[str]
) -> None:
"""
Analyzes a given number of repositories.
:param repo_count: Number of repositories to analyze
:param skip_repos: Number of repositories to skip
        :param analyze_repos: Whether to collect the raw data for each experiment
        :param statistic_tests: Whether to run the statistical tests on the results
        :param experiment_names: Names of the experiments to run on the data
"""
if analyze_repos:
Analyzer.analyze_repos(repo_count, skip_repos, experiment_names)
if statistic_tests:
Statistics.analyze_results()
@staticmethod
def analyze_repos(repo_count: int, skip_repos: int, experiment_names: List[str]) -> None:
"""
Collects the raw data for each experiment on the dataset.
:param repo_count: Number of repositories to collect data of
:param skip_repos: Number of repositories to skip
"""
init_experiments = Experiments.initialized(experiment_names)
result_experiments = Experiments.initialized(experiment_names)
repos: Dict[str, List[str]] = Analyzer.get_repos(repo_count, skip_repos)
processes = 2 * multiprocessing.cpu_count() + 1
pool = multiprocessing.Pool(processes=processes)
with tqdm(total=repo_count) as t:
for path, files in repos.items():
repo_result = pool.apply_async(
Analyzer.analyze_repo, args=(init_experiments, path, files))
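                # Note: .get() below blocks until this repository has been analyzed,
                # so repositories are effectively processed one at a time despite the pool.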
result_experiments.merge(repo_result.get())
t.update()
pool.close()
pool.join()
result = result_experiments.as_dict()
save_json_file(result, get_analyzer_res_path(), name="results_with_raw_values.json")
filtered_result = remove_keys(result, "values")
save_json_file(filtered_result, get_analyzer_res_path(),
name="results_without_raw_values.json")
@staticmethod
def analyze_repo(experiments: Experiments, path: str, files: List[str]) -> Experiments:
"""
Analyzes a repository.
:param experiments: The experiments to add the results to
:param path: The path of the repository
:param files: The list of result files in the repository
"""
for file in files:
Analyzer.analyze_file(experiments, path, file)
return experiments
@staticmethod
def analyze_file(experiments: Experiments, path: str, name: str) -> None:
"""
Analyzes a single result file.
:param experiments: The experiments to add the results to
:param path: Path to the result file
:param name: Name of the result file
"""
result_file = load_json_file(path, name)
if not result_file:
return
nodes_experiment = experiments.get(Experiment.NODES)
if nodes_experiment:
for node in result_file["node"]:
feature = Features.get_feature_by_token(node["name"])
if feature is None:
continue
new_node = Metrics(node["data"])
nodes_experiment.merge_feature(feature, new_node)
spaces_experiment = experiments.get(Experiment.SPACES)
if spaces_experiment:
for feature in Features.as_list():
for space in result_file["rca"]:
if space["kind"] == "unit":
continue
is_inside = Analyzer.feature_in_space(feature, result_file["finder"], space)
new_space = Metrics(space["data"])
if is_inside:
spaces_experiment.merge_feature(feature, new_space)
else:
spaces_experiment.merge_feature("no_" + feature, new_space)
files_experiment = experiments.get(Experiment.FILES)
if files_experiment:
for space in result_file["rca"]:
if space["kind"] == "unit":
new_space = Metrics(space["data"])
files_experiment.merge_feature("all_features", new_space)
@staticmethod
def feature_in_space(
feature: str,
findings: List[Dict[str, Any]],
space: Dict[str, Any]
) -> bool:
"""
:param feature: Name of the feature to look for
:param findings: List of findings in the file
:param space: Dict of the space to be searched
:return: Whether or not the feature could be found in the given space
"""
for finding in findings:
if Features.get_feature_by_token(finding["name"]) == feature and \
finding["start_line"] >= space["start_line"] and \
finding["end_line"] <= space["end_line"]:
return True
return False
| StarcoderdataPython |
40571 | <filename>launch/full_recording.launch.py
import launch
from launch.substitutions import Command, LaunchConfiguration
from launch_ros.actions import LifecycleNode
from launch.actions import EmitEvent
from launch.actions import RegisterEventHandler
from launch_ros.events.lifecycle import ChangeState
from launch_ros.events.lifecycle import matches_node_name
from launch_ros.event_handlers import OnStateTransition
from launch.actions import LogInfo
from launch.events import matches_action
from launch.event_handlers.on_shutdown import OnShutdown
import launch_ros
import lifecycle_msgs.msg
import os
def generate_launch_description():
pkg_share = launch_ros.substitutions.FindPackageShare(package='ros2_com').find('ros2_com')
default_model_path = os.path.join(pkg_share, 'descriptions/columbus_description.urdf')
use_sim_time = LaunchConfiguration('use_sim_time')
robot_state_publisher_node = launch_ros.actions.Node(
package='robot_state_publisher',
executable='robot_state_publisher',
parameters=[{'robot_description': Command(['xacro ', LaunchConfiguration('model')]),
'use_sim_time': use_sim_time}]
)
joint_state_publisher_node = launch_ros.actions.Node(
package='joint_state_publisher',
executable='joint_state_publisher',
name='joint_state_publisher',
parameters=[{'use_sim_time': use_sim_time}],
)
clock_server = launch_ros.actions.Node(
package='ros2_com',
executable='clock_server',
name='clock_server'
)
map_saver_server = launch_ros.actions.Node(
package='ros2_com',
executable='map_saver',
name='map_saver_server'
)
rosbag_node = launch.actions.ExecuteProcess(
cmd=['ros2', 'bag', 'record', '-a'],
output='screen'
)
robot_localization_node = launch_ros.actions.Node(
package='robot_localization',
executable='ekf_node',
name='ekf_filter_node',
output='screen',
parameters=[os.path.join(pkg_share, 'config/ekf.yaml'),
{'use_sim_time': use_sim_time}]
)
slam_toolbox_node = launch_ros.actions.Node(
package='slam_toolbox',
executable='async_slam_toolbox_node',
name='slam_toolbox',
parameters=[os.path.join(pkg_share, 'config/mapper_params_online_async.yaml'),
{'use_sim_time': use_sim_time}]
)
localization_node = launch_ros.actions.Node(
package='slam_toolbox',
executable='localization_slam_toolbox_node',
name='slam_toolbox',
output='screen',
parameters=[os.path.join(pkg_share, 'config/localization_params.yaml'),
{"use_sim_time" : use_sim_time}],
)
odom_publisher_node = launch_ros.actions.Node(
package='ros2_com',
executable='odom_publisher',
name='odom_publisher',
output='screen',
parameters=[os.path.join(pkg_share, 'config/robot_config.yaml'),
{'use_sim_time': use_sim_time}],
)
ouster_node = LifecycleNode(package='ros2_ouster',
executable='ouster_driver',
name="ouster_driver",
output='screen',
emulate_tty=True,
parameters=[os.path.join(pkg_share, 'config/ouster_config.yaml'), {'use_sim_time': use_sim_time}],
arguments=['--ros-args', '--log-level', 'INFO'],
namespace='/',
)
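    # The Ouster driver is a managed (lifecycle) node: the configure event below asks it
    # to transition into the 'inactive' state, and the registered handler then emits the
    # 'activate' transition once that state has been reached.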
configure_event = EmitEvent(
event=ChangeState(
lifecycle_node_matcher=matches_action(ouster_node),
transition_id=lifecycle_msgs.msg.Transition.TRANSITION_CONFIGURE,
)
)
activate_event = RegisterEventHandler(
OnStateTransition(
target_lifecycle_node=ouster_node, goal_state='inactive',
entities=[
LogInfo(
msg="[LifecycleLaunch] Ouster driver node is activating."),
EmitEvent(event=ChangeState(
lifecycle_node_matcher=matches_action(ouster_node),
transition_id=lifecycle_msgs.msg.Transition.TRANSITION_ACTIVATE,
)),
],
)
)
# TODO make lifecycle transition to shutdown before SIGINT
shutdown_event = RegisterEventHandler(
OnShutdown(
on_shutdown=[
EmitEvent(event=ChangeState(
lifecycle_node_matcher=matches_node_name(node_name='ouster_driver'),
transition_id=lifecycle_msgs.msg.Transition.TRANSITION_ACTIVE_SHUTDOWN,
)),
LogInfo(
msg="[LifecycleLaunch] Ouster driver node is exiting."),
],
)
)
return launch.LaunchDescription([
launch.actions.DeclareLaunchArgument(name='model', default_value=default_model_path,
description='Absolute path to robot urdf file'),
launch.actions.DeclareLaunchArgument(name='use_sim_time', default_value='false',
description='Flag to enable use_sim_time'),
map_saver_server,
clock_server,
rosbag_node,
robot_state_publisher_node,
slam_toolbox_node,
# localization_node,
odom_publisher_node,
ouster_node,
activate_event,
configure_event,
shutdown_event
])
| StarcoderdataPython |
1786748 | <gh_stars>1-10
from pyvultr.base_api import SupportHttpMethod
from pyvultr.v2 import BareMetalPlanItem, Plan
from tests.v2 import BaseTestV2
class TestPlan(BaseTestV2):
def test_list(self):
"""Test list plan."""
with self._get("response/plans") as mock:
_excepted_result = mock.python_body["plans"][0]
excepted_result = Plan.from_dict(_excepted_result)
_real_result = self.api_v2.plan.list()
real_result: Plan = _real_result.first()
self.assertEqual(mock.url, "https://api.vultr.com/v2/plans")
self.assertEqual(mock.method, SupportHttpMethod.GET.value)
self.assertEqual(real_result, excepted_result)
def test_list_bare_metal(self):
"""Test list bare metal plan."""
with self._get("response/plans_bare_metal") as mock:
_excepted_result = mock.python_body["plans_metal"][0]
excepted_result = BareMetalPlanItem.from_dict(_excepted_result)
_real_result = self.api_v2.plan.list_bare_metal()
real_result: BareMetalPlanItem = _real_result.first()
self.assertEqual(mock.url, "https://api.vultr.com/v2/plans-metal")
self.assertEqual(mock.method, SupportHttpMethod.GET.value)
self.assertEqual(real_result, excepted_result)
| StarcoderdataPython |
180396 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.dokku_utils import subprocess_check_output
import subprocess
import re
DOCUMENTATION = """
---
module: dokku_ps_scale
short_description: Manage process scaling for a given dokku application
options:
app:
description:
- The name of the app
required: True
default: null
aliases: []
scale:
description:
- A map of scale values where proctype => qty
required: True
default: {}
aliases: []
skip_deploy:
description:
- Whether to skip the corresponding deploy or not. If the task is idempotent
then leaving skip_deploy as false will not trigger a deploy.
required: false
default: false
author: <NAME>
requirements: [ ]
"""
EXAMPLES = """
- name: scale web and worker processes
dokku_ps_scale:
app: hello-world
scale:
web: 4
worker: 4
- name: scale web and worker processes without deploy
dokku_ps_scale:
app: hello-world
skip_deploy: true
scale:
web: 4
worker: 4
"""
def dokku_ps_scale(data):
command = "dokku --quiet ps:scale {0}".format(data["app"])
output, error = subprocess_check_output(command)
if error is not None:
return output, error
# strip all spaces from output lines
output = [re.sub(r"\s+", "", line) for line in output]
scale = {}
for line in output:
if ":" not in line:
continue
proctype, qty = line.split(":", 1)
scale[proctype] = int(qty)
return scale, error
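# The parser above only keeps lines containing ':', so a line such as "web: 2"
# ends up as {"web": 2}; header or banner lines without a colon are ignored.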
def dokku_ps_scale_set(data):
is_error = True
has_changed = False
meta = {"present": False}
proctypes_to_scale = []
existing, error = dokku_ps_scale(data)
for proctype, qty in data["scale"].items():
if qty == existing.get(proctype, None):
continue
proctypes_to_scale.append("{0}={1}".format(proctype, qty))
if len(proctypes_to_scale) == 0:
is_error = False
has_changed = False
return (is_error, has_changed, meta)
command = "dokku ps:scale {0}{1} {2}".format(
"--skip-deploy " if data["skip_deploy"] is True else "",
data["app"],
" ".join(proctypes_to_scale),
)
try:
subprocess.check_call(command, shell=True)
is_error = False
has_changed = True
except subprocess.CalledProcessError as e:
meta["error"] = str(e)
return (is_error, has_changed, meta)
def main():
fields = {
"app": {"required": True, "type": "str"},
"scale": {"required": True, "type": "dict", "no_log": True},
"skip_deploy": {"required": False, "type": "bool"},
}
module = AnsibleModule(argument_spec=fields, supports_check_mode=False)
is_error, has_changed, result = dokku_ps_scale_set(module.params)
if is_error:
module.fail_json(msg=result["error"], meta=result)
module.exit_json(changed=has_changed, meta=result)
if __name__ == "__main__":
main()
| StarcoderdataPython |
4836254 | <filename>data_processing/EMScompare_resources.py
import os
import pandas as pd
import matplotlib.pyplot as plt
import sys
sys.path.append('../')
from load_paths import load_box_paths
import matplotlib as mpl
import matplotlib.dates as mdates
from datetime import date, timedelta, datetime
import seaborn as sns
from processing_helpers import *
from plotting.colors import load_color_palette
mpl.rcParams['pdf.fonttype'] = 42
today = datetime.today()
datapath, projectpath, wdir,exe_dir, git_dir = load_box_paths()
plotdir = os.path.join(projectpath, 'Plots + Graphs', 'Emresource Plots')
def emresource_by_ems() :
df = pd.read_csv(os.path.join(datapath, 'covid_IDPH', 'Corona virus reports', 'emresource_by_hospital.csv'))
cols = ['confirmed_covid_deaths_prev_24h',
'confirmed_covid_icu',
'covid_non_icu']
gdf = df.groupby(['date_of_extract', 'region'])[cols].agg(np.sum).reset_index()
gdf = gdf.sort_values(by=['date_of_extract', 'region'])
gdf.to_csv(os.path.join(datapath, 'covid_IDPH', 'Corona virus reports', 'emresource_by_EMSregion.csv'), index=False)
def plot_emresource(scale='') :
ems_regions = {
'northcentral' : [1, 2],
'northeast' : [7, 8, 9, 10, 11],
'central' : [3, 6],
'southern' : [4, 5]
}
ref_df = pd.read_csv(os.path.join(datapath, 'covid_IDPH', 'Corona virus reports',
'emresource_by_region.csv'))
sxmin = '2020-03-24'
xmin = datetime.strptime(sxmin, '%Y-%m-%d')
xmax = datetime.today()
datetoday = xmax.strftime('%y%m%d')
ref_df['suspected_and_confirmed_covid_icu'] = ref_df['suspected_covid_icu'] + ref_df['confirmed_covid_icu']
ref_df['date'] = pd.to_datetime(ref_df['date_of_extract'])
first_day = datetime.strptime('2020-03-24', '%Y-%m-%d')
ref_df = ref_df.rename(columns={
'confirmed_covid_deaths_prev_24h' : 'deaths',
'confirmed_covid_icu' : 'ICU conf',
'confirmed_covid_on_vents' : 'vents conf',
'suspected_and_confirmed_covid_icu' : 'ICU conf+susp',
'covid_non_icu' : 'non ICU'
})
channels = ['ICU conf+susp', 'ICU conf', 'vents conf', 'deaths', 'non ICU']
ref_df = ref_df[['date', 'covid_region'] + channels]
palette = load_color_palette('wes')
formatter = mdates.DateFormatter("%m-%d")
fig_all = plt.figure(figsize=(10,8))
fig = plt.figure(figsize=(14,10))
fig.subplots_adjust(left=0.07, right=0.97, top=0.95, bottom=0.05, hspace=0.25)
def format_plot(ax) :
ax.set_xlim(xmin, )
ax.xaxis.set_major_formatter(formatter)
ax.xaxis.set_major_locator(mdates.MonthLocator())
if scale == 'log' :
ax.set_yscale('log')
for ri, (restore_region, ems_list) in enumerate(ems_regions.items()) :
ax_all = fig_all.add_subplot(2,2,ri+1)
ax = fig.add_subplot(4,6,6*ri+1)
pdf = ref_df[ref_df['covid_region'].isin(ems_list)].groupby('date').agg(np.sum).reset_index()
for (c,name) in enumerate(channels):
if name == 'non ICU' :
df = pdf[pdf['date'] >= date(2020,5,6)]
else :
df = pdf
df['moving_ave'] = df[name].rolling(window = 7, center=True).mean()
ax_all.plot(df['date'].values, df['moving_ave'], color=palette[c], label=name)
ax_all.scatter(df['date'].values, df[name], s=10, linewidth=0, color=palette[c], alpha=0.3, label='')
ax.plot(df['date'].values, df['moving_ave'], color=palette[c], label=name)
ax.scatter(df['date'].values, df[name], s=10, linewidth=0, color=palette[c], alpha=0.3, label='')
ax_all.set_title(restore_region)
format_plot(ax_all)
if ri == 1 :
ax_all.legend()
format_plot(ax)
ax.set_ylabel(restore_region)
ax.set_title('total')
for ei, ems in enumerate(ems_list) :
ax = fig.add_subplot(4,6,6*ri+1+ei+1)
df = ref_df[ref_df['covid_region'] == ems]
for (c,name) in enumerate(channels):
df['moving_ave'] = df[name].rolling(window=7, center=True).mean()
ax.plot(df['date'].values, df['moving_ave'], color=palette[c], label=name)
ax.scatter(df['date'].values, df[name], s=10, linewidth=0, color=palette[c], alpha=0.3, label='')
ax.set_title('covid region %d' % ems)
format_plot(ax)
if ems == 2 :
ax.legend(bbox_to_anchor=(1.5, 1))
fig_all.savefig(os.path.join(plotdir, 'EMResource_by_restore_region_%s.png' % scale))
fig_all.savefig(os.path.join(plotdir, 'EMResource_by_restore_region_%s.pdf' % scale), format='PDF')
fig.savefig(os.path.join(plotdir, 'EMResource_by_covid_region_%s.png' % scale))
fig.savefig(os.path.join(plotdir, 'EMResource_by_covid_region_%s.pdf' % scale), format='PDF')
if __name__ == '__main__' :
plot_emresource('nolog')
plot_emresource('log')
emresource_by_ems()
# plt.show()
| StarcoderdataPython |
3307349 | import sys, requests
from datetime import datetime,timedelta
import pandas as pd
loc = ["47.964718", "7.955852"]
d_from_date = datetime.strptime('2017-10-01' , '%Y-%m-%d')
d_to_date = datetime.strptime('2018-08-10' , '%Y-%m-%d')
delta = d_to_date - d_from_date
latitude = loc[0]
longitude = loc[1]
with open('/home/kh41/02_SolarAnlage/api', 'r') as file:
for line in file:
api_key=line.rstrip()
option_list = "exclude=currently,minutely,hourly,alerts&units=si"
df = pd.DataFrame()
print("\nLocation: home")
for i in range(delta.days+1):
my_df = pd.DataFrame([0])
new_date = (d_from_date + timedelta(days=i)).strftime('%Y-%m-%d')
search_date = new_date+"T00:00:00"
response = requests.get("https://api.darksky.net/forecast/"+api_key+"/"+latitude+","+longitude+","+search_date+"?"+option_list)
json_res = response.json()
date_id = (d_from_date + timedelta(days=i)).strftime('%Y-%m-%d %A')
my_df['date'] = date_id
print("\n"+date_id)
unit_type = ' degC'
temp_min = str(json_res['daily']['data'][0]['apparentTemperatureMin'])
temp_max = str(json_res['daily']['data'][0]['apparentTemperatureMax'])
# print("Min temperature: "+temp_min+unit_type)
# print("Max temperature: "+temp_max+unit_type)
# print("Summary: " + json_res['daily']['data'][0]['summary'])
precip_type = None
precip_prob = None
try:
precip_type = json_res['daily']['data'][0]['precipType']
except:
precip_type = -1
try:
cloud_cover = json_res['daily']['data'][0]['cloudCover']
except:
cloud_cover = -1
try:
humidity = json_res['daily']['data'][0]['humidity']
except:
humidity = -1
# visibility = json_res['daily']['data'][0]['visibility']
sunrise = json_res['daily']['data'][0]['sunriseTime']
sunset = json_res['daily']['data'][0]['sunsetTime']
try:
precip_prob = json_res['daily']['data'][0]['precipProbability']
except:
precip_prob = -1
# print("Precip type: {}".format(precip_type))
# print("cloudCover: {}".format(cloud_cover))
# print("humidity: {}".format(humidity))
# print("sunrise: {}".format(datetime.fromtimestamp(sunrise).strftime('%Y-%m-%d %H:%M:%S')))
# print("sunset: {}".format(datetime.fromtimestamp(sunset).strftime('%Y-%m-%d %H:%M:%S')))
# except:
# precip_type = -1
# cloud_cover = -1
# humidity = -1
# sunrise = -1
# sunset = -1
# precip_prob = -1
my_df['precip_type'] = precip_type
my_df['precip_prob'] = precip_prob
my_df['cloud_cover'] = cloud_cover
my_df['humidity'] = humidity
my_df['sunrise'] = datetime.fromtimestamp(sunrise).strftime('%Y-%m-%d %H:%M:%S')
my_df['sunset'] = datetime.fromtimestamp(sunset).strftime('%Y-%m-%d %H:%M:%S')
my_df['longitude'] = longitude
my_df['latitude'] = latitude
# print(my_df.head())
df = df.append(my_df)
df = df.reset_index(drop=True)
df = df.drop(columns=[0])  # drop the placeholder column created by pd.DataFrame([0])
# calculate sun uptime in seconds
def getSunTime(sunrise, sunset):
sunset = datetime.strptime(sunset, '%Y-%m-%d %H:%M:%S')
sunrise = datetime.strptime(sunrise, '%Y-%m-%d %H:%M:%S')
# print(sunset, sunrise)
sun_time = (sunset - sunrise).seconds
# print(sun_time)
# sys.exit()
return sun_time
df['sun_time'] = df.apply(lambda x: getSunTime(x['sunrise'], x['sunset']), axis=1)
df.to_csv('/home/kh41/02_SolarAnlage/weather.csv')
| StarcoderdataPython |
3323678 | <filename>python-ml-book/ch02/perceptron.py
"""
Implementation of perceptron algorithm from Chapter 2 of "Python Machine Learning"
"""
import numpy as np
def train_perceptron(observations, labels, learning_rate=0.1, max_training_iterations=10):
"""
Trains a (binary) perceptron, returning a function that can predict / classify
given a new observations, as well as insight into how the training progressed via
a log of the weights and number of errors for each iteration.
:param observations: array of rows
:param labels: correct label classification for each row: [1, -1, 1, 1, ...]
:param learning_rate: how fast to update weights
:param max_training_iterations: max number of times to iterate through observations
:return: (prediction_fn, weights_log, errors_log)
"""
the_weights = np.zeros(1 + observations.shape[1])
weights_log = []
errors_log = []
def net_input(observation, weights):
return np.dot(observation, weights[1:]) + weights[0]
def predict(observation, weights=the_weights):
return np.where(net_input(observation, weights) >= 0.0, 1, -1)
for _ in range(max_training_iterations):
errors = 0
weights_log.append(np.copy(the_weights))
for observation, correct_output in zip(observations, labels):
weight_delta = learning_rate * (correct_output - predict(observation))
the_weights[1:] += weight_delta * observation
the_weights[0] += weight_delta
errors += int(weight_delta != 0.0)
errors_log.append(errors)
if errors == 0:
break
return predict, weights_log, errors_log
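
# A small self-contained usage sketch; the toy data and printed messages are made
# up for illustration, and labels follow the 1/-1 convention expected above.
if __name__ == "__main__":
    X = np.array([[2.0, 1.0], [1.5, 2.0], [-1.0, -1.5], [-2.0, -0.5]])
    y = np.array([1, 1, -1, -1])
    predict, weights_log, errors_log = train_perceptron(X, y)
    print("errors per training iteration:", errors_log)
    print("prediction for [1.8, 1.2]:", predict(np.array([1.8, 1.2])))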
| StarcoderdataPython |
3281956 | #!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# A programming puzzle from Einav in Z3
#
# From
# 'A programming puzzle from Einav'
# http://gcanyon.wordpress.com/2009/10/28/a-programming-puzzle-from-einav/
# '''
# My friend Einav gave me this programming puzzle to work on. Given
# this array of positive and negative numbers:
# 33 30 -10 -6 18 7 -11 -23 6
# ...
# -25 4 16 30 33 -23 -4 4 -23
#
# You can flip the sign of entire rows and columns, as many of them
# as you like. The goal is to make all the rows and columns sum to positive
# numbers (or zero), and then to find the solution (there are more than one)
# that has the smallest overall sum. So for example, for this array:
# 33 30 -10
# -16 19 9
# -17 -12 -14
# You could flip the sign for the bottom row to get this array:
# 33 30 -10
# -16 19 9
# 17 12 14
# Now all the rows and columns have positive sums, and the overall total is
# 108.
# But you could instead flip the second and third columns, and the second
# row, to get this array:
# 33 -30 10
# 16 19 9
# -17 12 14
# All the rows and columns still total positive, and the overall sum is just
# 66. So this solution is better (I don't know if it's the best)
# A pure brute force solution would have to try over 30 billion solutions.
# I wrote code to solve this in J. I'll post that separately.
# '''
#
# This Z3 model was written by <NAME> (<EMAIL>)
# See also my Z3 page: http://hakank.org/z3/
#
from __future__ import print_function
from z3_utils_hakank import *
def main():
# sol = Solver()
sol = SolverFor("QF_NIA")
#
# data
#
# small problem
# rows = 3;
# cols = 3;
# data = [
# [ 33, 30, -10],
# [-16, 19, 9],
# [-17, -12, -14]
# ]
# Full problem
rows = 27
cols = 9
data = [
[33, 30, 10, -6, 18, -7, -11, 23, -6],
[16, -19, 9, -26, -8, -19, -8, -21, -14],
[17, 12, -14, 31, -30, 13, -13, 19, 16],
[-6, -11, 1, 17, -12, -4, -7, 14, -21],
[18, -31, 34, -22, 17, -19, 20, 24, 6],
[33, -18, 17, -15, 31, -5, 3, 27, -3],
[-18, -20, -18, 31, 6, 4, -2, -12, 24],
[27, 14, 4, -29, -3, 5, -29, 8, -12],
[-15, -7, -23, 23, -9, -8, 6, 8, -12],
[33, -23, -19, -4, -8, -7, 11, -12, 31],
[-20, 19, -15, -30, 11, 32, 7, 14, -5],
[-23, 18, -32, -2, -31, -7, 8, 24, 16],
[32, -4, -10, -14, -6, -1, 0, 23, 23],
[25, 0, -23, 22, 12, 28, -27, 15, 4],
[-30, -13, -16, -3, -3, -32, -3, 27, -31],
[22, 1, 26, 4, -2, -13, 26, 17, 14],
[-9, -18, 3, -20, -27, -32, -11, 27, 13],
[-17, 33, -7, 19, -32, 13, -31, -2, -24],
[-31, 27, -31, -29, 15, 2, 29, -15, 33],
[-18, -23, 15, 28, 0, 30, -4, 12, -32],
[-3, 34, 27, -25, -18, 26, 1, 34, 26],
[-21, -31, -10, -13, -30, -17, -12, -26, 31],
[23, -31, -19, 21, -17, -10, 2, -23, 23],
[-3, 6, 0, -3, -32, 0, -10, -25, 14],
[-19, 9, 14, -27, 20, 15, -5, -27, 18],
[11, -6, 24, 7, -17, 26, 20, -31, -25],
[-25, 4, -16, 30, 33, 23, -4, -4, 23]
]
#
# variables
#
x = {}
for i in range(rows):
for j in range(cols):
x[i, j] = makeIntVar(sol, 'x[%i,%i]' % (i, j), -100, 100)
x_flat = [x[i, j] for i in range(rows) for j in range(cols)]
row_sums = [makeIntVar(sol, 'row_sums(%i)' % i, 0, 300)
for i in range(rows)]
col_sums = [makeIntVar(sol, 'col_sums(%i)' % j, 0, 300)
for j in range(cols)]
row_signs = [makeIntVarVals(sol, 'row_signs(%i)' % i,[-1, 1])
for i in range(rows)]
col_signs = [makeIntVarVals(sol, 'col_signs(%i)' % j, [-1, 1])
for j in range(cols)]
# total sum: to be minimized
total_sum = makeIntVar(sol, 'total_sum', 0, 1000)
#
# constraints
#
for i in range(rows):
for j in range(cols):
sol.add(x[i, j] == data[i][j] * row_signs[i] * col_signs[j])
sol.add(total_sum == Sum([data[i][j] * row_signs[i] * col_signs[j]
for i in range(rows) for j in range(cols)]))
# row sums
for i in range(rows):
sol.add(row_sums[i] == Sum([row_signs[i] * col_signs[j] * data[i][j]
for j in range(cols)]))
# column sums
for j in range(cols):
sol.add(col_sums[j] == Sum([row_signs[i] * col_signs[j] * data[i][j]
for i in range(rows)]))
# objective
# sol.minimize(total_sum)
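    # A plain Solver has no minimize(); the loop below re-queries the model and, via
    # getLessSolution (which presumably adds a constraint that total_sum be smaller
    # than the value just found), tightens the bound until the problem becomes unsat.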
num_solutions = 0
while sol.check() == sat:
num_solutions += 1
mod = sol.model()
print('total_sum:', mod.eval(total_sum))
print('row_sums:', [mod.eval(row_sums[i]) for i in range(rows)])
print('col_sums:', [mod.eval(col_sums[j]) for j in range(cols)])
print('row_signs:', [mod.eval(row_signs[i]) for i in range(rows)])
print('col_signs:', [mod.eval(col_signs[j]) for j in range(cols)])
print('x:')
for i in range(rows):
for j in range(cols):
print('%3i' % mod.eval(x[i, j]).as_long(), end=' ')
print()
print()
getLessSolution(sol,mod,total_sum)
print()
print('num_solutions:', num_solutions)
if __name__ == '__main__':
main()
| StarcoderdataPython |
3368806 | import logging
from viadot.tasks import AzureSQLCreateTable, AzureSQLDBQuery
logger = logging.getLogger(__name__)
SCHEMA = "sandbox"
TABLE = "test"
def test_azure_sql_create_table():
create_table_task = AzureSQLCreateTable()
create_table_task.run(
schema=SCHEMA,
table=TABLE,
dtypes={"id": "INT", "name": "VARCHAR(25)"},
if_exists="replace",
)
def test_azure_sql_run_sqldb_query_empty_result():
sql_query_task = AzureSQLDBQuery()
list_table_info_query = f"""
SELECT *
FROM sys.tables t
JOIN sys.schemas s
ON t.schema_id = s.schema_id
WHERE s.name = '{SCHEMA}' AND t.name = '{TABLE}'
"""
exists = bool(sql_query_task.run(list_table_info_query))
assert exists
result = sql_query_task.run(f"SELECT * FROM {SCHEMA}.{TABLE}")
assert result == []
def test_azure_sql_run_insert_query():
sql_query_task = AzureSQLDBQuery()
sql_query_task.run(f"INSERT INTO {SCHEMA}.{TABLE} VALUES (1, 'Mike')")
result = list(sql_query_task.run(f"SELECT * FROM {SCHEMA}.{TABLE}")[0])
assert result == [1, "Mike"]
def test_azure_sql_run_drop_query():
sql_query_task = AzureSQLDBQuery()
result = sql_query_task.run(f"DROP TABLE {SCHEMA}.{TABLE}")
assert result is True
list_table_info_query = f"""
SELECT *
FROM sys.tables t
JOIN sys.schemas s
ON t.schema_id = s.schema_id
WHERE s.name = '{SCHEMA}' AND t.name = '{TABLE}'
"""
exists = bool(sql_query_task.run(list_table_info_query))
assert not exists
| StarcoderdataPython |
1621067 | <gh_stars>1-10
import numpy as np
import xarray as xr
import cmocean
import cartopy
import cartopy.crs as ccrs
import matplotlib as mpl
import matplotlib.ticker as mticker
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from paths import path_results
from regions import boolean_mask, SST_index_bounds
from plotting import discrete_cmap
plt.rcParams['figure.constrained_layout.use'] = False
def map_robinson(xa, domain, cmap, minv, maxv, label, filename=None, text1=None, text2=None, rects=None, sig=None, clon=0):
fig, ax = make_map(xa=xa, domain=domain, proj='rob', cmap=cmap, minv=minv, maxv=maxv, label=label,
filename=filename, text1=text1, text2=text2, rects=rects, sig=sig, clon=clon)
return fig, ax
def map_eq_earth(xa, domain, cmap, minv, maxv, label, filename=None, text1=None, text2=None, rects=None, sig=None, clon=0):
fig, ax = make_map(xa=xa, domain=domain, proj='ee', cmap=cmap, minv=minv, maxv=maxv, label=label,
filename=filename, text1=text1, text2=text2, rects=rects, sig=sig, clon=clon)
return fig, ax
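# Hedged usage sketch (the DataArray name here is illustrative):
#   fig, ax = map_robinson(sst_anomaly, domain='ocn_T', cmap=cmocean.cm.balance,
#                          minv=-1, maxv=1, label='SST anomaly [K]')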
def make_map(xa, domain, proj, cmap, minv, maxv, label, filename=None, text1=None, text2=None, rects=None, sig=None, clon=0):
""" global map (Robinson or Equal Earth projection) of xa
optional: significance shading, polygons, text, central longitude, file output
"""
assert type(xa)==xr.core.dataarray.DataArray
assert domain in ['atm', 'ocn_T', 'ocn_U', 'ocn_rect', 'ocn_low', 'ocn_had']
assert proj in ['ee', 'rob', 'aa']
if proj=='aa':
fig = plt.figure(figsize=(4,6.5), constrained_layout=True)
ax = plt.axes(projection=ccrs.LambertAzimuthalEqualArea(central_longitude=-30, central_latitude=20))
ax.set_extent((-6e6, 3.5e6, -8.5e6, 1e7), crs=ccrs.LambertAzimuthalEqualArea())
cax, kw = mpl.colorbar.make_axes(ax,location='bottom',pad=0.01,shrink=0.9)
else:
fig = plt.figure(figsize=(8,5))
if proj=='ee':
ax = fig.add_subplot(1, 1, 1,
projection=ccrs.EqualEarth(central_longitude=clon))
elif proj=='rob':
ax = fig.add_subplot(1, 1, 1,
projection=ccrs.Robinson(central_longitude=clon))
ax.set_position([.02,.05,.96,.93])
cax, kw = mpl.colorbar.make_axes(ax,location='bottom',pad=0.03,shrink=0.8)
if domain in ['atm']:#, 'ocn_low']:
lats = xa.lat
lons = xa.lon
elif domain in ['ocn_T', 'ocn_low']:
lats = xa.TLAT
lons = xa.TLONG
elif domain=='ocn_U':
lats = xa.ULAT
lons = xa.ULONG
elif domain=='ocn_rect':
lats = xa.t_lat
lons = xa.t_lon
elif domain=='ocn_had':
lats = xa.latitude
lons = xa.longitude
lons, lats = np.meshgrid(lons, lats)
im = ax.pcolormesh(lons, lats, xa.values,
cmap=cmap, vmin=minv, vmax=maxv,
transform=ccrs.PlateCarree(),
)
# significance outline
if type(sig)==xr.core.dataarray.DataArray:
if domain=='ocn_had': flons, flats = lons.flatten(), lats.flatten()
else: flons, flats = lons.values.flatten(), lats.values.flatten()
ax.tricontour(flons, flats, sig.values.flatten(), levels=[.5],
linestyles='dashed', linewidths=1.5, #cmap='gray',
transform=ccrs.PlateCarree(),
)
# coastlines/land
if domain=='atm':
ax.coastlines()
elif domain in ['ocn_T', 'ocn_U', 'ocn_had', 'ocn_low']:
ax.add_feature(cartopy.feature.LAND,
zorder=2, edgecolor='black', facecolor='w')
# text
if text1!=None:
ax.text(0, 1, text1, ha='left' , va='top',
transform=ax.transAxes, fontsize=16)
if text2!=None:
ax.text(1, 1, text2, ha='right', va='top',
transform=ax.transAxes, fontsize=16)
# SST index polygons
    if rects is not None:  # `is not None` avoids an ambiguous elementwise comparison when rects is an ndarray
if type(rects)==np.ndarray: rects = [rects]
assert type(rects)==list
for rect in rects:
assert type(rect)==np.ndarray
ax.add_patch(mpatches.Polygon(xy=rect,
facecolor='none', edgecolor='k',
linewidth=2, zorder=2,
transform=ccrs.PlateCarree(), ), )
# grid
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=False)
gl.ylocator = mticker.FixedLocator([-90, -60, -30, 0, 30, 60, 90])
gl.xlocator = mticker.FixedLocator([-180, -120, -60, 0, 60, 120, 180])
# colorbar
ax.add_feature(cartopy.feature.LAND,
zorder=2, edgecolor='black', facecolor='w')
plt.colorbar(im, cax=cax, label=label, orientation='horizontal')
# output
if filename!=None: plt.savefig(filename, dpi=100)
return fig, ax
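# Usage sketch (synthetic 1-D lat/lon field on the 'atm' branch; values and
# coordinate spacing are illustrative only):
#
#     da = xr.DataArray(np.random.rand(10, 20), dims=['lat', 'lon'],
#                       coords={'lat': np.linspace(-89, 89, 10),
#                               'lon': np.linspace(0, 342, 20)})
#     fig, ax = make_map(xa=da, domain='atm', proj='rob', cmap=cmocean.cm.balance,
#                        minv=0, maxv=1, label='synthetic field [1]')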
def map_ocn_robinson(xr_DataArray, cmap, minv, maxv, label, filename=None, grid='T'):
    fig = plt.figure(figsize=(8,5))
    ax = fig.add_subplot(1, 1, 1, projection=ccrs.Robinson())
    ax.set_position([.02,.05,.96,.93])
    cax, kw = mpl.colorbar.make_axes(ax,location='bottom',pad=0.03,shrink=0.8)
    # draw the field on the requested POP grid; coordinate names follow the
    # conventions used elsewhere in this module (TLAT/TLONG for T, ULAT/ULONG for U)
    if grid=='T':
        lats, lons = xr_DataArray.TLAT, xr_DataArray.TLONG
    else:
        lats, lons = xr_DataArray.ULAT, xr_DataArray.ULONG
    im = ax.pcolormesh(lons, lats, xr_DataArray.values,
                       cmap=cmap, vmin=minv, vmax=maxv,
                       transform=ccrs.PlateCarree())
    cbar = fig.colorbar(im, cax=cax, extend='both', **kw)
    cbar.ax.tick_params(labelsize=14)
    cbar.set_label(label, size=16)
    if filename is not None: plt.savefig(filename)
    return fig
def rect_polygon(extent):
assert type(extent)==tuple
(lonmin,lonmax,latmin,latmax) = extent
n=50
xs = [np.linspace(lonmin,lonmax,n), np.linspace(lonmax,lonmax,n),
np.linspace(lonmax,lonmin,n), np.linspace(lonmin,lonmin,n)]
ys = [np.linspace(latmin,latmin,n), np.linspace(latmin,latmax,n),
np.linspace(latmax,latmax,n), np.linspace(latmax,latmin,n)]
xs = [item for sublist in xs for item in sublist]
ys = [item for sublist in ys for item in sublist]
poly_coords = np.swapaxes(np.array([xs, ys]),0,1)
return poly_coords
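# Sketch: rect_polygon traces the rectangle edge as a (4*n, 2) array of
# (lon, lat) vertices, which is the form `make_map` expects in `rects`.
# The extent below is illustrative only.
def _example_rect_polygon():
    poly = rect_polygon((-80.0, 0.0, 0.0, 60.0))  # (lonmin, lonmax, latmin, latmax)
    assert poly.shape == (200, 2)                 # 4 edges x 50 points each
    return poly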
def regr_map(ds, index, run, fn=None):
""" map of regression slope with 95% significance countours and SST index polygons """
if run in ['ctrl', 'rcp']: domain = 'ocn'
elif run in ['lpd', 'lpi']: domain = 'ocn_low'
elif run=='had': domain = 'ocn_had'
MASK = boolean_mask(domain=domain, mask_nr=0)
xa = ds.slope.where(MASK)
if domain in ['ocn', 'ocn_low']:
xa = xa.assign_coords(TLONG=ds.TLONG)
if index in ['AMO', 'SOM']:
rects = rect_polygon(SST_index_bounds(index))
clon = 300
nv = .4
elif index in ['PDO', 'IPO']:
rects = rect_polygon(SST_index_bounds(index))
clon = 200
nv = .4
elif index=='TPI':
rects = [rect_polygon(SST_index_bounds('TPI1')),
rect_polygon(SST_index_bounds('TPI2')),
rect_polygon(SST_index_bounds('TPI3')),
]
clon = 200
nv = .3
    # two-tailed significance mask (the active thresholds correspond to a 99% level;
    # the commented values are the 95% equivalents)
    sig = ds.pval#.where(MASK)
    # tail1 = np.where(sig<0.025, 1, 0)
    tail1 = np.where(sig<0.005, 1, 0)
    # tail2 = np.where(sig>0.975, 1, 0)
    tail2 = np.where(sig>0.995, 1, 0)
    sig.values = tail1 + tail2
# if run in ['ctrl', 'rcp', 'had']: sig = sig.where(MASK)
proj = 'rob'
cm = discrete_cmap(16, cmocean.cm.balance)
label ='regression slope [K/K]'
text1 = f'SST({index})\nregr.'
if run=='had':
text2 = f'{run.upper()}\n{ds.first_year+1870}-\n{ds.last_year+1870}'
elif run in ['ctrl', 'lpd']:
text2 = f'{run.upper()}\n{ds.first_year}-\n{ds.last_year}'
if run in ['ctrl', 'rcp']:
domain = 'ocn_T'
f, ax = make_map(xa=xa, domain=domain, proj=proj, cmap=cm, minv=-nv, maxv=nv,
label=label, filename=fn, text1=text1, text2=text2,
rects=rects, sig=sig, clon=clon)
def add_cyclic_POP(da):
""" add a cyclis point to a 2D POP output field to remove missing data line in cartopy """
assert 'TLAT' in da.coords
assert 'TLONG' in da.coords
lats = np.zeros((len(da.nlat),len(da.nlon)+1))
lons = np.zeros((len(da.nlat),len(da.nlon)+1))
data = np.zeros((len(da.nlat),len(da.nlon)+1))
lats[:,:-1] = da.TLAT
lons[:,:-1] = da.TLONG
data[:,:-1] = da.data
lats[:,-1] = da.TLAT[:,0]
lons[:,-1] = da.TLONG[:,0]
data[:,-1] = da.data[:,0]
dims = ['nlat','nlon']
new_da = xr.DataArray(data=data, dims=dims, coords={'TLAT':(dims,lats), 'TLONG':(dims,lons)})
return new_da
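# Sketch: add_cyclic_POP wraps the first i-column onto the end of a POP field so
# cartopy leaves no seam at the grid's longitudinal cut. Synthetic self-check
# (shapes, coordinates and values are illustrative only):
def _check_add_cyclic_POP():
    dims = ['nlat', 'nlon']
    lat = np.array([[10.0, 10.0, 10.0], [20.0, 20.0, 20.0]])
    lon = np.array([[0.0, 120.0, 240.0], [0.0, 120.0, 240.0]])
    da = xr.DataArray(np.arange(6.0).reshape(2, 3), dims=dims,
                      coords={'nlat': [0, 1], 'nlon': [0, 1, 2],
                              'TLAT': (dims, lat), 'TLONG': (dims, lon)})
    cyc = add_cyclic_POP(da)
    assert cyc.shape == (2, 4)
    assert float(cyc[0, -1]) == float(da[0, 0])  # wrapped point equals the first column
    return cyc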
def add_cyclic_rectangular(da):
""" add a cyclis point to a rectangular lat-lon field to remove missing data line in cartopy """
assert 'lat' in da.coords
assert 'lon' in da.coords
lons = np.zeros((len(da.lon)+1))
data = np.zeros((len(da.lat),len(da.lon)+1))
lons[:-1] = da.lon
data[:,:-1] = da.data
lons[-1] = da.lon[0]
data[:,-1] = da.data[:,0]
dims = ['lat','lon']
new_da = xr.DataArray(data=data, dims=dims, coords={'lat':da.lat, 'lon':lons})
    return new_da
| StarcoderdataPython |
1625248 | <reponame>patpio/drf_images_api<gh_stars>1-10
from django.db import models
class ExpiringLink(models.Model):
url = models.URLField()
token = models.UUIDField()
created_at = models.DateTimeField(auto_now_add=True)
duration = models.IntegerField()
def __str__(self):
return f'{self.url}'
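# Sketch (not part of the original model): assuming `duration` holds a lifetime
# in seconds, expiry can be checked against `created_at` with a small helper.
def link_is_expired(link, now=None):
    """Return True if `link` has outlived its duration (duration assumed to be seconds)."""
    import datetime
    from django.utils import timezone
    now = now or timezone.now()
    return now > link.created_at + datetime.timedelta(seconds=link.duration)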
| StarcoderdataPython |
3203476 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
import torch
def assert_values_are_close(input, other, rtol=1e-05, atol=1e-06):
are_close = torch.allclose(input, other, rtol=rtol, atol=atol)
if not are_close:
abs_diff = torch.abs(input - other)
abs_other = torch.abs(other)
max_atol = torch.max((abs_diff - rtol * abs_other))
max_rtol = torch.max((abs_diff - atol) / abs_other)
err_msg = "The maximum atol is {}, maximum rtol is {}".format(max_atol, max_rtol)
assert False, err_msg
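# Minimal usage sketch: the helper is silent when the tensors agree and raises an
# AssertionError reporting the worst-case tolerances when they do not.
if __name__ == "__main__":
    a = torch.tensor([1.0, 2.0, 3.0])
    assert_values_are_close(a, a + 1e-7)   # within the default tolerances
    try:
        assert_values_are_close(a, a + 1.0)
    except AssertionError as err:
        print(err)                         # reports the maximum atol / rtol needed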
| StarcoderdataPython |
112581 | <reponame>nybrandnewschool/review4d
import contextlib
__all__ = [
'suppress_messages',
'messages_suppressed',
]
@contextlib.contextmanager
def suppress_messages(ui):
previous_value = getattr(ui, 'messages_suppressed', False)
try:
ui.messages_suppressed = True
yield
finally:
ui.messages_suppressed = previous_value
def messages_suppressed(ui):
return getattr(ui, 'messages_suppressed', False)
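# Usage sketch: `ui` can be any object -- the context manager toggles a
# `messages_suppressed` attribute on it and restores the previous value on exit.
if __name__ == "__main__":
    class _FakeUI:
        pass
    ui = _FakeUI()
    with suppress_messages(ui):
        assert messages_suppressed(ui)
    assert not messages_suppressed(ui)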
| StarcoderdataPython |
65780 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019, Linear Labs Technologies
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import torch
import tqdm
import math
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import torchvision.models as models
import yajl as json
from tensorboardX import SummaryWriter
import random
from PIL import Image
import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence
from ..Common.utils import *
from ..Common.Compiler import Compile,cfg2nets
from ..DataSet.TestSet import TestSet
from BLOX.Modules.ReplayMemory import ReplayMemory
from collections import namedtuple
from itertools import count
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
Transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward'))
device = 'cuda' if torch.cuda.is_available() else 'cpu'
from BLOX.Common.Strings import TITLE as TEXT
from BLOX.Core.Recordable import METRICS,ADDITIONAL_METRICS,Vizzy
class Tester:
def __init__(self,args):
try: self.config = json.load(open(args.config,'r'))
except:
if isinstance(args,dict): self.config = args
elif isinstance(args,str): self.config = json.load(open(args,'r'))
else:raise ValueError('Incorrect data type passed to Trainer class')
if self.config['Verbose']:print(TEXT+('\n'*8))
def run(self):
config = self.config
torch.cuda.empty_cache()
model = cfg2nets(config)
# data = DataLoader(TestSet(config['DataSet']), batch_size=config['BatchSize'] if 'BatchSize' in config else 1,shuffle=True )
data = TestSet(config['DataSet'])
writer = SummaryWriter(config['TensorboardX']['Dir'] if 'Dir' in config['TensorboardX'] else 'runs')
tlosses = np.zeros(config['Epochs'])
dlosses = np.zeros(config['Epochs'])
evaluator = create_supervised_evaluator(model,
device=device)
for m in config['TensorboardX']['Log']:
if m not in METRICS or m == 'Loss':continue
mtrc = METRICS[m]()
mtrc.attach(evaluator,m)
pbar = tqdm.tqdm(
initial=0, leave=False, total=len(data),
)
add_metrics = {}
@evaluator.on(Events.ITERATION_COMPLETED)
def log_training_loss(engine):
i = engine.state.iteration
            # check LogEvery > 0 first so a zero setting cannot cause a modulo-by-zero
            if config['TensorboardX']['LogEvery'] > 0 and (i % config['TensorboardX']['LogEvery']) == 0 and writer:
for m in engine.state.metrics.keys():
if m in METRICS:writer.add_scalar(m, engine.state.metrics[m], engine.state.iteration)
try:
for m in config['TensorboardX']['Log']:
if m in ADDITIONAL_METRICS:
if m not in add_metrics:
add_metrics[m] = {
'y_h':[],
'y':[]
}
add_metrics[m]['y'].append( engine.state.output[1].view(-1).numpy() )
add_metrics[m]['y_h'].append( engine.state.output[0].view(-1).data.numpy() )
except:pass
pbar.update(config['TensorboardX']['LogEvery'])
try:
evaluator.run(data, max_epochs=1)
except:pass
pbar.close()
try:
for m in config['TensorboardX']['Log']:
if m in ADDITIONAL_METRICS:
getattr(Vizzy,m)(Vizzy,ADDITIONAL_METRICS[m]( add_metrics[m]['y_h'],add_metrics[m]['y'] ))
except Exception as e:pass
| StarcoderdataPython |
1778605 | <reponame>erpnext/foundation
# -*- coding: utf-8 -*-
# Copyright (c) 2017, EOSSF and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from frappe.website.website_generator import WebsiteGenerator
class FrappeApp(WebsiteGenerator):
def validate(self):
if not self.route:
self.route = 'apps/' + self.scrub(self.app_name)
def get_context(self, context):
context.parents = [dict(label='All Apps', route='apps', title='All Apps')]
def get_list_context(context):
context.no_breadcrumbs = True
    context.title = 'Apps / Extensions for ERPNext'
| StarcoderdataPython |
3215618 | from rest_framework.permissions import BasePermission
class IsSelfUser(BasePermission):
def has_object_permission(self, request, view, obj) -> bool:
return request.user == obj
class IsAdminOrSelfUser(IsSelfUser):
def has_object_permission(self, request, view, obj) -> bool:
is_self_user: bool = super().has_object_permission(request, view, obj)
return is_self_user or request.user.is_staff
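# Usage sketch (illustrative; the view, queryset and serializer names below are
# placeholders, not part of this module): DRF evaluates has_object_permission on
# object-level views, so these classes are normally attached via `permission_classes`.
#
#     class UserDetailView(generics.RetrieveUpdateAPIView):
#         queryset = User.objects.all()
#         serializer_class = UserSerializer
#         permission_classes = [IsAdminOrSelfUser]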
| StarcoderdataPython |
174566 | <gh_stars>0
TAIGA_USER = '<EMAIL>'
TAIGA_PASSWORD = '<PASSWORD>'
PROJECT_SLUG = 'test_taiga_user-fake-project-1'
DONE_SLUG = 'Done'
| StarcoderdataPython |
188904 | from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework import viewsets, mixins, status
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication
from core.models import Tag, Ingredient, Recipe
from .serializers import TagSerializer,\
IngredientSerializer,\
RecipeSerializer,\
RecipeDetailSerializer,\
ImageUploadSerializer
class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin):
"""
    Base ViewSet for Tags and Ingredients, which share most of their behaviour
"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
"""
Returns objects for current user only
        bool() is falsy when the query param is absent (None) or an empty string.
"""
assigned_only = bool(self.request.query_params.get('assigned_only'))
queryset = self.queryset
if assigned_only:
queryset = queryset.filter(recipe__isnull=False)
return queryset.filter(user=self.request.user).order_by('-name')
def perform_create(self, serializer):
"""
Allocates the current user as user when creating objects
"""
return serializer.save(user=self.request.user)
class TagViewSet(BaseRecipeAttrViewSet):
"""
ViewSet for the tags
"""
serializer_class = TagSerializer
queryset = Tag.objects.all()
class IngredientApiViewSet(BaseRecipeAttrViewSet):
"""
ViewSet for Ingredients
"""
serializer_class = IngredientSerializer
queryset = Ingredient.objects.all()
class RecipeViewSet(viewsets.ModelViewSet):
"""
ViewSet for the Recipe api
"""
serializer_class = RecipeSerializer
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
queryset = Recipe.objects.all()
def _param_to_ints(self, querystring):
"""
Converts string parameters list to integer list
"""
return [int(str_id) for str_id in querystring.split(',')]
def get_queryset(self):
queryset = self.queryset
tags = self.request.query_params.get('tags')
ingredients = self.request.query_params.get('ingredients')
if tags:
tag_ids = self._param_to_ints(tags)
queryset = queryset.filter(tags__id__in=tag_ids)
if ingredients:
ing_ids = self._param_to_ints(ingredients)
queryset = queryset.filter(ingredients__id__in=ing_ids)
return queryset.filter(user=self.request.user)
def get_serializer_class(self):
"""
        Selects RecipeDetailSerializer when retrieving a recipe's details and
        ImageUploadSerializer when uploading an image; otherwise returns the
        default serializer class.
"""
if self.action == 'retrieve':
return RecipeDetailSerializer
elif self.action == 'upload_image':
return ImageUploadSerializer
return self.serializer_class
def perform_create(self, serializer):
"""
assigns the request's user as the user of the recipe when created
"""
serializer.save(user=self.request.user)
@action(methods=['POST'], detail=True, url_path='upload-image')
def upload_image(self, request, pk=None):
"""
Uploads an image to the recipe app
"""
recipe = self.get_object()
serializer = self.get_serializer(
recipe,
data=request.data
)
if serializer.is_valid():
serializer.save()
return Response(
serializer.data,
status=status.HTTP_200_OK
)
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
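# Illustrative only (not part of the original module): the list endpoints accept
# comma separated id lists, e.g. GET /recipes/?tags=1,3&ingredients=2, which
# _param_to_ints converts into plain integer lists before the queryset filter.
def _demo_param_to_ints():
    assert RecipeViewSet()._param_to_ints('1,3') == [1, 3]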
| StarcoderdataPython |
1776615 | <reponame>ChrisLR/BasicDungeonRL
from bflib import units
from bflib.characters import classes
from bflib.spells import listing
from bflib.spells.base import Spell
from bflib.spells.duration import SpellDuration
from bflib.spells.range import SpellRange
@listing.register_spell
class BladeBarrier(Spell):
name = "Blade Barrier"
class_level_map = {
classes.Cleric: 6,
}
duration = SpellDuration(duration_per_level=units.CombatRound(1))
range = SpellRange(base_range=units.Feet(90))
| StarcoderdataPython |
119832 | # Copyright (c) 2013 python-gerrit Developers.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from gerrit import ssh
from gerrit import filters
class Query(ssh.Client):
def __init__(self, *args, **kwargs):
super(Query, self).__init__(*args, **kwargs)
self._filters = filters.Items()
def __iter__(self):
return iter(self._execute())
def _execute(self):
"""Executes the query and yields items."""
query = [
'gerrit', 'query',
'--current-patch-set',
str(self._filters),
'--format=JSON']
results = self.client.exec_command(' '.join(query))
stdin, stdout, stderr = results
for line in stdout:
normalized = json.loads(line)
# Do not return the last item
# since it is the summary of
# of the query
if "rowCount" in normalized:
raise StopIteration
yield normalized
def filter(self, *filters):
"""Adds generic filters to use in the query
For example:
- is:open
- is:
:param filters: List or tuple of projects
to add to the filters.
"""
self._filters.extend(filters)
return self
class Review(ssh.Client):
"""Single review instance.
This can be used to approve, block or modify
a review.
:params review: The commit sha or review,patch-set
to review.
"""
def __init__(self, review, *args, **kwargs):
super(Review, self).__init__(*args, **kwargs)
self._review = review
self._status = None
self._verified = None
self._code_review = None
def verify(self, value):
"""The verification score for this review."""
self._verified = value
def review(self, value):
"""The score for this review."""
self._code_review = value
def status(self, value):
"""Sets the status of this review
Available options are:
- restore
- abandon
- workinprogress
- readyforreview
"""
self._status = value
def commit(self, message=None):
"""Executes the command
:params message: The message
to use as a comment for this
action.
"""
flags = filters.Items()
if self._status:
flags.add_flags(self._status)
if self._code_review is not None:
flags.add_flags("code-review %s" % self._code_review)
if self._verified is not None:
flags.add_flags("verified %s" % self._verified)
if message:
flags.add_flags("message '%s'" % message)
query = ['gerrit', 'review', str(flags), str(self._review)]
results = self.client.exec_command(' '.join(query))
stdin, stdout, stderr = results
# NOTE(flaper87): Log error messages
error = []
for line in stderr:
error.append(line)
# True if success
return not error
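# Usage sketch (illustrative; constructor arguments depend on gerrit.ssh.Client
# and are assumptions here, not part of this module's documented API):
#
#     query = Query(...)                      # same connection kwargs as ssh.Client
#     query.filter('project:demo', 'status:open')
#     for change in query:
#         review = Review(change['currentPatchSet']['revision'], ...)
#         review.verify('+1')
#         review.review('+2')
#         review.commit(message='Automated review')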
| StarcoderdataPython |
1749941 | from typing import List
class Solution:
def minimumTotal(self, triangle: List[List[int]]) -> int:
minSum = triangle[-1]
for i in range(len(triangle) - 2, -1, -1):
for j in range(i + 1):
minSum[j] = min(minSum[j], minSum[j + 1]) + triangle[i][j]
return minSum[0]
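# Illustrative check of the bottom-up collapse (classic example: 2 + 3 + 5 + 1 = 11):
if __name__ == "__main__":
    assert Solution().minimumTotal([[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]]) == 11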
| StarcoderdataPython |
3328075 | """
The predicting pipeline
"""
import sys
import logging
import pandas as pd
import click
from src.data.make_dataset import read_data
from src.features.make_features import full_transform
from src.models.fit_predict_model import predict_model
from src.models.model_dump import load_model
from src.entities.predict_pipeline_parameters import read_predict_pipeline_params, PredictingPipelineParams
logger = logging.getLogger(__name__)
handler = logging.StreamHandler(sys.stdout)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
def predict_pipeline(config_path: str):
"""
Read predict parameters from .yaml file
:param config_path:
:return:
"""
predict_pipeline_params = read_predict_pipeline_params(config_path)
return predict_pipeline_run(predict_pipeline_params)
def predict_pipeline_run(predict_pipeline_params: PredictingPipelineParams) -> None:
"""
The pipeline itself
:param predict_pipeline_params:
:return:
"""
logger.info(f"Start predict pipeline")
df = read_data(predict_pipeline_params.dataset_path)
df_transformed = full_transform(df)
logger.info(f"{df_transformed.shape[0]} entries are given and successfully transformed")
model = load_model(predict_pipeline_params.dump_model)
pred_labels, _ = predict_model(model, df_transformed)
logger.info("Predictions are made")
pd.Series(
pred_labels,
index=df_transformed.index,
name="prediction").to_csv(predict_pipeline_params.prediction_path)
logger.info("Results written to directory")
@click.command(name="predict_pipeline")
@click.argument("config_path")
def predict_pipeline_command(config_path: str):
"""
We transmit the filepath to the .yaml config
:param config_path:
:return:
"""
predict_pipeline(config_path)
if __name__ == "__main__":
predict_pipeline_command()
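# Config sketch (field names inferred from the attributes accessed above; the
# authoritative schema lives in src.entities.predict_pipeline_parameters):
#
#     dataset_path: data/raw/test.csv
#     dump_model: models/model.pkl
#     prediction_path: reports/predictions.csv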
| StarcoderdataPython |
3394365 | <gh_stars>0
import abc
from collections import defaultdict
import datetime
import uuid
class BaseController(abc.ABC):
    # abstract base class: concrete controllers must implement the abstract methods below
def __init__(self):
"""Must set:
self.queue: list of app_models to sync
self.messages: list of controller messages
self.app_models: collectors of app_models
"""
self.creation_date = datetime.datetime.now()
@abc.abstractmethod
def setup_app_model(self, app_model, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def get_app_model(self, app_model):
raise NotImplementedError
@abc.abstractmethod
def get_status(self, app_model):
raise NotImplementedError
@abc.abstractmethod
def set_status(self, app_model, status):
raise NotImplementedError
@abc.abstractmethod
def set_resync(self, app_model, resync):
raise NotImplementedError
@abc.abstractmethod
def add_message(self, app_model, message):
raise NotImplementedError
@abc.abstractmethod
def set_start_date(self, app_model, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def set_completion_date(self, app_model, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def remove_from_queue(self, app_model):
raise NotImplementedError
@abc.abstractmethod
def remove_app_model(self, app_model):
raise NotImplementedError
def start_sync(self, app_model, **kwargs):
start_date = kwargs.get('start_date', None)
self.set_status(app_model, 'running')
self.add_message(app_model, 'Start operation')
self.set_start_date(app_model, start_date=start_date)
def complete_sync(self, app_model, status, **kwargs):
message = kwargs.get('message', None)
if message is None:
if status == 'success':
message = 'Completed successfully'
elif status == 'failed':
message = 'Operation failed'
self.set_status(app_model, status)
self.add_message(app_model, message)
self.set_completion_date(app_model)
self.remove_from_queue(app_model)
class DictController(BaseController):
def __init__(self):
super(DictController, self).__init__()
self.queue = []
self.messages = []
self.app_models = defaultdict(dict)
def setup_app_model(self, app_model, **kwargs):
if self.app_models[app_model]:
self.set_resync(app_model, True)
self.queue.append(app_model)
return
submit_code = kwargs.get('submit_code', '%s' % uuid.uuid4())
submission_date = kwargs.get('submission_date', datetime.datetime.now())
dependencies = kwargs.get('dependencies', [])
is_dependency = kwargs.get('is_dependency', False)
self.app_models[app_model] = {
'app_model': app_model,
'status': None,
'messages': [],
'submit_code': submit_code,
'dependencies': dependencies,
'completion_date': None,
'submission_date': submission_date,
'start_date': submission_date,
'resync': False,
'is_dependency': is_dependency,
'synced_elements': [],
}
self.queue.append(app_model)
def get_app_model(self, app_model):
return self.app_models.get(app_model)
def get_status(self, app_model):
return self.app_models.get(app_model, {}).get('status')
def set_status(self, app_model, status):
self.app_models[app_model]['status'] = status
def set_resync(self, app_model, resync):
self.app_models[app_model]['resync'] = resync
def add_message(self, app_model, message):
time_message = (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), message)
if app_model == '':
self.messages.append(time_message)
else:
self.app_models[app_model]['messages'].append(time_message)
def set_synced_elements(self, app_model, synced_elements):
self.app_models[app_model]['synced_elements'] = synced_elements
def set_start_date(self, app_model, start_date=None):
if start_date is None:
start_date = datetime.datetime.now()
self.app_models[app_model]['start_date'] = start_date
def set_completion_date(self, app_model, completion_date=None):
if completion_date is None:
completion_date = datetime.datetime.now()
self.app_models[app_model]['completion_date'] = completion_date
def remove_from_queue(self, app_model):
"""This method is not semaphore-proof."""
queue = []
removed = False
for queued_app_model in self.queue:
if queued_app_model == app_model and not removed:
removed = True
else:
queue.append(queued_app_model)
self.queue = queue
def remove_app_model(self, app_model):
try:
del self.app_models[app_model]
except KeyError:
pass
def get_controller(controller_class):
if controller_class == 'DictController':
sync_controller = DictController()
else:
raise Exception("Invalid controller class: %s" % controller_class)
return sync_controller
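# Minimal usage sketch: walk one app_model through the sync lifecycle defined on
# BaseController (the model name is illustrative).
if __name__ == "__main__":
    controller = get_controller('DictController')
    controller.setup_app_model('demo_model')
    controller.start_sync('demo_model')
    controller.complete_sync('demo_model', 'success')
    print(controller.get_status('demo_model'), controller.queue)  # -> success []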
| StarcoderdataPython |
1706938 | <gh_stars>0
import asyncio
import typing
from queue import Empty, Queue
from starlette.requests import Request
from tartiflette import Resolver, Subscription
from ._utils import Dog, PubSub
@Resolver("Query.hello")
async def hello(parent, args, context, info) -> str:
name = args.get("name", "stranger")
return "Hello " + name
@Resolver("Query.whoami")
async def resolve_whoami(parent, args, context, info) -> str:
request: Request = context["req"]
user = request.state.user
return "a mystery" if user is None else user
@Resolver("Query.foo")
async def resolve_foo(parent, args, context, info) -> str:
get_foo = context.get("get_foo", lambda: "default")
return get_foo()
@Subscription("Subscription.dogAdded")
async def on_dog_added(parent, args, ctx, info) -> typing.AsyncIterator[dict]:
pubsub: PubSub = ctx["pubsub"]
queue = Queue()
@pubsub.on("dog_added")
def on_dog(dog: Dog):
queue.put(dog)
while True:
try:
dog = queue.get_nowait()
except Empty:
await asyncio.sleep(0.01)
continue
else:
queue.task_done()
if dog is None:
break
yield {"dogAdded": dog._asdict()}
| StarcoderdataPython |
3375175 | <reponame>berland/resqpy
import pytest
import os
import numpy as np
import resqpy.model as rq
import resqpy.grid as grr
import resqpy.fault as rqf
import resqpy.derived_model as rqdm
import resqpy.olio.transmission as rqtr
def test_fault_connection_set(tmp_path):
gm = os.path.join(tmp_path, 'resqpy_test_fgcs.epc')
model = rq.Model(gm, new_epc = True, create_basics = True, create_hdf5_ext = True)
# unsplit grid
g0 = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
g0.write_hdf5()
g0.create_xml(title = 'G0 unsplit')
g0_fcs, g0_fa = rqtr.fault_connection_set(g0)
assert g0_fcs is None
assert g0_fa is None
# J face split with no juxtaposition
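    # (The split-grid construction below appends duplicate pillar points after the
    # unsplit ones: split_pillar_indices_cached names the primary pillars being
    # duplicated, while cols_for_split_pillars / cols_for_split_pillars_cl give the
    # packed list of columns attached to each duplicate pillar. With a throw of two
    # full cells the columns either side of the fault share no depth interval, so
    # no connections are expected.)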
throw = 2.0
g1 = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
g1.grid_representation = 'IjkGrid'
pu_pillar_count = (g1.nj + 1) * (g1.ni + 1)
pu = g1.points_ref(masked = False).reshape(g1.nk + 1, pu_pillar_count, 3)
p = np.zeros((g1.nk + 1, pu_pillar_count + g1.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, g1.ni + 1 : 2 * (g1.ni + 1), :]
p[:, 2 * (g1.ni + 1):, 2] += throw
g1.points_cached = p
g1.has_split_coordinate_lines = True
g1.split_pillars_count = g1.ni + 1
g1.split_pillar_indices_cached = np.array([i for i in range(g1.ni + 1, 2 * (g1.ni + 1))], dtype = int)
g1.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
g1.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
g1.write_hdf5()
g1.create_xml(title = 'G1 J no juxtaposition')
# model.store_epc()
# model.h5_release()
g1_fcs, g1_fa = rqtr.fault_connection_set(g1)
assert g1_fcs is None
assert g1_fa is None
# I face split with no juxtaposition
throw = 2.0
g1 = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
g1.grid_representation = 'IjkGrid'
pu_pillar_count = (g1.nj + 1) * (g1.ni + 1)
pr = g1.points_ref(masked = False)
pr[:, :, -1, 2] += throw
pu = pr.reshape(g1.nk + 1, pu_pillar_count, 3)
p = np.zeros((g1.nk + 1, pu_pillar_count + g1.nj + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pr[:, :, 1, :].reshape(g1.nk + 1, g1.nj + 1, 3)
p[:, pu_pillar_count:, 2] += throw
g1.points_cached = p
g1.has_split_coordinate_lines = True
g1.split_pillars_count = g1.ni + 1
g1.split_pillar_indices_cached = np.array([i for i in range(1, (g1.nj + 1) * (g1.ni + 1), g1.nj + 1)], dtype = int)
g1.cols_for_split_pillars = np.array((1, 1, 3, 3), dtype = int)
g1.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
g1.write_hdf5()
g1.create_xml(title = 'G1 I no juxtaposition')
# model.store_epc()
g1_fcs, g1_fa = rqtr.fault_connection_set(g1)
assert g1_fcs is None
assert g1_fa is None
# J face split with full juxtaposition of kji0 (1, 0, *) with (0, 1, *)
# pattern 4, 4 (or 3, 3)
throw = 1.0
g2 = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
g2.grid_representation = 'IjkGrid'
pu_pillar_count = (g2.nj + 1) * (g2.ni + 1)
pu = g2.points_ref(masked = False).reshape(g2.nk + 1, pu_pillar_count, 3)
p = np.zeros((g2.nk + 1, pu_pillar_count + g2.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, g2.ni + 1 : 2 * (g2.ni + 1), :]
p[:, 2 * (g2.ni + 1):, 2] += throw
g2.points_cached = p
g2.has_split_coordinate_lines = True
g2.split_pillars_count = g2.ni + 1
g2.split_pillar_indices_cached = np.array([i for i in range(g2.ni + 1, 2 * (g2.ni + 1))], dtype = int)
g2.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
g2.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
g2.write_hdf5()
g2.create_xml(title = 'G2 J full juxtaposition of kji0 (1, 0, *) with (0, 1, *)')
# model.store_epc()
g2_fcs, g2_fa = rqtr.fault_connection_set(g2)
assert g2_fcs is not None
assert g2_fa is not None
# show_fa(g2, g2_fcs, g2_fa)
assert g2_fcs.count == 2
assert np.all(np.isclose(g2_fa, 1.0, atol = 0.01))
# I face split with full juxtaposition of kji0 (1, *, 0) with (0, *, 1)
# pattern 4, 4 (or 3, 3) diagram 1
throw = 1.0
g1 = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
g1.grid_representation = 'IjkGrid'
pu_pillar_count = (g1.nj + 1) * (g1.ni + 1)
pr = g1.points_ref(masked = False)
pr[:, :, -1, 2] += throw
pu = pr.reshape(g1.nk + 1, pu_pillar_count, 3)
p = np.zeros((g1.nk + 1, pu_pillar_count + g1.nj + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pr[:, :, 1, :].reshape(g1.nk + 1, g1.nj + 1, 3)
p[:, pu_pillar_count:, 2] += throw
g1.points_cached = p
g1.has_split_coordinate_lines = True
g1.split_pillars_count = g1.ni + 1
g1.split_pillar_indices_cached = np.array([i for i in range(1, (g1.nj + 1) * (g1.ni + 1), g1.nj + 1)], dtype = int)
g1.cols_for_split_pillars = np.array((1, 1, 3, 3), dtype = int)
g1.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
g1.write_hdf5()
g1.create_xml(title = 'G2 I full juxtaposition of kji0 (1, *, 0) with (0, *, 1)')
# model.store_epc()
g1_fcs, g1_fa = rqtr.fault_connection_set(g1)
assert g1_fcs is not None
assert g1_fa is not None
# show_fa(g1, g1_fcs, g1_fa)
assert g1_fcs.count == 2
assert np.all(np.isclose(g1_fa, 1.0, atol = 0.01))
# J face split with half juxtaposition of kji0 (*, 0, *) with (*, 1, *); and (1, 0, *) with (0, 1, *)
# pattern 5, 5 (or 2, 2) diagram 2
throw = 0.5
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G3 J half juxtaposition of kji0 (*, 0, *) with (*, 1, *); and (1, 0, *) with (0, 1, *)')
# model.store_epc()
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 6
assert np.all(np.isclose(grid_fa, 0.5, atol = 0.01))
# I face split with half juxtaposition of kji0 (*, *, 0) with (*, *, 1); and (1, *, 0) with (0, *, 1)
# pattern 5, 5 (or 2, 2) diagram 2
throw = 0.5
g1 = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
g1.grid_representation = 'IjkGrid'
pu_pillar_count = (g1.nj + 1) * (g1.ni + 1)
pr = g1.points_ref(masked = False)
pr[:, :, -1, 2] += throw
pu = pr.reshape(g1.nk + 1, pu_pillar_count, 3)
p = np.zeros((g1.nk + 1, pu_pillar_count + g1.nj + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pr[:, :, 1, :].reshape(g1.nk + 1, g1.nj + 1, 3)
p[:, pu_pillar_count:, 2] += throw
g1.points_cached = p
g1.has_split_coordinate_lines = True
g1.split_pillars_count = g1.ni + 1
g1.split_pillar_indices_cached = np.array([i for i in range(1, (g1.nj + 1) * (g1.ni + 1), g1.nj + 1)], dtype = int)
g1.cols_for_split_pillars = np.array((1, 1, 3, 3), dtype = int)
g1.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
g1.write_hdf5()
g1.create_xml(title = 'G3 I half juxtaposition of kji0 (*, *, 0) with (*, *, 1); and (1, *, 0) with (0, *, 1)')
# model.store_epc()
g1_fcs, g1_fa = rqtr.fault_connection_set(g1)
assert g1_fcs is not None
assert g1_fa is not None
# show_fa(g1, g1_fcs, g1_fa)
assert g1_fcs.count == 6
assert np.all(np.isclose(g1_fa, 0.5, atol = 0.01))
# J face split with 0.25 juxtaposition of kji0 (*, 0, *) with (*, 1, *); and 0.75 of (1, 0, *) with (0, 1, *)
# pattern 5, 5 (or 2, 2) diagram 2
throw = 0.75
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G4 J 0.25 juxtaposition of kji0 (*, 0, *) with (*, 1, *); and 0.75 of (1, 0, *) with (0, 1, *)')
# model.store_epc()
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 6
# following assertion assumes lists are in certain order, which is not a functional requirement
assert np.all(np.isclose(grid_fa,
np.array([[0.25, 0.25],
[0.75, 0.75],
[0.25, 0.25],
[0.25, 0.25],
[0.75, 0.75],
[0.25, 0.25]]), atol = 0.01))
# I face split with 0.25 juxtaposition of kji0 (*, *, 0) with (*, *, 1); and 0.75 of (1, *, 0) with (0, *, 1)
# pattern 5, 5 (or 2, 2) diagram 2
throw = 0.75
g1 = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
g1.grid_representation = 'IjkGrid'
pu_pillar_count = (g1.nj + 1) * (g1.ni + 1)
pr = g1.points_ref(masked = False)
pr[:, :, -1, 2] += throw
pu = pr.reshape(g1.nk + 1, pu_pillar_count, 3)
p = np.zeros((g1.nk + 1, pu_pillar_count + g1.nj + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pr[:, :, 1, :].reshape(g1.nk + 1, g1.nj + 1, 3)
p[:, pu_pillar_count:, 2] += throw
g1.points_cached = p
g1.has_split_coordinate_lines = True
g1.split_pillars_count = g1.ni + 1
g1.split_pillar_indices_cached = np.array([i for i in range(1, (g1.nj + 1) * (g1.ni + 1), g1.nj + 1)], dtype = int)
g1.cols_for_split_pillars = np.array((1, 1, 3, 3), dtype = int)
g1.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
g1.write_hdf5()
g1.create_xml(title = 'G4 I 0.25 juxtaposition of kji0 (*, *, 0) with (*, *, 1); and 0.75 of (1, *, 0) with (0, *, 1)')
g1_fcs, g1_fa = rqtr.fault_connection_set(g1)
assert g1_fcs is not None
assert g1_fa is not None
# show_fa(g1, g1_fcs, g1_fa)
assert g1_fcs.count == 6
# following assertion assumes lists are in certain order, which is not a functional requirement
assert np.all(np.isclose(g1_fa,
np.array([[0.25, 0.25],
[0.75, 0.75],
[0.25, 0.25],
[0.25, 0.25],
[0.75, 0.75],
[0.25, 0.25]]), atol = 0.01))
# J face split with full full (1, 0, 0) with (0, 1, 0); and 0.5 of (*, 0, 1) with (*, 1, 1) and layer crossover
# diagrams 4 & 2
throw = 1.0
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1) : -1, 2] += throw
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G5 J full (1, 0, 0) with (0, 1, 0); and 0.5 of (*, 0, 1) with (*, 1, 1) and layer crossover')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 4
# following assertion assumes lists are in certain order, which is not a functional requirement
assert np.all(np.isclose(grid_fa,
np.array([[1. , 1. ],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5]]), atol = 0.01))
# I face split with full (1, 0, 0) with (0, 0, 1); and 0.5 of (*, 1, 0) with (*, 1, 1) and layer crossover
throw = 1.0
g1 = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
g1.grid_representation = 'IjkGrid'
pu_pillar_count = (g1.nj + 1) * (g1.ni + 1)
pr = g1.points_ref(masked = False)
pr[:, :, -1, 2] += throw
pu = pr.reshape(g1.nk + 1, pu_pillar_count, 3)
p = np.zeros((g1.nk + 1, pu_pillar_count + g1.nj + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pr[:, :, 1, :].reshape(g1.nk + 1, g1.nj + 1, 3)
p[:, pu_pillar_count : -1, 2] += throw
g1.points_cached = p
g1.has_split_coordinate_lines = True
g1.split_pillars_count = g1.ni + 1
g1.split_pillar_indices_cached = np.array([i for i in range(1, (g1.nj + 1) * (g1.ni + 1), g1.nj + 1)], dtype = int)
g1.cols_for_split_pillars = np.array((1, 1, 3, 3), dtype = int)
g1.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
g1.write_hdf5()
g1.create_xml(title = 'G5 I full (1, 0, 0) with (0, 0, 1); and 0.5 of (*, 1, 0) with (*, 1, 1) and layer crossover')
g1_fcs, g1_fa = rqtr.fault_connection_set(g1)
assert g1_fcs is not None
assert g1_fa is not None
# show_fa(g1, g1_fcs, g1_fa)
assert g1_fcs.count == 4
# following assertion assumes lists are in certain order, which is not a functional requirement
assert np.all(np.isclose(g1_fa,
np.array([[1. , 1. ],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5]]), atol = 0.01))
# J face split diagram 4
throw = 0.5
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1) : -1, 2] += throw
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G6 J diagram 4')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 6
# following assertion assumes lists are in certain order, which is not a functional requirement
assert np.all(np.isclose(grid_fa,
np.array([[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
[0.75, 0.75],
[0.25, 0.25],
[0.75, 0.75]]), atol = 0.01))
# J face split
# diagram 5
throw = 0.5
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[1:, 2 * (grid.ni + 1), 2] -= 0.9
p[1:, 3 * (grid.ni + 1), 2] -= 1.0
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G7 diagram 5')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 7
assert np.all(np.isclose(grid_fa,
np.array([[0.375, 0.75],
[0.125, 0.125],
[0.125, 0.25],
[0.75, 0.75],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5]]), atol = 0.01))
# bl.set_log_level('info')
# J face split
# diagram 5m
throw = 0.5
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[1:, 3 * (grid.ni + 1) - 1, 2] -= 0.9
p[1:, 4 * (grid.ni + 1) - 1, 2] -= 1.0
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G7m diagram 5m')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 7
assert np.all(np.isclose(grid_fa,
np.array([[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
[0.375, 0.75],
[0.125, 0.125],
[0.125, 0.25],
[0.75, 0.75]]), atol = 0.01))
# bl.set_log_level('info')
# J face split
# deeper layer half thickness of top layer
throw = 0.5
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G8 lower layer half thickness; throw 0.5')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 4
assert np.all(np.isclose(grid_fa,
np.array([[0.5, 0.5],
[1.0, 0.5],
[0.5, 0.5],
[1.0, 0.5]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = 0.75
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G9 lower layer half thickness; throw 0.75')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 4
assert np.all(np.isclose(grid_fa,
np.array([[0.25, 0.25],
[1.0, 0.5],
[0.25, 0.25],
[1.0, 0.5]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = 0.25
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G10 lower layer half thickness; throw 0.25')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 6
assert np.all(np.isclose(grid_fa,
np.array([[0.75, 0.75],
[0.5, 0.25],
[0.5, 0.5],
[0.75, 0.75],
[0.5, 0.25],
[0.5, 0.5]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = 0.75
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[:, 2 * (grid.ni + 1), 2] -= 1.0
p[:, 3 * (grid.ni + 1), 2] -= 1.0
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G11 lower layer half thickness; throw 0.75 except for -0.25 on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 6
assert np.all(np.isclose(grid_fa,
np.array([[11.0/16, 11.0/16], # 0.6875, 0.6875
[1.0/32, 1.0/16], # 0.03125, 0.0625
[0.5, 0.25],
[0.4375, 0.4375],
[0.25, 0.25],
[1.0, 0.5]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = 0.75
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[:, 3 * (grid.ni + 1) - 1, 2] -= 1.0
p[:, 4 * (grid.ni + 1) - 1, 2] -= 1.0
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G12 lower layer half thickness; throw 0.75 except for -0.25 on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 6
assert np.all(np.isclose(grid_fa,
np.array([[0.25, 0.25],
[1.0, 0.5],
[11.0/16, 11.0/16], # 0.6875, 0.6875
[1.0/32, 1.0/16], # 0.03125, 0.0625
[0.5, 0.25],
[0.4375, 0.4375]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = 0.75
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[:, 3 * (grid.ni + 1) - 1, 2] -= 1.5
p[:, 4 * (grid.ni + 1) - 1, 2] -= 1.5
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G13 lower layer half thickness; throw 0.75 except for -0.25 on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 6
assert np.all(np.isclose(grid_fa,
np.array([[0.25, 0.25],
[1.0, 0.5],
[5.0/8, 5.0/8], # 0.625, 0.625
[1.0/6, 1.0/3],
[1.0/3, 1.0/6],
[1.0/3, 1.0/3]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = 0.75
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[0, 2 * (grid.ni + 1), 2] += 0.5
p[0, 3 * (grid.ni + 1), 2] += 0.5
p[1:, 2 * (grid.ni + 1), 2] -= 0.5
p[1:, 3 * (grid.ni + 1), 2] -= 0.5
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G14 lower layer half thickness; throw 0.75 except for top layer pinching on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 5
assert np.all(np.isclose(grid_fa,
np.array([[1.0/16, 1.0/8], # 0.0625, 0.125
[0.75, 0.75],
[0.125, 0.125],
[0.25, 0.25],
[1.0, 0.5]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = -0.25
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[0, 2 * (grid.ni + 1), 2] += 0.5
p[0, 3 * (grid.ni + 1), 2] += 0.5
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G15 lower layer half thickness; throw -0.25 except for top layer pinching on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 6
assert np.all(np.isclose(grid_fa,
np.array([[11.0/16, 11.0/12],
[0.25, 0.5],
[0.5, 0.5],
[0.75, 0.75],
[0.25, 0.5],
[0.5, 0.5]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = -0.75
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[:, 2 * (grid.ni + 1), 2] -= 1.0
p[:, 3 * (grid.ni + 1), 2] -= 1.0
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G16 lower layer half thickness; throw -0.75 except for -1.75 on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 4
assert np.all(np.isclose(grid_fa,
np.array([[1.0/32, 1.0/32],
[0.25, 0.5],
[0.25, 0.25],
[0.5, 1.0]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = +0.75
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[:, 2 * (grid.ni + 1), 2] += 1.0
p[:, 3 * (grid.ni + 1), 2] += 1.0
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G17 lower layer half thickness; throw +0.75 except for +1.75 on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 4
assert np.all(np.isclose(grid_fa,
np.array([[1.0/32, 1.0/32],
[0.5, 0.25],
[0.25, 0.25],
[1.0, 0.5]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = +0.75
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[:, 3 * (grid.ni + 1) - 1, 2] += 1.0
p[:, 4 * (grid.ni + 1) - 1, 2] += 1.0
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G18 lower layer half thickness; throw +0.75 except for +1.75 on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 4
assert np.all(np.isclose(grid_fa,
np.array([[0.25, 0.25],
[1.0, 0.5],
[1.0/32, 1.0/32],
[0.5, 0.25]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = +0.25
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[0, 2 * (grid.ni + 1), 2] -= 0.5
p[0, 3 * (grid.ni + 1), 2] -= 0.5
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G19 lower layer half thickness; throw +0.25 except for top edge -0.25 on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 6
assert np.all(np.isclose(grid_fa,
np.array([[15.0/16, 0.75],
[0.5, 0.2],
[0.5, 0.5],
[0.75, 0.75],
[0.5, 0.25],
[0.5, 0.5]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = +0.25
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[0, 3 * (grid.ni + 1) - 1, 2] -= 0.5
p[0, 4 * (grid.ni + 1) - 1, 2] -= 0.5
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G20 lower layer half thickness; throw +0.25 except for top edge -0.25 on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 6
assert np.all(np.isclose(grid_fa,
np.array([[0.75, 0.75],
[0.5, 0.25],
[0.5, 0.5],
[15.0/16, 0.75],
[0.5, 0.2],
[0.5, 0.5]]), atol = 0.01))
# bl.set_log_level('info')
# J face split
# deeper layer half thickness of top layer
throw = +0.25
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[1, 2 * (grid.ni + 1), 2] -= 0.5
p[1, 3 * (grid.ni + 1), 2] -= 0.5
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G21 lower layer half thickness; throw +0.25 except for mid edge -0.25 on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 7
assert np.all(np.isclose(grid_fa,
np.array([[11.0/16, 11.0/12],
[1.0/16, 1.0/12],
[1.0/8, 1.0/12],
[7.0/8, 7.0/12],
[0.75, 0.75],
[0.5, 0.25],
[0.5, 0.5]]), atol = 0.01))
# bl.set_log_level('info')
# bl.set_log_level('debug')
# local_log_out = bl.log_fresh()
# display(local_log_out)
# J face split
# deeper layer half thickness of top layer
throw = +0.5
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[:2, 2 * (grid.ni + 1), 2] -= 0.5
p[:2, 3 * (grid.ni + 1), 2] -= 0.5
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G22 lower layer half thickness; throw +0.5 except for top, mid edge on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 5
assert np.all(np.isclose(grid_fa,
np.array([[0.75, 0.75],
[0.5, 0.25],
[0.5, 1.0/3],
[0.5, 0.5],
[1.0, 0.5]]), atol = 0.01))
# bl.set_log_level('debug')
# local_log_out = bl.log_fresh()
# display(local_log_out)
# J face split
# deeper layer half thickness of top layer
throw = +0.5
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[:2, 3 * (grid.ni + 1) - 1, 2] -= 0.5
p[:2, 4 * (grid.ni + 1) - 1, 2] -= 0.5
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G23 lower layer half thickness; throw +0.5 except for top, mid edge on one wing')
model.store_epc()
model.h5_release()
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 5
assert np.all(np.isclose(grid_fa,
np.array([[0.5, 0.5],
[1.0, 0.5],
[0.75, 0.75],
[0.5, 0.25],
[0.5, 1.0/3]]), atol = 0.01))
def test_add_faults(tmp_path):
def write_poly(filename, a, mode = 'w'):
nines = 999.0
with open(filename, mode = mode) as fp:
for row in range(len(a)):
fp.write(f'{a[row, 0]:8.3f} {a[row, 1]:8.3f} {a[row, 2]:8.3f}\n')
fp.write(f'{nines:8.3f} {nines:8.3f} {nines:8.3f}\n')
epc = os.path.join(tmp_path, 'tic_tac_toe.epc')
model = rq.new_model(epc)
grid = grr.RegularGrid(model, extent_kji = (1, 3, 3), set_points_cached = True)
grid.write_hdf5()
grid.create_xml(write_geometry = True)
model.store_epc()
del model
# single straight fault
a = np.array([[-0.2, 2.0, -0.1],
[ 3.2, 2.0, -0.1]])
f = os.path.join(tmp_path, 'ttt_f1.dat')
write_poly(f, a)
g = rqdm.add_faults(epc, source_grid = None, lines_file_list = [f], inherit_properties = False,
new_grid_title = 'ttt_f1 straight')
# single zig-zag fault
a = np.array([[-0.2, 1.0, -0.1],
[ 1.0, 1.0, -0.1],
[ 1.0, 2.0, -0.1],
[ 3.2, 2.0, -0.1]])
f = os.path.join(tmp_path, 'ttt_f2.dat')
write_poly(f, a)
g = rqdm.add_faults(epc, source_grid = None, lines_file_list = [f], inherit_properties = True,
new_grid_title = 'ttt_f2 zig_zag')
# single zig-zag-zig fault
a = np.array([[-0.2, 1.0, -0.1],
[ 1.0, 1.0, -0.1],
[ 1.0, 2.0, -0.1],
[ 2.0, 2.0, -0.1],
[ 2.0, 1.0, -0.1],
[ 3.2, 1.0, -0.1]])
f = os.path.join(tmp_path, 'ttt_f3.dat')
write_poly(f, a)
g = rqdm.add_faults(epc, source_grid = None, lines_file_list = [f], inherit_properties = False,
new_grid_title = 'ttt_f3 zig_zag_zig')
# horst block
a = np.array([[-0.2, 1.0, -0.1],
[ 3.2, 1.0, -0.1]])
b = np.array([[ 3.2, 2.0, -0.1],
[-0.2, 2.0, -0.1]])
fa = os.path.join(tmp_path, 'ttt_f4a.dat')
fb = os.path.join(tmp_path, 'ttt_f4b.dat')
write_poly(fa, a)
write_poly(fb, b)
g = rqdm.add_faults(epc, source_grid = None, lines_file_list = [fa, fb], inherit_properties = True,
new_grid_title = 'ttt_f4 horst')
# asymmetrical horst block
lr_throw_dict = {
'ttt_f4a': (0.0, -0.3),
'ttt_f4b': (0.0, -0.6)}
g = rqdm.add_faults(epc, source_grid = None, lines_file_list = [fa, fb], left_right_throw_dict = lr_throw_dict,
inherit_properties = True, new_grid_title = 'ttt_f5 horst')
assert g is not None
# scaled version of asymmetrical horst block
model = rq.Model(epc)
grid = model.grid(title = 'ttt_f5 horst')
assert grid is not None
gcs_roots = model.roots(obj_type = 'GridConnectionSetRepresentation', related_uuid = grid.uuid)
assert gcs_roots is not None
scaling_dict = {'ttt_f4a': 3.0, 'ttt_f4b': 1.7}
for i, gcs_root in enumerate(gcs_roots):
gcs = rqf.GridConnectionSet(model, connection_set_root = gcs_root)
rqdm.fault_throw_scaling(epc, source_grid = grid, scaling_factor = None,
connection_set = gcs, scaling_dict = scaling_dict,
ref_k0 = 0, ref_k_faces = 'top',
cell_range = 0, offset_decay = 0.5,
store_displacement = False,
inherit_properties = True, inherit_realization = None, inherit_all_realizations = False,
new_grid_title = f'ttt_f6 scaled {i+1}', new_epc_file = None)
model = rq.Model(epc)
grid = model.grid(title = f'ttt_f6 scaled {i+1}')
assert grid is not None
# two intersecting straight faults
a = np.array([[-0.2, 2.0, -0.1],
[ 3.2, 2.0, -0.1]])
b = np.array([[1.0, -0.2, -0.1],
[1.0, 3.2, -0.1]])
f = os.path.join(tmp_path, 'ttt_f7.dat')
write_poly(f, a)
write_poly(f, b, mode = 'a')
g = rqdm.add_faults(epc, source_grid = None, lines_file_list = [f], inherit_properties = True,
new_grid_title = 'ttt_f7')
# re-open and check a few things
model = rq.Model(epc)
assert len(model.titles(obj_type = 'IjkGridRepresentation')) == 8
g1 = model.grid(title = 'ttt_f7')
assert g1.split_pillars_count == 5
cpm = g1.create_column_pillar_mapping()
assert cpm.shape == (3, 3, 2, 2)
extras = (cpm >= 16)
assert np.count_nonzero(extras) == 7
assert np.all(np.sort(np.unique(cpm)) == np.arange(21))
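
# Hedged sketch (not part of the original tests): the recurring split-pillar recipe
# used above, reduced to its bare steps -- duplicate one row of pillars, apply a
# uniform throw on one side of the J face, then ask resqpy for the fault connection
# set and juxtaposition face areas. The names rq, grr, rqtr, np and os are assumed to
# be the imports this test module already uses; sizes and the throw are illustrative.
def _fault_connection_sketch(tmp_path, throw = 0.5):
    epc = os.path.join(tmp_path, 'fcs_sketch.epc')
    model = rq.new_model(epc)
    grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
    grid.grid_representation = 'IjkGrid'
    pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
    pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
    p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
    p[:, :pu_pillar_count, :] = pu
    p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
    p[:, 2 * (grid.ni + 1):, 2] += throw
    grid.points_cached = p
    grid.has_split_coordinate_lines = True
    grid.split_pillars_count = grid.ni + 1
    grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
    grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
    grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
    grid.write_hdf5()
    grid.create_xml(title = 'sketch: uniform J face throw')
    return rqtr.fault_connection_set(grid)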
from django.urls import path
from . import views
urlpatterns = [
path('movies/', views.MoviesListApi.as_view()),
path('movies/<uuid:pk>/', views.MoviesDetailApi.as_view())
]
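
# Hedged sketch (not part of this project): one way the two views referenced above
# could be implemented with Django REST framework generics. The Movie model and
# MovieSerializer names are assumptions for illustration only.
#
#   from rest_framework.generics import ListAPIView, RetrieveAPIView
#
#   class MoviesListApi(ListAPIView):
#       queryset = Movie.objects.all()
#       serializer_class = MovieSerializer
#
#   class MoviesDetailApi(RetrieveAPIView):
#       queryset = Movie.objects.all()
#       serializer_class = MovieSerializer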
# file: gumiyabot/bancho.py
# -*- coding: utf-8 -*-
"""
Bancho (osu!) irc3 plugin.
"""
import asyncio
import irc3
# Bancho does not comply with the IRC spec (thanks peppy) so we need to account
# for that or else the irc3 module will not read any data
class BanchoConnection(irc3.IrcConnection):
"""asyncio protocol to handle Bancho connections"""
def data_received(self, data):
"""Handle data received from Bancho.
Bancho does not send trailing carriage returns at the end of IRC
commands (i.e. it ends a command with \n instead of \r\n).
"""
if not data.endswith(b'\r\n'):
data = data.rstrip(b'\n') + b'\r\n'
return super(BanchoConnection, self).data_received(data)
@irc3.plugin
class BaseBanchoPlugin:
def __init__(self, bot):
self.bot = bot
self.bancho_queue = self.bot.config.get('bancho_queue')
asyncio.ensure_future(self.get_bancho_msg())
@irc3.event(irc3.rfc.CONNECTED)
def connected(self, **kw):
self.bot.log.info('[bancho] Connected to bancho as {}'.format(self.bot.nick))
@asyncio.coroutine
def get_bancho_msg(self):
while True:
(target, msg) = yield from self.bancho_queue.get()
self.bot.privmsg(target, msg)
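
# Hedged usage sketch (not part of the original plugin): wiring BanchoConnection and
# this plugin into an irc3 bot. The 'connection' and 'includes' keys follow irc3's
# configuration conventions as I understand them; host, credentials, module path and
# the bancho_queue value are placeholders / assumptions.
#
#   bot = irc3.IrcBot(
#       nick='my_bot', username='my_bot', password='<irc server password>',
#       host='irc.ppy.sh', port=6667,
#       connection=BanchoConnection,
#       includes=['gumiyabot.bancho'],
#       bancho_queue=asyncio.Queue(),
#   )
#   bot.run(forever=True)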
from numpy import sqrt
from pandas import DataFrame
from sklearn.datasets import make_friedman1
from sklearn.feature_selection import SelectFromModel, RFE
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeRegressor
def LinearRegression_model(xtrain , xtest , ytrain , ytest):
model = LinearRegression().fit(xtrain , ytrain)
log_detial = {'Model' : '' , 'Train-S' : 0 , 'Test-S' : 0 , 'R2' : 0 , 'RMSE' : 0 , 'AE' : 0}
log_detial['Model'] = 'LinearRegression'
log_detial['Train-S'] = model.score(xtrain,ytrain)
log_detial['Test-S'] = model.score(xtest,ytest)
log_detial['R2'] = r2_score(ytest,model.predict(xtest))
log_detial['RMSE'] = sqrt(mean_squared_error(ytest,model.predict(xtest)))
log_detial['AE'] = mean_absolute_error(ytest,model.predict(xtest))
return log_detial
def Lasso_model(xtrain, xtest, ytrain, ytest):
model = Lasso().fit(xtrain, ytrain)
log_detial = {'Model': '', 'Train-S': 0, 'Test-S': 0, 'R2': 0, 'RMSE': 0, 'AE': 0}
log_detial['Model'] = 'LassoRegression'
log_detial['Train-S'] = model.score(xtrain, ytrain)
log_detial['Test-S'] = model.score(xtest, ytest)
log_detial['R2'] = r2_score(ytest, model.predict(xtest))
log_detial['RMSE'] = sqrt(mean_squared_error(ytest, model.predict(xtest)))
log_detial['AE'] = mean_absolute_error(ytest, model.predict(xtest))
return log_detial
def Rigde_model(xtrain, xtest, ytrain, ytest):
model = Ridge().fit(xtrain , ytrain)
log_detial = {'Model': '', 'Train-S': 0, 'Test-S': 0, 'R2': 0, 'RMSE': 0, 'AE': 0}
log_detial['Model'] = 'RigdeRegression'
log_detial['Train-S'] = model.score(xtrain, ytrain)
log_detial['Test-S'] = model.score(xtest, ytest)
log_detial['R2'] = r2_score(ytest, model.predict(xtest))
log_detial['RMSE'] = sqrt(mean_squared_error(ytest, model.predict(xtest)))
log_detial['AE'] = mean_absolute_error(ytest, model.predict(xtest))
return log_detial
def DecisionTreeRegressor_model(xtrain, xtest, ytrain, ytest):
model = DecisionTreeRegressor().fit(xtrain , ytrain)
log_detial = {'Model': '', 'Train-S': 0, 'Test-S': 0, 'R2': 0, 'RMSE': 0, 'AE': 0}
log_detial['Model'] = 'DecisionTreeRegressor'
log_detial['Train-S'] = model.score(xtrain, ytrain)
log_detial['Test-S'] = model.score(xtest, ytest)
log_detial['R2'] = r2_score(ytest, model.predict(xtest))
log_detial['RMSE'] = sqrt(mean_squared_error(ytest, model.predict(xtest)))
log_detial['AE'] = mean_absolute_error(ytest, model.predict(xtest))
return log_detial
base_model = [
LinearRegression_model,
Lasso_model,
Rigde_model,
DecisionTreeRegressor_model
]
def Feature_Selection_Select(k , function , model , xtrain, xtest, ytrain, ytest ):
clf = Pipeline([
('feature_selection', SelectFromModel(k)),
('regression', model)
])
clf.fit(xtrain, ytrain)
log_detial = {'Model': '', 'Select-Method' : '' , 'Select-Function' : '' , 'Feature-Count': 0, 'Train-S': 0, 'Test-S': 0, 'R2': 0, 'RMSE': 0, 'AE': 0}
log_detial['Model'] = str(model.__class__).split('.')[-1].replace("'>" , '')
log_detial['Select-Method'] = "SelectFromModel"
log_detial['Select-Function'] = function
log_detial['Feature-Count'] = clf.named_steps['feature_selection'].get_support().sum()
log_detial['Train-S'] = clf.score(xtrain, ytrain)
log_detial['Test-S'] = clf.score(xtest, ytest)
log_detial['R2'] = r2_score(ytest, clf.predict(xtest))
log_detial['RMSE'] = sqrt(mean_squared_error(ytest, clf.predict(xtest)))
log_detial['AE'] = mean_absolute_error(ytest, clf.predict(xtest))
return (log_detial)
def Feature_Selection_Recursive(k , function , model , xtrain, xtest, ytrain, ytest ):
selector = RFE(function, k)
selector = selector.fit(xtrain, ytrain)
xtrain = selector.transform(xtrain)
xtest = selector.transform(xtest)
clf = model
clf.fit(xtrain, ytrain)
log_detial = {'Model': '', 'Select-Method' : '' , 'Select-Function' : '' , 'Feature-Count': 0, 'Train-S': 0, 'Test-S': 0, 'R2': 0, 'RMSE': 0, 'AE': 0}
log_detial['Model'] = str(clf.__class__).split('.')[-1].replace("'>" , '')
log_detial['Select-Method'] = 'Recursive'
log_detial['Select-Function'] = str(function.__class__).split('.')[-1].replace("'>" , '')
log_detial['Feature-Count'] = k
log_detial['Train-S'] = clf.score(xtrain, ytrain)
log_detial['Test-S'] = clf.score(xtest, ytest)
log_detial['R2'] = r2_score(ytest, clf.predict(xtest))
log_detial['RMSE'] = sqrt(mean_squared_error(ytest, clf.predict(xtest)))
log_detial['AE'] = mean_absolute_error(ytest, clf.predict(xtest))
return (log_detial)
def Base_Model_Profile(xtrain, xtest, ytrain, ytest):
log = list()
for i in base_model:
log.append(i(xtrain, xtest, ytrain, ytest))
return DataFrame(log)
def Feature_Selection_Profile(xtrain, xtest, ytrain, ytest):
log = list()
for i in [Ridge(), Lasso(), DecisionTreeRegressor()]:
for j in [LinearRegression() , DecisionTreeRegressor()]:
log.append(
Feature_Selection_Select(i,str(i.__class__).split('.')[-1].replace("'>", ''),j,xtrain, xtest, ytrain, ytest)
)
for i in range(1 , xtrain.shape[1]):
for j in [LinearRegression() , DecisionTreeRegressor()]:
for k in [LinearRegression() , DecisionTreeRegressor()]:
log.append(
Feature_Selection_Recursive(i, j , k , xtrain, xtest, ytrain, ytest)
)
return DataFrame(log)
def test_data():
X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=.30, random_state=0)
return xtrain, xtest, ytrain, ytest
def test_base_profile():
xtrain, xtest, ytrain, ytest = test_data()
print(Base_Model_Profile(xtrain, xtest, ytrain, ytest))
def test_Feature_Selection_Profile():
xtrain, xtest, ytrain, ytest = test_data()
print(Feature_Selection_Profile(xtrain, xtest, ytrain, ytest))
# test_base_profile()
# test_Feature_Selection_Profile()
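
# Hedged usage sketch (not part of the original module): run both profiles on the
# synthetic Friedman data by reusing the helpers defined above.
if __name__ == '__main__':
    test_base_profile()
    test_Feature_Selection_Profile()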
# -*- encoding:utf-8 -*-
"""
Module for quantifying the degree of volatility
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..TLineBu.ABuTLine import AbuTLine
from ..CoreBu.ABuPdHelper import pd_rolling_std, pd_ewm_mean, pd_ewm_std, pd_resample
from ..UtilBu import ABuStatsUtil
from ..UtilBu.ABuDTUtil import plt_show
def show_wave_return(kl_pd):
"""
    Visualize the rolling std and the exponentially weighted rolling std of returns.
    Note: this modifies kl_pd in place and is intended for testing only; no internal
    copy is made, so if the input must not be changed, copy it before passing it in.
    :param kl_pd: financial time series, a pd.DataFrame object
"""
pre_close = kl_pd['close'].shift(1)
# noinspection PyTypeChecker
kl_pd['return'] = np.where(pre_close == 0, 0, np.log(kl_pd['close'] / pre_close))
kl_pd['mov_std'] = pd_rolling_std(kl_pd['return'], window=20, center=False) * np.sqrt(20)
kl_pd['std_ewm'] = pd_ewm_std(kl_pd['return'], span=20, min_periods=20, adjust=True) * np.sqrt(20)
kl_pd[['close', 'mov_std', 'std_ewm', 'return']].plot(subplots=True, figsize=(16, 12), grid=True)
plt.show()
def calc_wave_std(kl_pd, xd=21, ewm=True, show=True):
"""
    Compute the rolling std or exponentially weighted rolling std line of returns and
    wrap the technical line in an AbuTLine entity; kl_pd is not modified.
    :param kl_pd: financial time series, a pd.DataFrame object
    :param xd: window size used for the rolling / ewm std, default 21
    :param ewm: whether to use the exponentially weighted rolling std
    :param show: whether to visualize, via the AbuTLine.show interface
    :return: an AbuTLine object
"""
pre_close = kl_pd['close'].shift(1)
# noinspection PyTypeChecker
change = np.where(pre_close == 0, 0, np.log(kl_pd['close'] / pre_close))
if ewm:
roll_std = pd_ewm_std(change, span=xd, min_periods=1, adjust=True) * np.sqrt(xd)
else:
roll_std = pd_rolling_std(change, window=xd, min_periods=1, center=False) * np.sqrt(xd)
    # even with min_periods=1 there are still two NaNs, so fill them
roll_std = pd.Series(roll_std).fillna(method='bfill')
    # the main purpose is to construct the AbuTLine object line from roll_std
line = AbuTLine(roll_std, 'wave std')
if show:
line.show()
return line
def calc_wave_abs(kl_pd, xd=21, show=True):
"""
    Compute the absolute volatility of the financial time series kl_pd, resampling it
    with the period xd; with the default xd=21 this becomes a monthly oscillation
    amplitude, which is de-meaned with ABuStatsUtil.demean to give the technical line
    demean_wave, returned wrapped in an AbuTLine.
    :param kl_pd: financial time series, a pd.DataFrame object
    :param xd: resampling period for the volatility, in days, int
    :param show: whether to visualize
    :return: an AbuTLine object
"""
    # ignore the sign and consider only the size of the move: np.abs(kl_pd['p_change'])
abs_pct_change = np.abs(kl_pd['p_change'])
xd_resample = '%dD' % xd
    # resample via pd_resample with how=sum; with the default xd=21 this becomes a monthly oscillation amplitude
change_ratio_sum = pd_resample(abs_pct_change, xd_resample, how='sum')
"""
eg: change_ratio_sum
2014-07-24 37.13
2014-08-14 39.33
2014-09-04 25.16
2014-09-25 27.53
2014-10-16 27.78
...
2016-04-14 25.17
2016-05-05 42.07
2016-05-26 18.93
2016-06-16 33.25
2016-07-07 10.79
"""
    # de-mean with ABuStatsUtil.demean
demean_wave = ABuStatsUtil.demean(change_ratio_sum)
"""
eg: demean_wave
2014-07-24 -1.6303
2014-08-14 0.5697
2014-09-04 -13.6003
2014-09-25 -11.2303
...
2016-05-05 3.3097
2016-05-26 -19.8303
2016-06-16 -5.5103
2016-07-07 -27.9703
"""
line = AbuTLine(demean_wave, 'demean sum change wave')
if show:
        # pd_resample with how='mean' is computed only so that _show_wave can display the price curve
xd_mean_close = pd_resample(kl_pd.close, xd_resample, how='mean')
        # AbuTLine.show is not used here because a second comparison line, the mean price xd_mean_close, needs to be drawn
_show_wave(demean_wave, line.high, line.mean, line.low, xd_mean_close)
    # TODO: add to AbuTLine a standard way to compare multiple lines, with left/right twin axes and data transformation options
return line
def calc_wave_weight_abs(kl_pd, xd=21, span=3, show=True):
"""
    Compute the absolute volatility of the financial time series kl_pd, resampling it
    with the period xd; with the default xd=21 this becomes a monthly oscillation
    amplitude, which is de-meaned with ABuStatsUtil.demean to give demean_wave. Unlike
    calc_wave_abs, the wave is amplified by squaring while keeping the sign
    (squared * np.sign(demean_wave)), so large values become even larger, and the
    squared_wave is then exponentially weighted over time to form ewm_wave, which is
    returned wrapped in an AbuTLine.
    :param kl_pd: financial time series, a pd.DataFrame object
    :param xd: resampling period for the volatility, in days, int
    :param span: window parameter for the exponential time weighting of squared_wave, int
    :param show: whether to visualize
    :return: an AbuTLine object
"""
    # ignore the sign and consider only the size of the move: np.abs(kl_pd['p_change'])
abs_pct_change = np.abs(kl_pd['p_change'])
xd_resample = '%dD' % xd
    # resample via pd_resample with how=sum; with the default xd=21 this becomes a monthly oscillation amplitude
change_ratio_sum = pd_resample(abs_pct_change, xd_resample, how='sum')
    # de-mean with ABuStatsUtil.demean
demean_wave = ABuStatsUtil.demean(change_ratio_sum)
    # unlike calc_wave_abs, squared * np.sign(demean_wave) amplifies the wave, i.e. large values become even larger
squared_wave = (demean_wave ** 2) * np.sign(demean_wave)
    # the ewm span ultimately determines the result; the default span matches the default xd, so if xd changes, span should change too
ewm_wave = pd_ewm_mean(squared_wave, span=span, min_periods=span, adjust=True)
line = AbuTLine(ewm_wave, 'squared ewm wave')
if show:
        # pd_resample with how='mean' is computed only so that _show_wave can display the price curve
xd_mean_close = pd_resample(kl_pd.close, xd_resample, how='mean')
        # AbuTLine.show is not used here because a second comparison line, the mean price xd_mean_close, needs to be drawn
_show_wave(ewm_wave, line.high, line.mean, line.low, xd_mean_close)
return line
def _show_wave(wave, above, wave_mean, below, xd_mean_close):
"""
    Visualization helper for the technical lines produced by calc_wave_abs and
    calc_wave_weight_abs. AbuTLine.show is not used because a second comparison line,
    the mean price xd_mean_close, is drawn on twin y-axes.
"""
with plt_show():
fig, ax1 = plt.subplots()
plt.plot(wave)
plt.axhline(above, color='c')
plt.axhline(wave_mean, color='r')
plt.axhline(below, color='g')
_ = plt.setp(plt.gca().get_xticklabels(), rotation=30)
plt.legend(['wave', 'above', 'wave_mean', 'below'],
bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
        # display using left and right (twin) y-axes
# noinspection PyUnusedLocal
ax2 = ax1.twinx()
plt.plot(xd_mean_close, c='y')
plt.plot(xd_mean_close, 'ro')
plt.legend(['mean close'],
bbox_to_anchor=(1.05, 1), loc=3, borderaxespad=0.)
        # when the time series is long, rotate the time labels by 30 degrees
_ = plt.setp(plt.gca().get_xticklabels(), rotation=30)
plt.title('wave line')
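
# Hedged usage sketch (not part of the original module): kl_pd is assumed to be a
# kline DataFrame carrying 'close' and 'p_change' columns, as used by the functions
# above.
#
#   std_line = calc_wave_std(kl_pd, xd=21, ewm=True, show=True)
#   abs_line = calc_wave_abs(kl_pd, xd=21, show=True)
#   weighted_line = calc_wave_weight_abs(kl_pd, xd=21, span=3, show=True)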
# file: ProjectCode/002_stock_miner.py (repo: dvjr22/StockPicker)
#!/usr/bin/env python
# coding: utf-8
# # References
# http://theautomatic.net/yahoo_fin-documentation/
# http://theautomatic.net/2020/05/05/how-to-download-fundamentals-data-with-python/
# https://algotrading101.com/learn/yahoo-finance-api-guide/>
# https://blog.quantinsti.com/quantitative-value-investing-strategy-python/
# https://www.activestate.com/blog/top-10-python-packages-for-finance-and-financial-modeling/
# https://medium.com/automation-generation/teaching-your-computer-to-invest-with-python-commission-free-automated-investing-5ade10961e08
# # Imports
import pandas as pd
import yahoo_fin.stock_info as si
import time
import timeit
start = timeit.default_timer()
# # Variables
ratio_valuation_function=['Price/Book (mrq)','Trailing P/E','Forward P/E 1', 'PEG Ratio (5 yr expected) 1','Price/Sales (ttm)']
ratio_stat=['Total Debt/Equity (mrq)', 'Diluted EPS (ttm)', 'Trailing Annual Dividend Yield 3', 'Forward Annual Dividend Yield 4', '% Held by Insiders 1','% Held by Institutions 1', 'Return on Equity (ttm)','Return on Assets (ttm)','Quarterly Earnings Growth (yoy)', 'current_price']
# ratio_stat=['Total Debt/Equity (mrq)', 'Diluted EPS (ttm)', 'Trailing Annual Dividend Yield 3',\
# 'Forward Annual Dividend Yield 4', '% Held by Insiders 1','% Held by Institutions 1',\
# 'Return on Equity (ttm)','Return on Assets (ttm)','Quarterly Earnings Growth (yoy)', \
# 'current_price','Beta (5Y Monthly)']
# # Get Updated S&P 500
# complete sp 500 list
# file generated: get_sp_500_update.py
# ticker_df = pd.read_csv('../ProjectDatasets/sp_500_symbols.csv', sep=',')
# tickers = ticker_df['Symbol'].tolist()
# get latest sp500
tickers = si.tickers_sp500()
# tickersDF = si.tickers_sp500(True)
# tickers = tickers[:4].copy()
print(len(tickers))
# # Company Info
tickersDF = si.tickers_sp500(True)
tickersDF.index = tickersDF['Symbol']
tickersDF.index.rename('ticker', inplace=True)
tickersDF.drop(columns=['Symbol'], inplace=True)
# # Sort Data
table=pd.DataFrame()
ticker_index = []
retry_ticker = []
count = 0
for p in tickers:
# print(p)
try:
data=si.get_stats(p)
data.index=data["Attribute"]
data=data.drop(labels="Attribute",axis=1)
raw_table=data.T
raw_table['current_price'] = round(si.get_live_price(p),2)
table=table.append(raw_table) #Table having Data about the company
ticker_index.append(p)
except:
count = count+1
print('Bad Ticker {}: {}'.format(count, p))
retry_ticker.append(p)
if len(retry_ticker) > 0:
time.sleep(60*20)
count = 0
for p in retry_ticker:
# print(p)
try:
data=si.get_stats(p)
data.index=data["Attribute"]
data=data.drop(labels="Attribute",axis=1)
raw_table=data.T
raw_table['current_price'] = round(si.get_live_price(p),2)
table=table.append(raw_table) #Table having Data about the company
ticker_index.append(p)
time.sleep(10)
except:
count = count+1
print('Bad Ticker 2nd attempt {}: {}'.format(count, p))
time.sleep(60*5)
table.index=ticker_index
table1 = table[ratio_stat]
time.sleep(60*20)
table=pd.DataFrame()
tickers = ticker_index.copy()
new_index = []
retry_ticker = []
count = 0
for p in tickers:
# print(p)
try:
extra_ratio=si.get_stats_valuation(p)
extra_ratio = extra_ratio.iloc[:,0:2]
extra_ratio.index=extra_ratio['Unnamed: 0']
extra_ratio=extra_ratio.drop(labels='Unnamed: 0',axis=1)
new_table=extra_ratio.T
table=table.append(new_table) #Table having Data about the company
new_index.append(p)
time.sleep(10)
except:
count = count+1
print('Bad Ticker {}: {}'.format(count, p))
retry_ticker.append(p)
time.sleep(60*5)
if len(retry_ticker) > 0:
time.sleep(60*20)
count = 0
for p in retry_ticker:
# print(p)
try:
            # retry with the valuation endpoint, mirroring the first attempt above
            extra_ratio=si.get_stats_valuation(p)
            extra_ratio = extra_ratio.iloc[:,0:2]
            extra_ratio.index=extra_ratio['Unnamed: 0']
            extra_ratio=extra_ratio.drop(labels='Unnamed: 0',axis=1)
            new_table=extra_ratio.T
            table=table.append(new_table) #Table having Data about the company
new_index.append(p)
time.sleep(10)
except:
count = count+1
print('Bad Ticker 2nd attempt {}: {}'.format(count, p))
time.sleep(60*5)
table.index=new_index
table2 = table[ratio_valuation_function]
final=pd.concat([table2,table1],axis=1)
final.to_csv('../ProjectDatasets/final_recommendations_int.csv', index=True, index_label='ticker')
print(final.shape)
# # Evaluations
final['Trailing P/E'] = pd.to_numeric(final['Trailing P/E'], errors='coerce')
final['Price/Book (mrq)'] = pd.to_numeric(final['Price/Book (mrq)'], errors='coerce')
# low_valuations
final = final[(final['Trailing P/E'].astype(float)<40) & (final['Price/Book (mrq)'].astype(float) < 15)].copy()
# earning_power
final = final[final['Diluted EPS (ttm)'].astype(float) > 4].copy()
# equity_to_debt
final = final[(final['Total Debt/Equity (mrq)'].astype(float)< 75 )].copy() # Filter for Debt to Equity
final = final[(final['Return on Equity (ttm)'] > str(20) )].copy() # Filter for ROE
# insider_owned
final = final[final['% Held by Insiders 1']>str(.07)].copy()
FINAL = pd.concat([tickersDF,final], axis=1, join='inner')
FINAL.sort_values(by=['current_price'], inplace=True)
FINAL.head()
FINAL.to_csv('../ProjectDatasets/final_recommendations.csv', index=True, index_label='ticker')
FINAL.shape
stop = timeit.default_timer()
print('Time (hrs): ', ((stop - start)/60)/60)
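
# Hedged follow-up sketch (not part of the original script): reload the saved screen
# for inspection; the path simply mirrors the to_csv call above.
#
#   picks = pd.read_csv('../ProjectDatasets/final_recommendations.csv', index_col='ticker')
#   print(picks[['current_price', 'Trailing P/E', 'Price/Book (mrq)']].head())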
import unittest
from base import testlabel
from cqparts.utils.test import CatalogueTest
from cqparts.catalogue import JSONCatalogue
catalogue = JSONCatalogue('test-files/thread_catalogue.json')
cls = testlabel('complex_thread')(CatalogueTest.create_from(catalogue))
# FIXME: when #1 is fixed, remove this so tests are not permanently skipped
cls = unittest.skip('skipped until #1 is fixed')(cls)
globals()[cls.__name__] = cls
# file: garminexport/garminclient.py
#! /usr/bin/env python
"""A module for authenticating against and communicating with selected
parts of the Garmin Connect REST API.
"""
import json
import logging
import os
import re
import requests
from io import BytesIO
import sys
import zipfile
import dateutil
import dateutil.parser
import os.path
from functools import wraps
from builtins import range
#
# Note: For more detailed information about the API services
# used by this module, log in to your Garmin Connect account
# through the web browser and visit the API documentation page
# for the REST service of interest. For example:
# https://connect.garmin.com/proxy/activity-service-1.3/index.html
# https://connect.garmin.com/proxy/activity-search-service-1.2/index.html
#
#
# Other useful references:
# https://github.com/cpfair/tapiriik/blob/master/tapiriik/services/GarminConnect/garminconnect.py
# https://forums.garmin.com/showthread.php?72150-connect-garmin-com-signin-question/page2
#
log = logging.getLogger(__name__)
# reduce logging noise from requests library
logging.getLogger("requests").setLevel(logging.ERROR)
SSO_LOGIN_URL = "https://sso.garmin.com/sso/login"
"""The Garmin Connect Single-Sign On login URL."""
def require_session(client_function):
"""Decorator that is used to annotate :class:`GarminClient`
methods that need an authenticated session before being called.
"""
@wraps(client_function)
def check_session(*args, **kwargs):
client_object = args[0]
if not client_object.session:
raise Exception("Attempt to use GarminClient without being connected. Call connect() before first use.'")
return client_function(*args, **kwargs)
return check_session
class GarminClient(object):
"""A client class used to authenticate with Garmin Connect and
extract data from the user account.
Since this class implements the context manager protocol, this object
can preferably be used together with the with-statement. This will
automatically take care of logging in to Garmin Connect before any
further interactions and logging out after the block completes or
a failure occurs.
Example of use: ::
with GarminClient("<EMAIL>", "secretpassword") as client:
ids = client.list_activity_ids()
for activity_id in ids:
gpx = client.get_activity_gpx(activity_id)
"""
def __init__(self, username, password):
"""Initialize a :class:`GarminClient` instance.
:param username: Garmin Connect user name or email address.
:type username: str
:param password: Garmin Connect account password.
:type password: str
"""
self.username = username
self.password = password
self.session = None
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.disconnect()
def connect(self):
self.session = requests.Session()
self._authenticate()
def disconnect(self):
if self.session:
self.session.close()
self.session = None
def _authenticate(self):
log.info("authenticating user ...")
form_data = {
"username": self.username,
"password": <PASSWORD>,
"embed": "false"
}
request_params = {
"service": "https://connect.garmin.com/modern"
}
auth_response = self.session.post(
SSO_LOGIN_URL, params=request_params, data=form_data)
log.debug("got auth response: %s", auth_response.text)
if auth_response.status_code != 200:
raise ValueError(
"authentication failure: did you enter valid credentials?")
auth_ticket_url = self._extract_auth_ticket_url(
auth_response.text)
log.debug("auth ticket url: '%s'", auth_ticket_url)
log.info("claiming auth ticket ...")
response = self.session.get(auth_ticket_url)
if response.status_code != 200:
raise RuntimeError(
"auth failure: failed to claim auth ticket: %s: %d\n%s" %
(auth_ticket_url, response.status_code, response.text))
# appears like we need to touch base with the old API to initiate
# some form of legacy session. otherwise certain downloads will fail.
self.session.get('https://connect.garmin.com/legacy/session')
def _extract_auth_ticket_url(self, auth_response):
"""Extracts an authentication ticket URL from the response of an
authentication form submission. The auth ticket URL is typically
of form:
https://connect.garmin.com/modern?ticket=ST-0123456-aBCDefgh1iJkLmN5opQ9R-cas
:param auth_response: HTML response from an auth form submission.
"""
match = re.search(
r'response_url\s*=\s*"(https:[^"]+)"', auth_response)
if not match:
raise RuntimeError(
"auth failure: unable to extract auth ticket URL. did you provide a correct username/password?")
auth_ticket_url = match.group(1).replace("\\", "")
return auth_ticket_url
@require_session
def list_activities(self):
"""Return all activity ids stored by the logged in user, along
with their starting timestamps.
:returns: The full list of activity identifiers (along with their
starting timestamps).
:rtype: tuples of (int, datetime)
"""
ids = []
batch_size = 100
# fetch in batches since the API doesn't allow more than a certain
# number of activities to be retrieved on every invocation
for start_index in range(0, sys.maxsize, batch_size):
next_batch = self._fetch_activity_ids_and_ts(start_index, batch_size)
if not next_batch:
break
ids.extend(next_batch)
return ids
@require_session
def _fetch_activity_ids_and_ts(self, start_index, max_limit=100):
"""Return a sequence of activity ids (along with their starting
timestamps) starting at a given index, with index 0 being the user's
most recently registered activity.
Should the index be out of bounds or the account empty, an empty
list is returned.
:param start_index: The index of the first activity to retrieve.
:type start_index: int
:param max_limit: The (maximum) number of activities to retrieve.
:type max_limit: int
:returns: A list of activity identifiers (along with their
starting timestamps).
:rtype: tuples of (int, datetime)
"""
log.debug("fetching activities {} through {} ...".format(
start_index, start_index+max_limit-1))
response = self.session.get(
"https://connect.garmin.com/modern/proxy/activitylist-service/activities/search/activities",
params={"start": start_index, "limit": max_limit})
if response.status_code != 200:
raise Exception(
u"failed to fetch activities {} to {} types: {}\n{}".format(
start_index, (start_index+max_limit-1),
response.status_code, response.text))
activities = json.loads(response.text)
if not activities:
# index out of bounds or empty account
return []
entries = []
for activity in activities:
id = int(activity["activityId"])
timestamp_utc = dateutil.parser.parse(activity["startTimeGMT"])
# make sure UTC timezone gets set
timestamp_utc = timestamp_utc.replace(tzinfo=dateutil.tz.tzutc())
entries.append( (id, timestamp_utc) )
log.debug("got {} activities.".format(len(entries)))
return entries
@require_session
def get_activity_summary(self, activity_id):
"""Return a summary about a given activity. The
summary contains several statistics, such as duration, GPS starting
point, GPS end point, elevation gain, max heart rate, max pace, max
speed, etc).
:param activity_id: Activity identifier.
:type activity_id: int
:returns: The activity summary as a JSON dict.
:rtype: dict
"""
response = self.session.get("https://connect.garmin.com/modern/proxy/activity-service/activity/{}".format(activity_id))
if response.status_code != 200:
log.error(u"failed to fetch json summary for activity {}: {}\n{}".format(
activity_id, response.status_code, response.text))
raise Exception(u"failed to fetch json summary for activity {}: {}\n{}".format(
activity_id, response.status_code, response.text))
return json.loads(response.text)
@require_session
def get_activity_details(self, activity_id):
"""Return a JSON representation of a given activity including
available measurements such as location (longitude, latitude),
heart rate, distance, pace, speed, elevation.
:param activity_id: Activity identifier.
:type activity_id: int
:returns: The activity details as a JSON dict.
:rtype: dict
"""
# mounted at xml or json depending on result encoding
response = self.session.get("https://connect.garmin.com/modern/proxy/activity-service/activity/{}/details".format(activity_id))
if response.status_code != 200:
raise Exception(u"failed to fetch json activityDetails for {}: {}\n{}".format(
activity_id, response.status_code, response.text))
return json.loads(response.text)
@require_session
def get_activity_gpx(self, activity_id):
"""Return a GPX (GPS Exchange Format) representation of a
given activity. If the activity cannot be exported to GPX
(not yet observed in practice, but that doesn't exclude the
possibility), a :obj:`None` value is returned.
:param activity_id: Activity identifier.
:type activity_id: int
:returns: The GPX representation of the activity as an XML string
or ``None`` if the activity couldn't be exported to GPX.
:rtype: str
"""
response = self.session.get("https://connect.garmin.com/modern/proxy/download-service/export/gpx/activity/{}".format(activity_id))
# An alternate URL that seems to produce the same results
# and is the one used when exporting through the Garmin
# Connect web page.
#response = self.session.get("https://connect.garmin.com/proxy/activity-service-1.1/gpx/activity/{}?full=true".format(activity_id))
# A 404 (Not Found) or 204 (No Content) response are both indicators
# of a gpx file not being available for the activity. It may, for
# example be a manually entered activity without any device data.
if response.status_code in (404, 204):
return None
if response.status_code != 200:
raise Exception(u"failed to fetch GPX for activity {}: {}\n{}".format(
activity_id, response.status_code, response.text))
return response.text
@require_session
def get_activity_tcx(self, activity_id):
"""Return a TCX (Training Center XML) representation of a
given activity. If the activity doesn't have a TCX source (for
example, if it was originally uploaded in GPX format, Garmin
won't try to synthesize a TCX file) a :obj:`None` value is
returned.
:param activity_id: Activity identifier.
:type activity_id: int
:returns: The TCX representation of the activity as an XML string
or ``None`` if the activity cannot be exported to TCX.
:rtype: str
"""
response = self.session.get("https://connect.garmin.com/modern/proxy/download-service/export/tcx/activity/{}".format(activity_id))
if response.status_code == 404:
return None
if response.status_code != 200:
raise Exception(u"failed to fetch TCX for activity {}: {}\n{}".format(
activity_id, response.status_code, response.text))
return response.text
def get_original_activity(self, activity_id):
"""Return the original file that was uploaded for an activity.
If the activity doesn't have any file source (for example,
if it was entered manually rather than imported from a Garmin
device) then :obj:`(None,None)` is returned.
:param activity_id: Activity identifier.
:type activity_id: int
:returns: A tuple of the file type (e.g. 'fit', 'tcx', 'gpx') and
its contents, or :obj:`(None,None)` if no file is found.
:rtype: (str, str)
"""
response = self.session.get("https://connect.garmin.com/modern/proxy/download-service/files/activity/{}".format(activity_id))
# A 404 (Not Found) response is a clear indicator of a missing .fit
# file. As of lately, the endpoint appears to have started to
# respond with 500 "NullPointerException" on attempts to download a
# .fit file for an activity without one.
if response.status_code in [404, 500]:
# Manually entered activity, no file source available
return (None,None)
if response.status_code != 200:
raise Exception(
u"failed to get original activity file for {}: {}\n{}".format(
activity_id, response.status_code, response.text))
# return the first entry from the zip archive where the filename is
# activity_id (should be the only entry!)
zip = zipfile.ZipFile(BytesIO(response.content), mode="r")
for path in zip.namelist():
fn, ext = os.path.splitext(path)
if fn==str(activity_id):
return ext[1:], zip.open(path).read()
return (None,None)
def get_activity_fit(self, activity_id):
"""Return a FIT representation for a given activity. If the activity
doesn't have a FIT source (for example, if it was entered manually
rather than imported from a Garmin device) a :obj:`None` value is
returned.
:param activity_id: Activity identifier.
:type activity_id: int
:returns: A string with a FIT file for the activity or :obj:`None`
if no FIT source exists for this activity (e.g., entered manually).
:rtype: str
"""
fmt, orig_file = self.get_original_activity(activity_id)
# if the file extension of the original activity file isn't 'fit',
# this activity was uploaded in a different format (e.g. gpx/tcx)
# and cannot be exported to fit
return orig_file if fmt=='fit' else None
@require_session
def upload_activity(self, file, format=None, name=None, description=None, activity_type=None, private=None):
"""Upload a GPX, TCX, or FIT file for an activity.
:param file: Path or open file
:param format: File format (gpx, tcx, or fit); guessed from filename if None
:param name: Optional name for the activity on Garmin Connect
:param description: Optional description for the activity on Garmin Connect
:param activity_type: Optional activityType key (lowercase: e.g. running, cycling)
:param private: If true, then activity will be set as private.
:returns: ID of the newly-uploaded activity
:rtype: int
"""
if isinstance(file, basestring):
file = open(file, "rb")
# guess file type if unspecified
fn = os.path.basename(file.name)
_, ext = os.path.splitext(fn)
if format is None:
if ext.lower() in ('.gpx','.tcx','.fit'):
format = ext.lower()[1:]
else:
raise Exception(u"could not guess file type for {}".format(fn))
# upload it
files = dict(data=(fn, file))
response = self.session.post("https://connect.garmin.com/modern/proxy/upload-service/upload/.{}".format(format),
files=files, headers={"nk": "NT"})
# check response and get activity ID
try:
j = response.json()["detailedImportResult"]
        except (ValueError, KeyError):
raise Exception(u"failed to upload {} for activity: {}\n{}".format(
format, response.status_code, response.text))
if len(j["failures"]) or len(j["successes"])<1:
raise Exception(u"failed to upload {} for activity: {}\n{}".format(
format, response.status_code, j["failures"]))
if len(j["successes"])>1:
raise Exception(u"uploading {} resulted in multiple activities ({})".format(
format, len(j["successes"])))
activity_id = j["successes"][0]["internalId"]
# add optional fields
data = {}
if name is not None: data['activityName'] = name
        if description is not None: data['description'] = description
if activity_type is not None: data['activityTypeDTO'] = {"typeKey": activity_type}
if private: data['privacy'] = {"typeKey": "private"}
if data:
data['activityId'] = activity_id
encoding_headers = {"Content-Type": "application/json; charset=UTF-8"} # see Tapiriik
response = self.session.put("https://connect.garmin.com/proxy/activity-service/activity/{}".format(activity_id), data=json.dumps(data), headers=encoding_headers)
if response.status_code != 204:
raise Exception(u"failed to set metadata for activity {}: {}\n{}".format(
activity_id, response.status_code, response.text))
return activity_id
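
# Hedged usage sketch (not part of the original module), following the class docstring;
# the credentials are placeholders.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    with GarminClient("user@example.com", "secretpassword") as client:
        for activity_id, start_time in client.list_activities():
            log.info("activity %s started at %s", activity_id, start_time)
            fit_bytes = client.get_activity_fit(activity_id)  # None if no FIT source exists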
# repo: UsterNes/OnlineSchemaChange
"""
Copyright (c) 2017-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
PREFIX = '__osc_'
OUTFILE_TABLE = '__osc_tbl_'
OUTFILE_EXCLUDE_ID = '__osc_ex_'
OUTFILE_INCLUDE_ID = '__osc_in_'
NEW_TABLE_PREFIX = '__osc_new_'
DELTA_TABLE_PREFIX = '__osc_chg_'
RENAMED_TABLE_PREFIX = '__osc_old_'
INSERT_TRIGGER_PREFIX = '__osc_ins_'
UPDATE_TRIGGER_PREFIX = '__osc_upd_'
DELETE_TRIGGER_PREFIX = '__osc_del_'
OSC_LOCK_NAME = 'OnlineSchemaChange'
CHUNK_BYTES = 2 * 1024 * 1024
REPLAY_DEFAULT_TIMEOUT = 30
DEFAULT_BATCH_SIZE = 500
DEFAULT_REPLAY_ATTEMPT = 10
DEFAULT_RESERVED_SPACE = 1024 * 1024 * 1024
LONG_TRX_TIME = 30
MAX_RUNNING_BEFORE_DDL = 200
DDL_GUARD_ATTEMPTS = 600
LOCK_MAX_ATTEMPTS = 3
SESSION_TIMEOUT = 600
DEFAULT_REPLAY_GROUP_SIZE = 200
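
# Hedged illustration (not part of the original module) of how these prefixes are
# typically combined with a table name by the schema-change workflow; this helper is
# an assumption for demonstration, not the project's real API.
def _example_shadow_names(table_name):
    return {
        'new_table': NEW_TABLE_PREFIX + table_name,
        'delta_table': DELTA_TABLE_PREFIX + table_name,
        'renamed_table': RENAMED_TABLE_PREFIX + table_name,
        'insert_trigger': INSERT_TRIGGER_PREFIX + table_name,
        'update_trigger': UPDATE_TRIGGER_PREFIX + table_name,
        'delete_trigger': DELETE_TRIGGER_PREFIX + table_name,
    }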
class HTTPError(Exception):
pass
class VersionSpecificationError(Exception):
pass
class SchedulerException(Exception):
pass
class CandidateNodeNotFoundException(SchedulerException):
pass
class LowResourceException(SchedulerException):
pass
class AbortInstanceStartException(SchedulerException):
pass
# Database
class DatabaseException(Exception):
pass
class LockException(DatabaseException):
pass
class WriteException(DatabaseException):
pass
class ReadException(DatabaseException):
pass
class BadMetadataPacket(DatabaseException):
pass
class VirtException(Exception):
pass
class NoDomainException(VirtException):
pass
class FlagException(Exception):
pass
# Images
class BadCheckSum(Exception):
pass
# Tasks
class TaskException(Exception):
pass
class UnknownTaskException(TaskException):
pass
class NoURLImageFetchTaskException(TaskException):
pass
class ImageFetchTaskFailedException(TaskException):
pass
class NoInstanceTaskException(TaskException):
pass
class NoNetworkTaskException(TaskException):
pass
class NetworkNotListTaskException(TaskException):
pass
# repo: geofft/multiprocess
#
# Module supporting finaliztion using weakrefs
#
# processing/finalize.py
#
# Copyright (c) 2006-2008, <NAME> --- see COPYING.txt
#
import weakref
import itertools
from processing.logger import subDebug
__all__ = ['Finalize', '_runFinalizers']
_registry = {}
_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
assert exitpriority is None or type(exitpriority) is int
assert callback is not None
if obj is not None:
self._weakref = weakref.ref(obj, self)
else:
assert exitpriority is not None
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, _counter.next())
_registry[self._key] = self
def __call__(self, wr=None):
'''
Run the callback unless it has already been called or cancelled
Returns True if callback was run otherwise returns False
'''
try:
del _registry[self._key]
except KeyError:
subDebug('finalizer no longer registered')
else:
subDebug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return True
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def stillActive(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<Finalize object, dead>'
x = '<Finalize object, callback=%s' % \
getattr(self._callback, '__name__', self._callback)
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
x += ', exitprority=' + str(self._key[0])
return x + '>'
def _runFinalizers(minpriority=None):
'''
Run all finalizers whose exit priority is not None and at least minpriority
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
if minpriority is None:
f = lambda p : p[0][0] is not None
else:
f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
items = sorted(filter(f, _registry.items()), reverse=True)
for key, finalizer in items:
subDebug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_registry.clear()
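
# Hedged usage sketch (not part of the original module): register a callback that runs
# either when `holder` is garbage collected or when _runFinalizers() is invoked with a
# matching exit priority.
if __name__ == '__main__':
    import sys
    def _close(name):
        sys.stdout.write('finalizing %s\n' % name)
    class _Holder(object):
        pass
    holder = _Holder()
    Finalize(holder, _close, args=('demo resource',), exitpriority=0)
    _runFinalizers()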
# file: training/src/tests/tests/python/gaussian_upsampling.py
# Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
class GaussianUpsampling:
def __init__(self):
super(GaussianUpsampling, self).__init__()
def __call__(self, x, durations, ranges, mel_len=None):
eps = 0.00001
# x: batch_size x seq_len x input_dim
# durations: batch_size x seq_len
# missing elements to fit dimensions of both arrays should be 0
if mel_len is not None:
mel_len = tf.cast(mel_len, tf.float32)
else:
mel_len = tf.reduce_max(tf.reduce_sum(durations, axis=1))
c = tf.add(
tf.cast(tf.divide(durations, 2), tf.float32),
tf.concat(
[
tf.zeros([tf.shape(durations)[0], 1], tf.float32),
tf.cast(tf.cumsum(durations, axis=1)[:, :-1], tf.float32),
],
axis=1,
),
)
dist = tfd.Normal(loc=c, scale=ranges + eps)
bs = tf.shape(durations)[0]
t = tf.range(1, mel_len + 1, dtype=tf.float32)
t = t[:, tf.newaxis, tf.newaxis]
p = tf.transpose(
tf.exp(dist.log_prob(t)), [1, 0, 2]
) # bs x seq_len (t) x len(t)=seq_len (i)
s = tf.expand_dims(tf.reduce_sum(p, axis=2), axis=2) + eps
w = tf.divide(p, s)
u = tf.matmul(w, x)
return u
tf.random.set_seed(0)
x = tf.random.uniform([2, 5, 4], minval=0.0, maxval=1.0)
durations = tf.random.uniform([2, 5], minval=0.0, maxval=1.0)
ranges = tf.random.uniform([2, 5], minval=0.0, maxval=1.0)
gauss = GaussianUpsampling()
print("Input: ", x)
print("Durations: ", durations)
print("Ranges: ", ranges)
with tf.GradientTape() as g:
g.watch(x)
y = gauss(x, durations, ranges, 3.0)
print("Output: ", y)
dy_dx = g.gradient(y, x)
print("Gradient for input: ", dy_dx)
with tf.GradientTape() as g:
g.watch(durations)
y = gauss(x, durations, ranges, 3.0)
dy_dx = g.gradient(y, durations)
print("Gradient for durations: ", dy_dx)
with tf.GradientTape() as g:
g.watch(ranges)
y = gauss(x, durations, ranges, 3.0)
dy_dx = g.gradient(y, ranges)
print("Gradient for ranges: ", dy_dx)
# file: actor/urls.py
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls.defaults import *
urlpatterns = patterns('actor.views',
(r'^invite$', 'actor_invite'),
(r'^contacts$', 'actor_contacts'),
(r'^contacts/(?P<format>json|xml|atom)$', 'actor_contacts'),
(r'^followers$', 'actor_followers'),
(r'^followers/(?P<format>json|xml|atom)$', 'actor_followers'),
(r'^presence/(?P<item>[\da-f]+|last)/(?P<format>json|xml|atom)$', 'actor_item'),
(r'^presence/(?P<item>[\da-f]+|last)$', 'actor_item'),
#(r'^presence/(?P<format>json|xml|atom)$', 'presence_current'),
#(r'^presence$', 'presence_current'),
(r'^(?P<format>json|xml|atom|rss)$', 'actor_history'),
(r'^feed/(?P<format>json|xml|atom|rss)$', 'actor_history'),
(r'^contacts/feed/(?P<format>json|xml|atom|rss)$', 'actor_overview'),
(r'^overview/(?P<format>json|xml|atom|rss)$', 'actor_overview'),
(r'^overview$', 'actor_overview', {"format": "html"}),
(r'^$', 'actor_history', {'format': 'html'}),
(r'^settings$', 'actor_settings'),
(r'^settings/(?P<page>\w+)$', 'actor_settings'),
)
handler404 = 'common.views.common_404'
handler500 = 'common.views.common_500'
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2017, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Thermal Analysis Module """
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
import operator
import os
from trappy.utils import listify
from devlib.utils.misc import memoized, list_to_mask, mask_to_list
from analysis_module import AnalysisModule
from trace import ResidencyTime, ResidencyData
from bart.common.Utils import area_under_curve
from matplotlib.ticker import MaxNLocator
class ThermalAnalysis(AnalysisModule):
"""
Support for plotting Thermal Analysis data
:param trace: input Trace object
:type trace: :mod:`libs.utils.Trace`
"""
###############################################################################
# Analysis properties
###############################################################################
@property
@memoized
def thermal_zones(self):
"""
Get thermal zone ids that appear in the trace
"""
df = self._dfg_trace_event('thermal_temperature')
return df["thermal_zone"].unique().tolist()
@property
@memoized
def cpufreq_cdevs(self):
"""
Get cpufreq cooling devices that appear in the trace
"""
df = self._dfg_trace_event('thermal_power_cpu_limit')
res = df['cpus'].unique().tolist()
return [mask_to_list(mask) for mask in res]
@property
@memoized
def devfreq_cdevs(self):
"""
Get devfreq cooling devices that appear in the trace
"""
df = self._dfg_trace_event('thermal_power_devfreq_limit')
return df['type'].unique().tolist()
###############################################################################
# DataFrame Getter Methods
###############################################################################
def _dfg_thermal_zone_temperature(self, ids=None):
"""
Get the temperature readings of one or more thermal zone(s)
(all by default)
:param ids: The thermal zones to consider
:type ids: list(int)
"""
df = self._dfg_trace_event('thermal_temperature')
df = df[['id', 'thermal_zone', 'temp']]
if ids is not None:
df = df[df.id.isin(ids)]
return df
def _dfg_cpufreq_cooling_state(self, cpus=None):
"""
Get the cooling states of one or more cpufreq cooling device(s)
(all by default)
:param cpus: The CPUs to consider
:type cpus: list(int)
"""
df = self._dfg_trace_event('thermal_power_cpu_limit')
df = df[['cpus', 'freq', 'cdev_state']]
if cpus is not None:
# Find masks that match the requested CPUs
# This can include other CPUs
masks = self._matchingMasks(cpus)
df = df[df.cpus.isin(masks)]
return df
def _dfg_devfreq_cooling_state(self, devices=None):
"""
Get the cooling states of one or more devfreq cooling device(s)
(all by default)
:param devices: The devfreq devices to consider
        :type devices: list(str)
"""
df = self._dfg_trace_event('thermal_power_devfreq_limit')
df = df[['type', 'freq', 'cdev_state']]
if devices is not None:
df = df[df.type.isin(devices)]
return df
###############################################################################
# Plotting Methods
###############################################################################
def plotTemperature(self, thermal_zones=None, ax=None):
"""
Plot temperature of thermal zones (all by default)
Requires the following trace event:
- thermal_temperature
:param thermal_zones: ID(s) of the zones to be plotted.
All the zones are plotted by default.
                              IDs can be found in sysfs: /sys/class/thermal/thermal_zone<ID>
:type thermal_zones: list(int)
"""
if not self._trace.hasEvents('thermal_temperature'):
self._log.warning('Event [{}] not found, plot DISABLED!'
.format('thermal_temperature'))
return
plot_df = self._dfg_thermal_zone_temperature(thermal_zones)
        def stringify_tz(tz_id):
            # Map a thermal zone id to its human-readable name from the trace
            return plot_df[plot_df.id == tz_id]['thermal_zone'].unique()[0]
filters = None if thermal_zones is None else {'thermal_zone' : thermal_zones}
self._plot_generic(plot_df, 'id', filters=filters, columns=['temp'],
prettify_name=stringify_tz,
drawstyle='steps-post', ax=ax
)
if thermal_zones is None:
suffix = ''
else:
suffix = '_' + '_'.join(map(str, thermal_zones))
# Save generated plots into datadir
        figname = os.path.join(
            self._trace.plots_dir,
            '{}thermal_temperature{}.png'.format(
                self._trace.plots_prefix, suffix
            )
        )
pl.savefig(figname, bbox_inches='tight')
def plotCPUCoolingStates(self, cpus=None, ax=None):
"""
Plot the state evolution of cpufreq cooling devices (all by default)
Requires the following trace event:
- thermal_power_cpu_limit
        :param cpus: list of CPUs to plot. Whole clusters can be controlled as
          a single cooling device; such a device is plotted as long as one of
          its CPUs is in the list.
:type cpus: list(int)
"""
if not self._trace.hasEvents('thermal_power_cpu_limit'):
self._log.warning('Event [{}] not found, plot DISABLED!'
.format('thermal_power_cpu_limit'))
return
plot_df = self._dfg_trace_event('thermal_power_cpu_limit')
def stringify_mask(mask):
return 'CPUs {}'.format(mask_to_list(mask))
# Find masks that match the requested CPUs
# This can include other CPUs
masks = None
if cpus is not None:
masks = self._matchingMasks(cpus)
filters = None if masks is None else {'cpus' : masks}
_ax = self._plot_generic(plot_df, 'cpus', filters=filters, columns=['cdev_state'],
prettify_name=stringify_mask,
drawstyle='steps-post', ax=ax
)
if ax is None:
ax = _ax
# Cdev status is an integer series
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.grid(axis='y')
if cpus is None:
suffix = ''
else:
suffix = '_' + '_'.join(map(str, cpus))
# Save generated plots into datadir
        figname = os.path.join(
            self._trace.plots_dir,
            '{}thermal_cpufreq_cdev_state{}.png'.format(
                self._trace.plots_prefix, suffix
            )
        )
pl.savefig(figname, bbox_inches='tight')
def plotDevfreqCoolingStates(self, devices=None, ax=None):
"""
Plot the state evolution of devfreq cooling devices (all by default)
Requires the following trace event:
- thermal_power_devfreq_limit
:param devices: list of devfreq devices to plot.
        :type devices: list(str)
"""
if not self._trace.hasEvents('thermal_power_devfreq_limit'):
self._log.warning('Event [{}] not found, plot DISABLED!'
.format('thermal_power_devfreq_limit'))
return
plot_df = self._dfg_trace_event('thermal_power_devfreq_limit')
# Might have more than one device selected by 'type', but that's
# the best we can do
filters = None if devices is None else {'type' : devices}
_ax = self._plot_generic(plot_df, 'type', filters=filters, columns=['cdev_state'],
drawstyle='steps-post', ax=ax
)
if ax is None:
ax = _ax
# Cdev status is an integer series
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.grid(axis='y')
if devices is None:
suffix = ''
else:
suffix = '_' + '_'.join(map(str, devices))
# Save generated plots into datadir
        figname = os.path.join(
            self._trace.plots_dir,
            '{}thermal_devfreq_cdev_state{}.png'.format(
                self._trace.plots_prefix, suffix
            )
        )
pl.savefig(figname, bbox_inches='tight')
###############################################################################
# Utility Methods
###############################################################################
def _matchingMasks(self, cpus):
df = self._dfg_trace_event('thermal_power_cpu_limit')
global_mask = list_to_mask(cpus)
cpumasks = df['cpus'].unique().tolist()
return [m for m in cpumasks if m & global_mask]
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
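
# Illustrative sketch (not part of the original module): how the CPU-mask
# matching used by _matchingMasks() behaves, relying only on devlib's
# list_to_mask helper already imported above.
#
#   >>> from devlib.utils.misc import list_to_mask
#   >>> cdev_masks = [0x3, 0xc]        # two cooling devices: CPUs 0-1, CPUs 2-3
#   >>> wanted = list_to_mask([1])     # 0b0010
#   >>> [m for m in cdev_masks if m & wanted]
#   [3]
#
# A whole-cluster cooling device is therefore selected (and plotted) as soon
# as any one of its CPUs appears in the requested list.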
| StarcoderdataPython |
1661213 | <reponame>IvanNardini/2-Engineering<filename>MachineLearningPlatforms/Kubeflow/applications/base/pipelines/conditionals/component.py
#!/usr/bin/env python3
# This is an example for testing conditions in Kubeflow
# Steps:
# 1 - Define functions
# 2 - Define lightweight python components
# 3 - Write the component to a file
# Goal is:
# - Testing conditions with dsl.Condition
import argparse
import kfp.components as cpt
# Functions ------------------------------------------------------------------------------------------------------------
def get_word(text: str, word: str) -> bool:
text_lower = text.lower()
word_lower = word.lower()
return True if word_lower in text_lower else False
# Component ------------------------------------------------------------------------------------------------------------
def run_component(args):
OUT_COMPONENTS_DIR = args.out_component_dir
get_word_component = cpt.create_component_from_func(get_word,
output_component_file=f'{OUT_COMPONENTS_DIR}/get_word.component')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Create the component yaml")
parser.add_argument('--out-component-dir', default='../../out/components')
args = parser.parse_args()
run_component(args=args)
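
# Illustrative sketch (assumption, not part of the original file): once the
# component yaml has been written, it can be loaded back and combined with
# dsl.Condition in a KFP v1 pipeline roughly as below. The file path and the
# downstream step are hypothetical.
#
#   import kfp.dsl as dsl
#   import kfp.components as cpt
#
#   get_word_op = cpt.load_component_from_file(
#       '../../out/components/get_word.component')
#
#   @dsl.pipeline(name='conditional-example')
#   def pipeline(text: str = 'hello world', word: str = 'hello'):
#       check = get_word_op(text=text, word=word)
#       # Boolean outputs travel as strings at runtime, so the condition
#       # compares against 'True'.
#       with dsl.Condition(check.output == 'True'):
#           pass  # steps that should only run when the word is found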
| StarcoderdataPython |
121386 | <reponame>henakauser/resume-api<gh_stars>0
"""PostgreSQL utilities"""
def pg_result_to_dict(columns, result, single_object=False):
"""Convert a PostgreSQL query result to a dict"""
resp = []
for row in result:
resp.append(dict(zip(columns, row)))
if single_object:
return resp[0]
return resp
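
# Usage sketch (assumption, not part of the original module): with a DB-API
# driver such as psycopg2, the column names usually come from
# cursor.description; the table and column names below are hypothetical.
#
#   cur.execute("SELECT id, title FROM resume_entries")
#   columns = [col[0] for col in cur.description]
#   entries = pg_result_to_dict(columns, cur.fetchall())
#   # -> [{'id': 1, 'title': '...'}, ...]
#   one = pg_result_to_dict(columns, cur.fetchmany(1), single_object=True)
#   # -> {'id': 1, 'title': '...'}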
| StarcoderdataPython |
3216774 | <reponame>grantmcconnaughey/django-related-entities
from django.test import TestCase
from relatedentities.models import RelatedEntity
from relatedentities.utils import add_related
from .models import Cat, Dog
class RelatedEntitiesUtilsTests(TestCase):
def setUp(self):
self.cat = Cat.objects.create(name="Test Cat")
self.dog = Dog.objects.create(name="Test Dog")
def test_add_related_adds_one_record(self):
        add_related(self.cat, self.dog)
self.assertEqual(RelatedEntity.objects.count(), 1)
def test_add_related_returns_instance_of_related_entity(self):
related_entity = add_related(self.cat, self.dog)
self.assertIsInstance(related_entity, RelatedEntity)
| StarcoderdataPython |
1602711 | import numpy as np
from pandas import DataFrame
from tensorflow.keras.preprocessing.sequence import pad_sequences
import csv
import os
class ECGDataIterator:
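    """Iterate over (lead I, lead III) sample pairs from a raw binary ECG file.

    Layout assumed here (inferred from the reads in __next__, not documented in
    the original source): each sample is four bytes -- two little-endian uint16
    values, channel I followed by channel III, both stored with a fixed DC
    offset of 2048 that is subtracted on read. `subsample` keeps every n-th
    sample by seeking past the intervening ones.
    """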
def __init__(self, f, subsample=1):
self.ifd = open(f, "rb")
self._ss = subsample
self._offset = 2048
def __next__(self):
chIb = self.ifd.read(2)
if chIb == b'':
self.ifd.close()
raise StopIteration
chI = int.from_bytes(chIb, byteorder="little") - self._offset
chIII = int.from_bytes(self.ifd.read(2), byteorder="little") - self._offset
# move pointer 4 bytes times subsampling to next data
if self._ss > 1:
self.ifd.seek(4*(self._ss-1), 1)
return chI, chIII
def __iter__(self):
return self
def extract_data(file_list, sample_rate=125, subsample=1, duration=42):
fs = sample_rate // subsample
num_samples = duration * fs
length = len(file_list)
x = np.empty((length, num_samples, 2))
y = np.empty(length)
for i, f in enumerate(file_list):
data = DataFrame(ECGDataIterator(f, subsample)).to_numpy().T
data = pad_sequences(data, maxlen=num_samples, padding='pre', truncating='pre')
x[i, :] = data.T
y[i] = 0 if 'sinus' in f else 1
return x, y
def extract_ecg_file_list(datafolder, csvfile):
file_list = list()
with open(os.path.join(datafolder, csvfile)) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
file_list.append(os.path.join(datafolder, row[0]))
return file_list
| StarcoderdataPython |