#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# big_abs documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import big_abs
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Biggest Absolute Value'
copyright = u"2020, Tiffany Timbers"
author = u"Tiffany Timbers"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = big_abs.__version__
# The full version, including alpha/beta/rc tags.
release = big_abs.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'big_absdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'big_abs.tex',
u'Biggest Absolute Value Documentation',
u'Tiffany Timbers', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'big_abs',
u'Biggest Absolute Value Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'big_abs',
u'Biggest Absolute Value Documentation',
author,
'big_abs',
'One line description of project.',
'Miscellaneous'),
]
# Add napoleon to the extensions list (append, so autodoc/viewcode are kept)
extensions.append('sphinx.ext.napoleon')
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Optional, Tuple, cast
import gym
import hydra.utils
import numpy as np
import omegaconf
import torch
import mbrl.constants
import mbrl.models
import mbrl.planning
import mbrl.third_party.pytorch_sac as pytorch_sac
import mbrl.types
import mbrl.util
import mbrl.util.common
import mbrl.util.math
from mbrl.planning.sac_wrapper import SACAgent
MBPO_LOG_FORMAT = mbrl.constants.EVAL_LOG_FORMAT + [
("epoch", "E", "int"),
("rollout_length", "RL", "int"),
]
def rollout_model_and_populate_sac_buffer(
model_env: mbrl.models.ModelEnv,
replay_buffer: mbrl.util.ReplayBuffer,
agent: SACAgent,
sac_buffer: pytorch_sac.ReplayBuffer,
sac_samples_action: bool,
rollout_horizon: int,
batch_size: int,
):
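    """Rolls out the learned model and fills the SAC buffer with imagined data.

    Initial observations are sampled from the environment replay buffer. The
    agent then acts in the model environment for ``rollout_horizon`` steps,
    and transitions from rollouts that have not yet terminated are added to
    ``sac_buffer``.
    """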
batch = replay_buffer.sample(batch_size)
initial_obs, *_ = cast(mbrl.types.TransitionBatch, batch).astuple()
model_state = model_env.reset(
initial_obs_batch=cast(np.ndarray, initial_obs),
return_as_np=True,
)
accum_dones = np.zeros(initial_obs.shape[0], dtype=bool)
obs = initial_obs
for i in range(rollout_horizon):
action = agent.act(obs, sample=sac_samples_action, batched=True)
pred_next_obs, pred_rewards, pred_dones, model_state = model_env.step(
action, model_state, sample=True
)
sac_buffer.add_batch(
obs[~accum_dones],
action[~accum_dones],
pred_rewards[~accum_dones],
pred_next_obs[~accum_dones],
pred_dones[~accum_dones],
pred_dones[~accum_dones],
)
obs = pred_next_obs
accum_dones |= pred_dones.squeeze()
def evaluate(
env: gym.Env,
agent: pytorch_sac.Agent,
num_episodes: int,
video_recorder: pytorch_sac.VideoRecorder,
) -> float:
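    """Returns the agent's average episode reward over ``num_episodes``
    evaluation episodes, recording a video of the first episode."""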
avg_episode_reward = 0
for episode in range(num_episodes):
obs = env.reset()
video_recorder.init(enabled=(episode == 0))
done = False
episode_reward = 0
while not done:
action = agent.act(obs)
obs, reward, done, _ = env.step(action)
video_recorder.record(env)
episode_reward += reward
avg_episode_reward += episode_reward
return avg_episode_reward / num_episodes
def maybe_replace_sac_buffer(
sac_buffer: Optional[pytorch_sac.ReplayBuffer],
new_capacity: int,
obs_shape: Tuple[int],
act_shape: Tuple[int],
device: torch.device,
) -> pytorch_sac.ReplayBuffer:
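    """Returns a SAC replay buffer with ``new_capacity``, copying over any
    stored transitions; returns ``sac_buffer`` unchanged if its capacity
    already matches."""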
if sac_buffer is None or new_capacity != sac_buffer.capacity:
new_buffer = pytorch_sac.ReplayBuffer(
obs_shape, act_shape, new_capacity, device
)
if sac_buffer is None:
return new_buffer
n = len(sac_buffer)
new_buffer.add_batch(
sac_buffer.obses[:n],
sac_buffer.actions[:n],
sac_buffer.rewards[:n],
sac_buffer.next_obses[:n],
np.logical_not(sac_buffer.not_dones[:n]),
np.logical_not(sac_buffer.not_dones_no_max[:n]),
)
return new_buffer
return sac_buffer
def train(
env: gym.Env,
test_env: gym.Env,
termination_fn: mbrl.types.TermFnType,
cfg: omegaconf.DictConfig,
silent: bool = False,
work_dir: Optional[str] = None,
) -> np.float32:
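    """Runs the MBPO training loop: environment steps feed the model dataset,
    the dynamics model is retrained periodically, imagined rollouts populate
    the SAC buffer, and the SAC agent is updated from that buffer. Returns
    the best evaluation reward observed."""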
# ------------------- Initialization -------------------
debug_mode = cfg.get("debug_mode", False)
obs_shape = env.observation_space.shape
act_shape = env.action_space.shape
mbrl.planning.complete_agent_cfg(env, cfg.algorithm.agent)
agent = hydra.utils.instantiate(cfg.algorithm.agent)
work_dir = work_dir or os.getcwd()
# enable_back_compatible to use pytorch_sac agent
logger = mbrl.util.Logger(work_dir, enable_back_compatible=True)
logger.register_group(
mbrl.constants.RESULTS_LOG_NAME,
MBPO_LOG_FORMAT,
color="green",
dump_frequency=1,
)
save_video = cfg.get("save_video", False)
video_recorder = pytorch_sac.VideoRecorder(work_dir if save_video else None)
rng = np.random.default_rng(seed=cfg.seed)
torch_generator = torch.Generator(device=cfg.device)
if cfg.seed is not None:
torch_generator.manual_seed(cfg.seed)
    # -------------- Create initial dataset --------------
dynamics_model = mbrl.util.common.create_one_dim_tr_model(cfg, obs_shape, act_shape)
use_double_dtype = cfg.algorithm.get("normalize_double_precision", False)
dtype = np.double if use_double_dtype else np.float32
replay_buffer = mbrl.util.common.create_replay_buffer(
cfg,
obs_shape,
act_shape,
rng=rng,
obs_type=dtype,
action_type=dtype,
reward_type=dtype,
)
random_explore = cfg.algorithm.random_initial_explore
mbrl.util.common.rollout_agent_trajectories(
env,
cfg.algorithm.initial_exploration_steps,
mbrl.planning.RandomAgent(env) if random_explore else agent,
{} if random_explore else {"sample": True, "batched": False},
replay_buffer=replay_buffer,
)
# ---------------------------------------------------------
# --------------------- Training Loop ---------------------
rollout_batch_size = (
cfg.overrides.effective_model_rollouts_per_step * cfg.algorithm.freq_train_model
)
trains_per_epoch = int(
np.ceil(cfg.overrides.epoch_length / cfg.overrides.freq_train_model)
)
updates_made = 0
env_steps = 0
model_env = mbrl.models.ModelEnv(
env, dynamics_model, termination_fn, None, generator=torch_generator
)
model_trainer = mbrl.models.ModelTrainer(
dynamics_model,
optim_lr=cfg.overrides.model_lr,
weight_decay=cfg.overrides.model_wd,
logger=None if silent else logger,
)
best_eval_reward = -np.inf
epoch = 0
sac_buffer = None
while env_steps < cfg.overrides.num_steps:
rollout_length = int(
mbrl.util.math.truncated_linear(
*(cfg.overrides.rollout_schedule + [epoch + 1])
)
)
sac_buffer_capacity = rollout_length * rollout_batch_size * trains_per_epoch
sac_buffer_capacity *= cfg.overrides.num_epochs_to_retain_sac_buffer
sac_buffer = maybe_replace_sac_buffer(
sac_buffer,
sac_buffer_capacity,
obs_shape,
act_shape,
torch.device(cfg.device),
)
obs, done = None, False
for steps_epoch in range(cfg.overrides.epoch_length):
if steps_epoch == 0 or done:
obs, done = env.reset(), False
# --- Doing env step and adding to model dataset ---
next_obs, reward, done, _ = mbrl.util.common.step_env_and_add_to_buffer(
env, obs, agent, {}, replay_buffer
)
# --------------- Model Training -----------------
if (env_steps + 1) % cfg.overrides.freq_train_model == 0:
mbrl.util.common.train_model_and_save_model_and_data(
dynamics_model,
model_trainer,
cfg.overrides,
replay_buffer,
work_dir=work_dir,
)
# --------- Rollout new model and store imagined trajectories --------
# Batch all rollouts for the next freq_train_model steps together
rollout_model_and_populate_sac_buffer(
model_env,
replay_buffer,
agent,
sac_buffer,
cfg.algorithm.sac_samples_action,
rollout_length,
rollout_batch_size,
)
if debug_mode:
print(
f"Epoch: {epoch}. "
f"SAC buffer size: {len(sac_buffer)}. "
f"Rollout length: {rollout_length}. "
f"Steps: {env_steps}"
)
# --------------- Agent Training -----------------
for _ in range(cfg.overrides.num_sac_updates_per_step):
if (env_steps + 1) % cfg.overrides.sac_updates_every_steps != 0 or len(
sac_buffer
) < rollout_batch_size:
break # only update every once in a while
agent.update(sac_buffer, logger, updates_made)
updates_made += 1
if not silent and updates_made % cfg.log_frequency_agent == 0:
logger.dump(updates_made, save=True)
# ------ Epoch ended (evaluate and save model) ------
if (env_steps + 1) % cfg.overrides.epoch_length == 0:
avg_reward = evaluate(
test_env, agent, cfg.algorithm.num_eval_episodes, video_recorder
)
logger.log_data(
mbrl.constants.RESULTS_LOG_NAME,
{
"epoch": epoch,
"env_step": env_steps,
"episode_reward": avg_reward,
"rollout_length": rollout_length,
},
)
if avg_reward > best_eval_reward:
video_recorder.save(f"{epoch}.mp4")
best_eval_reward = avg_reward
torch.save(
agent.critic.state_dict(), os.path.join(work_dir, "critic.pth")
)
torch.save(
agent.actor.state_dict(), os.path.join(work_dir, "actor.pth")
)
epoch += 1
env_steps += 1
obs = next_obs
return np.float32(best_eval_reward)
|
from typing import Union
from ..types import TealType
from ..ir import TealOp, Op, TealBlock
from ..errors import TealInputError
from .leafexpr import LeafExpr
class Int(LeafExpr):
"""An expression that represents a uint64."""
def __init__(self, value: int) -> None:
"""Create a new uint64.
Args:
            value: The integer value this uint64 will represent. Must be a non-negative
                value less than 2**64.
"""
if type(value) is not int:
raise TealInputError("invalid input type {} to Int".format(type(value)))
elif value >= 0 and value < 2 ** 64:
self.value = value
else:
raise TealInputError("Int {} is out of range".format(value))
def __teal__(self):
op = TealOp(Op.int, self.value)
return TealBlock.FromOp(op)
def __str__(self):
return "(Int: {})".format(self.value)
def type_of(self):
return TealType.uint64
Int.__module__ = "pyteal"
class EnumInt(LeafExpr):
"""An expression that represents uint64 enum values."""
def __init__(self, name: str) -> None:
"""Create an expression to reference a uint64 enum value.
Args:
name: The name of the enum value.
"""
self.name = name
def __teal__(self):
op = TealOp(Op.int, self.name)
return TealBlock.FromOp(op)
def __str__(self):
return "(IntEnum: {})".format(self.name)
def type_of(self):
return TealType.uint64
EnumInt.__module__ = "pyteal"
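# Example usage (a sketch):
#   Int(42)          # a uint64 constant expression
#   EnumInt("pay")   # a named uint64 constant, e.g. a transaction type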
|
"""python wrapper for CAISO Oasis API"""
__version__ = "0.2.7"
|
import pytest
from frictionless import describe, Resource, Package, helpers
from frictionless.plugins.csv import CsvDialect
# General
@pytest.mark.skipif(helpers.is_platform("windows"), reason="It doesn't work for Windows")
def test_describe():
resource = describe("data/table.csv")
assert resource.metadata_valid
assert resource == {
"profile": "tabular-data-resource",
"name": "table",
"path": "data/table.csv",
"scheme": "file",
"format": "csv",
"hashing": "md5",
"encoding": "utf-8",
"innerpath": "",
"compression": "",
"control": {"newline": ""},
"dialect": {},
"layout": {},
"schema": {
"fields": [
{"name": "id", "type": "integer"},
{"name": "name", "type": "string"},
]
},
"stats": {
"hash": "6c2c61dd9b0e9c6876139a449ed87933",
"bytes": 30,
"fields": 2,
"rows": 2,
},
}
def test_describe_resource():
resource = describe("data/table.csv")
assert isinstance(resource, Resource)
def test_describe_package():
resource = describe(["data/table.csv"])
assert isinstance(resource, Package)
def test_describe_package_pattern():
resource = describe("data/chunk*.csv")
assert isinstance(resource, Package)
def test_describe_package_type_package():
resource = describe("data/table.csv", type="package")
assert isinstance(resource, Package)
# Issues
def test_describe_blank_cells_issue_7():
source = "header1,header2\n1,\n2,\n3,\n"
resource = describe(source, scheme="text", format="csv")
assert resource.schema == {
"fields": [
{"name": "header1", "type": "integer"},
{"name": "header2", "type": "any"},
]
}
def test_describe_whitespace_cells_issue_7():
source = "header1,header2\n1, \n2, \n3, \n"
resource = describe(source, scheme="text", format="csv")
assert resource.schema == {
"fields": [
{"name": "header1", "type": "integer"},
{"name": "header2", "type": "string"},
]
}
def test_describe_whitespace_cells_with_skip_initial_space_issue_7():
source = "header1,header2\n1, \n2, \n3, \n"
dialect = CsvDialect(skip_initial_space=True)
resource = describe(source, scheme="text", format="csv", dialect=dialect)
assert resource.schema == {
"fields": [
{"name": "header1", "type": "integer"},
{"name": "header2", "type": "any"},
]
}
|
# Django settings for patchman project.
from __future__ import unicode_literals, absolute_import
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1']
ADMINS = []
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# SECURE_BROWSER_XSS_FILTER = True
# SECURE_CONTENT_TYPE_NOSNIFF = True
# CSRF_COOKIE_SECURE = True
# SESSION_COOKIE_SECURE = True
# X_FRAME_OPTIONS = 'DENY'
SITE_ID = 1
ROOT_URLCONF = 'patchman.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = False
DEFAULT_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.admindocs',
'django.contrib.sites',
]
THIRD_PARTY_APPS = [
'django_extensions',
'tagging',
'bootstrap3',
'rest_framework',
]
LOCAL_APPS = [
'arch.apps.ArchConfig',
'domains.apps.DomainsConfig',
'hosts.apps.HostsConfig',
'operatingsystems.apps.OperatingsystemsConfig',
'packages.apps.PackagesConfig',
'repos.apps.ReposConfig',
'reports.apps.ReportsConfig',
'util.apps.UtilConfig',
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',), # noqa
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination', # noqa
'PAGE_SIZE': 100,
}
try:
from celery import Celery # noqa
except ImportError:
USE_ASYNC_PROCESSING = False
else:
THIRD_PARTY_APPS += ['celery']
CELERY_IMPORTS = ['reports.tasks']
USE_ASYNC_PROCESSING = True
BROKER_HOST = 'localhost'
BROKER_PORT = 5672
BROKER_USER = 'guest'
BROKER_PASSWORD = 'guest'
BROKER_VHOST = '/'
LOGIN_REDIRECT_URL = '/patchman/'
LOGOUT_REDIRECT_URL = '/patchman/login/'
LOGIN_URL = '/patchman/login/'
# URL prefix for static files.
STATIC_URL = '/patchman/static/'
# Additional dirs where the media should be copied from
STATICFILES_DIRS = [os.path.abspath(os.path.join(BASE_DIR, 'patchman/static'))]
# Absolute path to the directory static files should be collected to.
STATIC_ROOT = '/var/lib/patchman/static/'
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
try:
from .local_settings import * # noqa
except ImportError:
if sys.prefix == '/usr':
conf_path = '/etc/patchman'
else:
conf_path = sys.prefix + '/etc/patchman'
local_settings = conf_path + '/local_settings.py'
try:
exec(compile(open(local_settings).read(), local_settings, 'exec'))
except IOError:
pass
MANAGERS = ADMINS
INSTALLED_APPS = DEFAULT_APPS + THIRD_PARTY_APPS + LOCAL_APPS
if RUN_GUNICORN or (len(sys.argv) > 1 and sys.argv[1] == 'runserver'): # noqa
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/login/'
LOGIN_URL = '/login/'
STATICFILES_DIRS = [os.path.abspath(os.path.join(BASE_DIR, 'patchman/static'))] # noqa
STATIC_ROOT = os.path.abspath(os.path.join(BASE_DIR, 'run/static'))
STATIC_URL = '/static/'
|
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes/functions related to Google Cloud Storage."""
import logging
import posixpath
import re
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import object_storage_service
from perfkitbenchmarker import providers
from perfkitbenchmarker import vm_util
flags.DEFINE_string('google_cloud_sdk_version', None,
'Use a particular version of the Google Cloud SDK, e.g.: '
'103.0.0')
FLAGS = flags.FLAGS
_DEFAULT_GCP_SERVICE_KEY_FILE = 'gcp_credentials.json'
DEFAULT_GCP_REGION = 'us-central1'
GCLOUD_CONFIG_PATH = '.config/gcloud'
class GoogleCloudStorageService(object_storage_service.ObjectStorageService):
"""Interface to Google Cloud Storage."""
STORAGE_NAME = providers.GCP
def PrepareService(self, location):
self.location = location or DEFAULT_GCP_REGION
def MakeBucket(self, bucket):
command = ['gsutil', 'mb']
if self.location:
command.extend(['-l', self.location])
if self.location and '-' in self.location:
# regional buckets
command.extend(['-c', 'regional'])
elif FLAGS.object_storage_storage_class is not None:
command.extend(['-c', FLAGS.object_storage_storage_class])
if FLAGS.project:
command.extend(['-p', FLAGS.project])
command.extend(['gs://%s' % bucket])
vm_util.IssueCommand(command)
@vm_util.Retry()
def DeleteBucket(self, bucket):
# We want to retry rm and rb together because it's possible that
# we issue rm followed by rb, but then rb fails because the
# metadata store isn't consistent and the server that handles the
# rb thinks there are still objects in the bucket. It's also
# possible for rm to fail because the metadata store is
# inconsistent and rm doesn't find all objects, so can't delete
# them all.
self.EmptyBucket(bucket)
vm_util.IssueCommand(
['gsutil', 'rb',
'gs://%s' % bucket])
def EmptyBucket(self, bucket):
vm_util.IssueCommand(
['gsutil', '-m', 'rm', '-r',
'gs://%s/*' % bucket])
def PrepareVM(self, vm):
vm.Install('wget')
# Unfortunately there isn't one URL scheme that works for both
# versioned archives and "always get the latest version".
if FLAGS.google_cloud_sdk_version is not None:
sdk_file = ('google-cloud-sdk-%s-linux-x86_64.tar.gz' %
FLAGS.google_cloud_sdk_version)
sdk_url = 'https://storage.googleapis.com/cloud-sdk-release/' + sdk_file
else:
sdk_file = 'google-cloud-sdk.tar.gz'
sdk_url = 'https://dl.google.com/dl/cloudsdk/release/' + sdk_file
vm.RemoteCommand('wget ' + sdk_url)
vm.RemoteCommand('tar xvf ' + sdk_file)
# Versioned and unversioned archives both unzip to a folder called
# 'google-cloud-sdk'.
vm.RemoteCommand('bash ./google-cloud-sdk/install.sh '
'--disable-installation-options '
'--usage-report=false '
'--rc-path=.bash_profile '
'--path-update=true '
'--bash-completion=true')
vm.RemoteCommand('mkdir -p .config')
boto_file = object_storage_service.FindBotoFile()
vm.PushFile(boto_file, object_storage_service.DEFAULT_BOTO_LOCATION)
# If the boto file specifies a service key file, copy that service key file
# to the VM and modify the .boto file on the VM to point to the copied file.
with open(boto_file) as f:
boto_contents = f.read()
match = re.search(r'gs_service_key_file\s*=\s*(.*)', boto_contents)
if match:
service_key_file = match.group(1)
vm.PushFile(service_key_file, _DEFAULT_GCP_SERVICE_KEY_FILE)
vm_pwd, _ = vm.RemoteCommand('pwd')
vm.RemoteCommand(
'sed -i '
'-e "s/^gs_service_key_file.*/gs_service_key_file = %s/" %s' % (
re.escape(posixpath.join(vm_pwd.strip(),
_DEFAULT_GCP_SERVICE_KEY_FILE)),
object_storage_service.DEFAULT_BOTO_LOCATION))
vm.gsutil_path, _ = vm.RemoteCommand('which gsutil', login_shell=True)
vm.gsutil_path = vm.gsutil_path.split()[0]
# Detect if we need to install crcmod for gcp.
# See "gsutil help crc" for details.
raw_result, _ = vm.RemoteCommand('%s version -l' % vm.gsutil_path)
logging.info('gsutil version -l raw result is %s', raw_result)
search_string = 'compiled crcmod: True'
result_string = re.findall(search_string, raw_result)
if not result_string:
logging.info('compiled crcmod is not available, installing now...')
try:
# Try uninstall first just in case there is a pure python version of
# crcmod on the system already, this is required by gsutil doc:
# https://cloud.google.com/storage/docs/
# gsutil/addlhelp/CRC32CandInstallingcrcmod
vm.Uninstall('crcmod')
except errors.VirtualMachine.RemoteCommandError:
logging.info('pip uninstall crcmod failed, could be normal if crcmod '
'is not available at all.')
vm.Install('crcmod')
vm.installed_crcmod = True
else:
logging.info('compiled crcmod is available, not installing again.')
vm.installed_crcmod = False
vm.Install('gcs_boto_plugin')
def CleanupVM(self, vm):
vm.RemoveFile('google-cloud-sdk')
vm.RemoveFile(GCLOUD_CONFIG_PATH)
vm.RemoveFile(object_storage_service.DEFAULT_BOTO_LOCATION)
vm.Uninstall('gcs_boto_plugin')
def CLIUploadDirectory(self, vm, directory, files, bucket):
return vm.RemoteCommand(
'time %s -m cp %s/* gs://%s/' % (
vm.gsutil_path, directory, bucket))
def CLIDownloadBucket(self, vm, bucket, objects, dest):
return vm.RemoteCommand(
'time %s -m cp gs://%s/* %s' % (vm.gsutil_path, bucket, dest))
def Metadata(self, vm):
metadata = {'pkb_installed_crcmod': vm.installed_crcmod,
object_storage_service.BOTO_LIB_VERSION:
linux_packages.GetPipPackageVersion(vm, 'boto')}
return metadata
@classmethod
def APIScriptFiles(cls):
return ['boto_service.py', 'gcs.py']
|
def odd(n):
"""Tells is a number is odd."""
if n <= 0:
return False
return not odd(n-1)
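# Quick sanity checks of the recursion (example values):
assert odd(3) is True
assert odd(4) is False
assert odd(0) is False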
|
import pika
import time
credentials = pika.PlainCredentials('therabbit', 'secret123')
parameters = pika.ConnectionParameters(host='rabbitmq', port=5672, virtual_host='/', credentials=credentials)
print(f'Parameters: {parameters}')
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue='hello')
for i in range(10):
channel.basic_publish(exchange='', routing_key='hello', body=f'Hello World #{i}!')
print(f" [{i}] Sent 'Hello World!'")
time.sleep(2)
connection.close()
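# For reference, a minimal consumer counterpart (a sketch; run as a separate
# script against the same broker and queue):
#
#   def callback(ch, method, properties, body):
#       print(f" [x] Received {body.decode()}")
#
#   channel.basic_consume(queue='hello', on_message_callback=callback, auto_ack=True)
#   channel.start_consuming()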
|
# -*- coding: utf-8 -*-
'''
Return data to an ODBC compliant server. This driver was
developed with Microsoft SQL Server in mind, but theoretically
could be used to return data to any compliant ODBC database
as long as there is a working ODBC driver for it on your
minion platform.
:maintainer: C. R. Oldham (cr@saltstack.com)
:maturity: New
:depends: unixodbc, pyodbc, freetds (for SQL Server)
:platform: all
To enable this returner the minion will need
On Linux:
unixodbc (http://www.unixodbc.org)
pyodbc (`pip install pyodbc`)
The FreeTDS ODBC driver for SQL Server (http://www.freetds.org)
or another compatible ODBC driver
On Windows:
TBD
unixODBC and FreeTDS need to be configured via /etc/odbcinst.ini and
/etc/odbc.ini.
/etc/odbcinst.ini::
[TDS]
Description=TDS
Driver=/usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so
(Note the above Driver line needs to point to the location of the FreeTDS
shared library. This example is for Ubuntu 14.04.)
/etc/odbc.ini::
[TS]
Description = "Salt Returner"
Driver=TDS
Server = <your server ip or fqdn>
Port = 1433
Database = salt
Trace = No
Also you need the following values configured in the minion or master config.
Configure as you see fit::
returner.odbc.dsn: 'TS'
returner.odbc.user: 'salt'
returner.odbc.passwd: 'salt'
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location::
alternative.returner.odbc.dsn: 'TS'
alternative.returner.odbc.user: 'salt'
alternative.returner.odbc.passwd: 'salt'
Running the following commands against Microsoft SQL Server in the desired
database as the appropriate user should create the database tables
correctly. Replace with equivalent SQL for other ODBC-compliant servers
.. code-block:: sql
--
-- Table structure for table 'jids'
--
if OBJECT_ID('dbo.jids', 'U') is not null
DROP TABLE dbo.jids
CREATE TABLE dbo.jids (
jid varchar(255) PRIMARY KEY,
load varchar(MAX) NOT NULL
);
--
-- Table structure for table 'salt_returns'
--
IF OBJECT_ID('dbo.salt_returns', 'U') IS NOT NULL
DROP TABLE dbo.salt_returns;
CREATE TABLE dbo.salt_returns (
added datetime not null default (getdate()),
fun varchar(100) NOT NULL,
jid varchar(255) NOT NULL,
retval varchar(MAX) NOT NULL,
id varchar(255) NOT NULL,
success bit default(0) NOT NULL,
full_ret varchar(MAX)
);
CREATE INDEX salt_returns_added on dbo.salt_returns(added);
CREATE INDEX salt_returns_id on dbo.salt_returns(id);
CREATE INDEX salt_returns_jid on dbo.salt_returns(jid);
CREATE INDEX salt_returns_fun on dbo.salt_returns(fun);
To use this returner, append '--return odbc' to the salt command.
.. code-block:: bash
salt '*' status.diskusage --return odbc
To use the alternative configuration, append '--return_config alternative' to the salt command.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' test.ping --return odbc --return_config alternative
To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command.
.. versionadded:: 2016.3.0
.. code-block:: bash
salt '*' test.ping --return odbc --return_kwargs '{"dsn": "dsn-name"}'
'''
from __future__ import absolute_import
# Let's not allow PyLint to complain about string substitution
# pylint: disable=W1321,E1321
# Import python libs
import json
# Import Salt libs
import salt.utils.jid
import salt.returners
# FIXME We'll need to handle this differently for Windows.
# Import third party libs
try:
import pyodbc
#import psycopg2.extras
HAS_ODBC = True
except ImportError:
HAS_ODBC = False
# Define the module's virtual name
__virtualname__ = 'odbc'
def __virtual__():
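    '''
    Only load this returner if pyodbc is available.
    '''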
if not HAS_ODBC:
return False
return True
def _get_options(ret=None):
'''
Get the odbc options from salt.
'''
attrs = {'dsn': 'dsn',
'user': 'user',
'passwd': 'passwd'}
_options = salt.returners.get_returner_options('returner.{0}'.format(__virtualname__),
ret,
attrs,
__salt__=__salt__,
__opts__=__opts__)
return _options
def _get_conn(ret=None):
'''
    Return an ODBC connection.
'''
_options = _get_options(ret)
dsn = _options.get('dsn')
user = _options.get('user')
passwd = _options.get('passwd')
return pyodbc.connect('DSN={0};UID={1};PWD={2}'.format(
dsn,
user,
passwd))
def _close_conn(conn):
'''
    Close the ODBC connection.
'''
conn.commit()
conn.close()
def returner(ret):
'''
Return data to an odbc server
'''
conn = _get_conn(ret)
cur = conn.cursor()
sql = '''INSERT INTO salt_returns
(fun, jid, retval, id, success, full_ret)
VALUES (?, ?, ?, ?, ?, ?)'''
cur.execute(
sql, (
ret['fun'],
ret['jid'],
json.dumps(ret['return']),
ret['id'],
ret['success'],
json.dumps(ret)
)
)
_close_conn(conn)
def save_load(jid, load):
'''
Save the load to the specified jid id
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''INSERT INTO jids (jid, load) VALUES (?, ?)'''
cur.execute(sql, (jid, json.dumps(load)))
_close_conn(conn)
def save_minions(jid, minions): # pylint: disable=unused-argument
'''
Included for API consistency
'''
pass
def get_load(jid):
'''
Return the load data that marks a specified jid
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT load FROM jids WHERE jid = ?;'''
cur.execute(sql, (jid,))
    data = cur.fetchone()
    if data:
        _close_conn(conn)
        # the row's first column holds the serialized load
        return json.loads(data[0])
    _close_conn(conn)
    return {}
def get_jid(jid):
'''
Return the information returned when the specified job id was executed
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT id, full_ret FROM salt_returns WHERE jid = ?'''
cur.execute(sql, (jid,))
data = cur.fetchall()
ret = {}
if data:
for minion, full_ret in data:
ret[minion] = json.loads(full_ret)
_close_conn(conn)
return ret
def get_fun(fun):
'''
Return a dict of the last function called for all minions
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT s.id,s.jid, s.full_ret
FROM salt_returns s
JOIN ( SELECT MAX(jid) AS jid FROM salt_returns GROUP BY fun, id) max
ON s.jid = max.jid
WHERE s.fun = ?
'''
cur.execute(sql, (fun,))
data = cur.fetchall()
ret = {}
if data:
for minion, _, retval in data:
ret[minion] = json.loads(retval)
_close_conn(conn)
return ret
def get_jids():
'''
Return a list of all job ids
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT distinct jid, load FROM jids'''
cur.execute(sql)
data = cur.fetchall()
ret = {}
for jid, load in data:
ret[jid] = salt.utils.jid.format_jid_instance(jid, json.loads(load))
_close_conn(conn)
return ret
def get_minions():
'''
Return a list of minions
'''
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = '''SELECT DISTINCT id FROM salt_returns'''
cur.execute(sql)
data = cur.fetchall()
ret = []
for minion in data:
ret.append(minion[0])
_close_conn(conn)
return ret
def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument
'''
Do any work necessary to prepare a JID, including sending a custom id
'''
return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid()
|
#MenuTitle: Move Vietnamese Marks to top_viet Anchor in Circumflex
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
from builtins import str
__doc__="""
Where possible, puts acute(comb), grave(comb), hookabovecomb on 'top_viet' position in all layers in all selected glyphs. Assumes that you have a 'top_viet' anchor in circumflex. Useful for Vietnamese glyphs.
"""
accentsToBeMoved = [ "acute", "grave", "hookabovecomb", "acutecomb", "gravecomb" ]
newAnchor = "top_viet"
Font = Glyphs.font
selectedGlyphs = [ x.parent for x in Font.selectedLayers ]
def baseHasAnchor( thisComponent, masterID, anchorToLookFor = "top_viet" ):
baseGlyph = thisComponent.component
baseLayer = baseGlyph.layers[masterID]
baseAnchors = [a for a in baseLayer.anchors]
anchorIsInLayer = False
for i in range(len(baseAnchors)):
if baseAnchors[i].name == anchorToLookFor:
anchorIsInLayer = True
return anchorIsInLayer
def nameUntilFirstDot( thisName ):
dotIndex = thisName.find(".")
if dotIndex > 0:
return thisName[:dotIndex]
else:
return thisName
def process( thisGlyph ):
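	# For each master layer of the glyph: find accent components from the
	# list above, and if the preceding base component carries the target
	# anchor, re-attach the accent to that anchor.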
statusString = "Processing %s" % thisGlyph.name
for thisMaster in Font.masters:
thisLayerID = thisMaster.id
thisLayer = thisGlyph.layers[ thisLayerID ]
		for thisComponentIndex in range( 1, len( thisLayer.components )):
accentComponent = thisLayer.components[ thisComponentIndex ]
accentName = nameUntilFirstDot( accentComponent.componentName )
if accentName in accentsToBeMoved:
baseComponent = thisLayer.components[ thisComponentIndex - 1 ]
if baseComponent:
if baseHasAnchor( baseComponent, thisLayerID, anchorToLookFor=newAnchor ):
try:
thisLayer.components[ thisComponentIndex ].setAnchor_( newAnchor )
statusString += "\n %s: Moved %s on %s." % ( thisLayer.name, accentName, newAnchor )
except Exception as e:
return "\nERROR in %s %s:\nCould not move %s onto %s.\n%s" % ( thisGlyph.name, thisLayer.name, accentName, newAnchor, e )
return statusString
for thisGlyph in selectedGlyphs:
thisGlyph.beginUndo()
print(process( thisGlyph ))
thisGlyph.endUndo()
|
import numpy as np
import cv2
class SeamCarver:
def __init__(self, filename, out_height, out_width, protect_mask='', object_mask=''):
# initialize parameter
self.filename = filename
self.out_height = out_height
self.out_width = out_width
# read in image and store as np.float64 format
self.in_image = cv2.imread(filename).astype(np.float64)
self.in_height, self.in_width = self.in_image.shape[: 2]
# keep tracking resulting image
self.out_image = np.copy(self.in_image)
# object removal --> self.object = True
self.object = (object_mask != '')
if self.object:
# read in object mask image file as np.float64 format in gray scale
self.mask = cv2.imread(object_mask, 0).astype(np.float64)
self.protect = False
# image re-sizing with or without protect mask
else:
self.protect = (protect_mask != '')
if self.protect:
# if protect_mask filename is provided, read in protect mask image file as np.float64 format in gray scale
self.mask = cv2.imread(protect_mask, 0).astype(np.float64)
# kernel for forward energy map calculation
self.kernel_x = np.array([[0., 0., 0.], [-1., 0., 1.], [0., 0., 0.]], dtype=np.float64)
self.kernel_y_left = np.array([[0., 0., 0.], [0., 0., 1.], [0., -1., 0.]], dtype=np.float64)
self.kernel_y_right = np.array([[0., 0., 0.], [1., 0., 0.], [0., -1., 0.]], dtype=np.float64)
# constant for covered area by protect mask or object mask
self.constant = 1000
# starting program
self.start()
def start(self):
"""
:return:
If object mask is provided --> object removal function will be executed
else --> seam carving function (image retargeting) will be process
"""
if self.object:
self.object_removal()
        elif self.out_height == 0:
self.content_amplify()
else:
self.seams_carving()
def content_amplify(self):
in_height = self.in_height
in_width = self.in_width
scaled_img = cv2.resize(self.in_image,(int(in_width*1.5),int(in_height*1.5)),interpolation=cv2.INTER_CUBIC)
self.in_height = int(in_height*1.5)
self.in_width = int(in_width*1.5)
self.in_image = scaled_img
self.out_image = np.copy(self.in_image)
self.out_height = in_height
self.out_width = in_width
self.seams_carving()
def seams_carving(self):
"""
:return:
We first process seam insertion or removal in vertical direction then followed by horizontal direction.
If targeting height or width is greater than original ones --> seam insertion,
else --> seam removal
The algorithm is written for seam processing in vertical direction (column), so image is rotated 90 degree
counter-clockwise for seam processing in horizontal direction (row)
"""
# calculate number of rows and columns needed to be inserted or removed
delta_row, delta_col = int(self.out_height - self.in_height), int(self.out_width - self.in_width)
# remove column
if delta_col < 0:
self.seams_removal(delta_col * -1)
# insert column
elif delta_col > 0:
self.seams_insertion(delta_col)
# remove row
if delta_row < 0:
self.out_image = self.rotate_image(self.out_image, 1)
if self.protect:
self.mask = self.rotate_mask(self.mask, 1)
self.seams_removal(delta_row * -1)
self.out_image = self.rotate_image(self.out_image, 0)
# insert row
elif delta_row > 0:
self.out_image = self.rotate_image(self.out_image, 1)
if self.protect:
self.mask = self.rotate_mask(self.mask, 1)
self.seams_insertion(delta_row)
self.out_image = self.rotate_image(self.out_image, 0)
def object_removal(self):
"""
:return:
Object covered by mask will be removed first and seam will be inserted to return to original image dimension
"""
rotate = False
object_height, object_width = self.get_object_dimension()
if object_height < object_width:
self.out_image = self.rotate_image(self.out_image, 1)
self.mask = self.rotate_mask(self.mask, 1)
rotate = True
while len(np.where(self.mask[:, :] > 0)[0]) > 0:
energy_map = self.calc_energy_map()
energy_map[np.where(self.mask[:, :] > 0)] *= -self.constant
cumulative_map = self.cumulative_map_forward(energy_map)
seam_idx = self.find_seam(cumulative_map)
self.delete_seam(seam_idx)
self.delete_seam_on_mask(seam_idx)
if not rotate:
num_pixels = self.in_width - self.out_image.shape[1]
else:
num_pixels = self.in_height - self.out_image.shape[1]
self.seams_insertion(num_pixels)
if rotate:
self.out_image = self.rotate_image(self.out_image, 0)
def seams_removal(self, num_pixel):
if self.protect:
for dummy in range(num_pixel):
energy_map = self.calc_energy_map()
energy_map[np.where(self.mask > 0)] *= self.constant
cumulative_map = self.cumulative_map_forward(energy_map)
seam_idx = self.find_seam(cumulative_map)
self.delete_seam(seam_idx)
self.delete_seam_on_mask(seam_idx)
else:
for dummy in range(num_pixel):
energy_map = self.calc_energy_map()
cumulative_map = self.cumulative_map_forward(energy_map)
seam_idx = self.find_seam(cumulative_map)
self.delete_seam(seam_idx)
def seams_insertion(self, num_pixel):
if self.protect:
temp_image = np.copy(self.out_image)
temp_mask = np.copy(self.mask)
seams_record = []
for dummy in range(num_pixel):
energy_map = self.calc_energy_map()
energy_map[np.where(self.mask[:, :] > 0)] *= self.constant
cumulative_map = self.cumulative_map_backward(energy_map)
seam_idx = self.find_seam(cumulative_map)
seams_record.append(seam_idx)
self.delete_seam(seam_idx)
self.delete_seam_on_mask(seam_idx)
self.out_image = np.copy(temp_image)
self.mask = np.copy(temp_mask)
n = len(seams_record)
for dummy in range(n):
seam = seams_record.pop(0)
self.add_seam(seam)
self.add_seam_on_mask(seam)
seams_record = self.update_seams(seams_record, seam)
else:
temp_image = np.copy(self.out_image)
seams_record = []
for dummy in range(num_pixel):
energy_map = self.calc_energy_map()
cumulative_map = self.cumulative_map_backward(energy_map)
seam_idx = self.find_seam(cumulative_map)
seams_record.append(seam_idx)
self.delete_seam(seam_idx)
self.out_image = np.copy(temp_image)
n = len(seams_record)
for dummy in range(n):
seam = seams_record.pop(0)
self.add_seam(seam)
seams_record = self.update_seams(seams_record, seam)
def calc_energy_map(self):
b, g, r = cv2.split(self.out_image)
b_energy = np.absolute(cv2.Scharr(b, -1, 1, 0)) + np.absolute(cv2.Scharr(b, -1, 0, 1))
g_energy = np.absolute(cv2.Scharr(g, -1, 1, 0)) + np.absolute(cv2.Scharr(g, -1, 0, 1))
r_energy = np.absolute(cv2.Scharr(r, -1, 1, 0)) + np.absolute(cv2.Scharr(r, -1, 0, 1))
return b_energy + g_energy + r_energy
def cumulative_map_backward(self, energy_map):
m, n = energy_map.shape
output = np.copy(energy_map)
for row in range(1, m):
for col in range(n):
                output[row, col] = \
                    energy_map[row, col] + np.amin(output[row - 1, max(col - 1, 0): min(col + 2, n)])
return output
def cumulative_map_forward(self, energy_map):
matrix_x = self.calc_neighbor_matrix(self.kernel_x)
matrix_y_left = self.calc_neighbor_matrix(self.kernel_y_left)
matrix_y_right = self.calc_neighbor_matrix(self.kernel_y_right)
m, n = energy_map.shape
output = np.copy(energy_map)
for row in range(1, m):
for col in range(n):
if col == 0:
e_right = output[row - 1, col + 1] + matrix_x[row - 1, col + 1] + matrix_y_right[row - 1, col + 1]
e_up = output[row - 1, col] + matrix_x[row - 1, col]
output[row, col] = energy_map[row, col] + min(e_right, e_up)
elif col == n - 1:
e_left = output[row - 1, col - 1] + matrix_x[row - 1, col - 1] + matrix_y_left[row - 1, col - 1]
e_up = output[row - 1, col] + matrix_x[row - 1, col]
output[row, col] = energy_map[row, col] + min(e_left, e_up)
else:
e_left = output[row - 1, col - 1] + matrix_x[row - 1, col - 1] + matrix_y_left[row - 1, col - 1]
e_right = output[row - 1, col + 1] + matrix_x[row - 1, col + 1] + matrix_y_right[row - 1, col + 1]
e_up = output[row - 1, col] + matrix_x[row - 1, col]
output[row, col] = energy_map[row, col] + min(e_left, e_right, e_up)
return output
def calc_neighbor_matrix(self, kernel):
b, g, r = cv2.split(self.out_image)
output = np.absolute(cv2.filter2D(b, -1, kernel=kernel)) + \
np.absolute(cv2.filter2D(g, -1, kernel=kernel)) + \
np.absolute(cv2.filter2D(r, -1, kernel=kernel))
return output
def find_seam(self, cumulative_map):
m, n = cumulative_map.shape
output = np.zeros((m,), dtype=np.uint32)
output[-1] = np.argmin(cumulative_map[-1])
for row in range(m - 2, -1, -1):
prv_x = output[row + 1]
if prv_x == 0:
output[row] = np.argmin(cumulative_map[row, : 2])
else:
                output[row] = np.argmin(cumulative_map[row, prv_x - 1: min(prv_x + 2, n)]) + prv_x - 1
return output
def delete_seam(self, seam_idx):
m, n = self.out_image.shape[: 2]
output = np.zeros((m, n - 1, 3))
for row in range(m):
col = seam_idx[row]
output[row, :, 0] = np.delete(self.out_image[row, :, 0], [col])
output[row, :, 1] = np.delete(self.out_image[row, :, 1], [col])
output[row, :, 2] = np.delete(self.out_image[row, :, 2], [col])
self.out_image = np.copy(output)
def add_seam(self, seam_idx):
m, n = self.out_image.shape[: 2]
output = np.zeros((m, n + 1, 3))
for row in range(m):
col = seam_idx[row]
for ch in range(3):
if col == 0:
p = np.average(self.out_image[row, col: col + 2, ch])
output[row, col, ch] = self.out_image[row, col, ch]
output[row, col + 1, ch] = p
output[row, col + 1:, ch] = self.out_image[row, col:, ch]
else:
p = np.average(self.out_image[row, col - 1: col + 1, ch])
output[row, : col, ch] = self.out_image[row, : col, ch]
output[row, col, ch] = p
output[row, col + 1:, ch] = self.out_image[row, col:, ch]
self.out_image = np.copy(output)
def update_seams(self, remaining_seams, current_seam):
output = []
for seam in remaining_seams:
seam[np.where(seam >= current_seam)] += 2
output.append(seam)
return output
def rotate_image(self, image, ccw):
m, n, ch = image.shape
output = np.zeros((n, m, ch))
if ccw:
image_flip = np.fliplr(image)
for c in range(ch):
for row in range(m):
output[:, row, c] = image_flip[row, :, c]
else:
for c in range(ch):
for row in range(m):
output[:, m - 1 - row, c] = image[row, :, c]
return output
def rotate_mask(self, mask, ccw):
m, n = mask.shape
output = np.zeros((n, m))
if ccw > 0:
image_flip = np.fliplr(mask)
for row in range(m):
output[:, row] = image_flip[row, : ]
else:
for row in range(m):
output[:, m - 1 - row] = mask[row, : ]
return output
def delete_seam_on_mask(self, seam_idx):
m, n = self.mask.shape
output = np.zeros((m, n - 1))
for row in range(m):
col = seam_idx[row]
output[row, : ] = np.delete(self.mask[row, : ], [col])
self.mask = np.copy(output)
def add_seam_on_mask(self, seam_idx):
m, n = self.mask.shape
output = np.zeros((m, n + 1))
for row in range(m):
col = seam_idx[row]
if col == 0:
p = np.average(self.mask[row, col: col + 2])
output[row, col] = self.mask[row, col]
output[row, col + 1] = p
output[row, col + 1: ] = self.mask[row, col: ]
else:
p = np.average(self.mask[row, col - 1: col + 1])
output[row, : col] = self.mask[row, : col]
output[row, col] = p
output[row, col + 1: ] = self.mask[row, col: ]
self.mask = np.copy(output)
def get_object_dimension(self):
rows, cols = np.where(self.mask > 0)
height = np.amax(rows) - np.amin(rows) + 1
width = np.amax(cols) - np.amin(cols) + 1
return height, width
def save_result(self, filename):
cv2.imwrite(filename, self.out_image.astype(np.uint8))
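# Minimal usage sketch (hypothetical file names; the constructor runs the
# whole pipeline immediately via self.start()):
#
#   carver = SeamCarver('in.jpg', out_height=384, out_width=512)
#   carver.save_result('out.jpg')
#
# Passing object_mask switches to object removal mode; protect_mask shields
# the masked region while re-sizing.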
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiSalesKbassetStuffCancelstockinorderQueryModel(object):
def __init__(self):
self._ext_info = None
self._page_no = None
self._page_size = None
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
self._ext_info = value
@property
def page_no(self):
return self._page_no
@page_no.setter
def page_no(self, value):
self._page_no = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
def to_alipay_dict(self):
params = dict()
if self.ext_info:
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.page_no:
if hasattr(self.page_no, 'to_alipay_dict'):
params['page_no'] = self.page_no.to_alipay_dict()
else:
params['page_no'] = self.page_no
if self.page_size:
if hasattr(self.page_size, 'to_alipay_dict'):
params['page_size'] = self.page_size.to_alipay_dict()
else:
params['page_size'] = self.page_size
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiSalesKbassetStuffCancelstockinorderQueryModel()
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'page_no' in d:
o.page_no = d['page_no']
if 'page_size' in d:
o.page_size = d['page_size']
return o
|
#!/usr/bin/env python
import numpy as np
def convert_to_x4_q7_weights(weights):
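    """Interleaves a [r, h, w, c] q7 weight tensor four rows at a time into
    the column order produced by the index pattern below (intended for "x4"
    optimized fully-connected kernels; leftover rows/columns stay in order).
    """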
[r, h, w, c] = weights.shape
weights = np.reshape(weights, (r, h*w*c))
num_of_rows = r
num_of_cols = h*w*c
new_weights = np.copy(weights)
new_weights = np.reshape(new_weights, (r*h*w*c))
counter = 0
    for i in range(int(num_of_rows) // 4):
# we only need to do the re-ordering for every 4 rows
row_base = 4*i
        for j in range(int(num_of_cols) // 4):
# for each 4 entries
column_base = 4*j
new_weights[counter] = weights[row_base ][column_base ]
new_weights[counter+1] = weights[row_base+1][column_base ]
new_weights[counter+2] = weights[row_base ][column_base+2]
new_weights[counter+3] = weights[row_base+1][column_base+2]
new_weights[counter+4] = weights[row_base+2][column_base ]
new_weights[counter+5] = weights[row_base+3][column_base ]
new_weights[counter+6] = weights[row_base+2][column_base+2]
new_weights[counter+7] = weights[row_base+3][column_base+2]
new_weights[counter+8] = weights[row_base ][column_base+1]
new_weights[counter+9] = weights[row_base+1][column_base+1]
new_weights[counter+10] = weights[row_base ][column_base+3]
new_weights[counter+11] = weights[row_base+1][column_base+3]
new_weights[counter+12] = weights[row_base+2][column_base+1]
new_weights[counter+13] = weights[row_base+3][column_base+1]
new_weights[counter+14] = weights[row_base+2][column_base+3]
new_weights[counter+15] = weights[row_base+3][column_base+3]
counter = counter + 16
# the remaining ones are in order
        for j in range(int(num_of_cols - num_of_cols % 4), int(num_of_cols)):
new_weights[counter] = weights[row_base][j]
new_weights[counter+1] = weights[row_base+1][j]
new_weights[counter+2] = weights[row_base+2][j]
new_weights[counter+3] = weights[row_base+3][j]
counter = counter + 4
return new_weights
def convert_to_x4_q15_weights(weights):
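    """Same four-row interleaving idea as convert_to_x4_q7_weights, but q15
    entries are grouped two columns at a time (see the index pattern below).
    """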
[r, h, w, c] = weights.shape
weights = np.reshape(weights, (r, h*w*c))
num_of_rows = r
num_of_cols = h*w*c
new_weights = np.copy(weights)
new_weights = np.reshape(new_weights, (r*h*w*c))
counter = 0
    for i in range(int(num_of_rows) // 4):
# we only need to do the re-ordering for every 4 rows
row_base = 4*i
        for j in range(int(num_of_cols) // 2):
# for each 2 entries
column_base = 2*j
new_weights[counter] = weights[row_base ][column_base ]
new_weights[counter+1] = weights[row_base ][column_base+1]
new_weights[counter+2] = weights[row_base+1][column_base ]
new_weights[counter+3] = weights[row_base+1][column_base+1]
new_weights[counter+4] = weights[row_base+2][column_base ]
new_weights[counter+5] = weights[row_base+2][column_base+1]
new_weights[counter+6] = weights[row_base+3][column_base ]
new_weights[counter+7] = weights[row_base+3][column_base+1]
counter = counter + 8
# the remaining ones are in order
        for j in range(int(num_of_cols - num_of_cols % 2), int(num_of_cols)):
new_weights[counter] = weights[row_base][j]
new_weights[counter+1] = weights[row_base+1][j]
new_weights[counter+2] = weights[row_base+2][j]
new_weights[counter+3] = weights[row_base+3][j]
counter = counter + 4
return new_weights
def convert_q7_q15_weights(weights):
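    """Reorders q7 weights four rows at a time for kernels that expand q7 to
    q15 pairs on the fly; columns are grouped in twos (order inferred from
    the index pattern below)."""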
[r, h, w, c] = weights.shape
weights = np.reshape(weights, (r, h*w*c))
num_of_rows = r
num_of_cols = h*w*c
new_weights = np.copy(weights)
new_weights = np.reshape(new_weights, (r*h*w*c))
counter = 0
    for i in range(int(num_of_rows) // 4):
# we only need to do the re-ordering for every 4 rows
row_base = 4*i
        for j in range(int(num_of_cols) // 2):
# for each 2 entries
column_base = 2*j
new_weights[counter] = weights[row_base ][column_base ]
new_weights[counter+1] = weights[row_base+1][column_base ]
new_weights[counter+2] = weights[row_base ][column_base+1]
new_weights[counter+3] = weights[row_base+1][column_base+1]
new_weights[counter+4] = weights[row_base+2][column_base ]
new_weights[counter+5] = weights[row_base+3][column_base ]
new_weights[counter+6] = weights[row_base+2][column_base+1]
new_weights[counter+7] = weights[row_base+3][column_base+1]
counter = counter + 8
# the remaining ones are in order
        for j in range(int(num_of_cols - num_of_cols % 2), int(num_of_cols)):
new_weights[counter] = weights[row_base][j]
new_weights[counter+1] = weights[row_base+1][j]
new_weights[counter+2] = weights[row_base+2][j]
new_weights[counter+3] = weights[row_base+3][j]
counter = counter + 4
return new_weights
# input dimensions
vec_dim = 127
row_dim = 127
weight = np.zeros((row_dim,vec_dim), dtype=int)
# generate random inputs
for i in range(row_dim):
for j in range(vec_dim):
weight[i][j] = np.random.randint(256)-128
weight = np.reshape(weight, (row_dim, vec_dim, 1, 1))
outfile = open("../Ref_Implementations/fully_connected_testing_weights.h", "w")
outfile.write("#define IP2_WEIGHT {")
weight.tofile(outfile,sep=",",format="%d")
outfile.write("}\n\n")
new_weight = convert_to_x4_q7_weights(weight)
outfile.write("#define IP4_WEIGHT {")
new_weight.tofile(outfile,sep=",",format="%d")
outfile.write("}\n\n")
new_weight = convert_q7_q15_weights(weight)
outfile.write("#define IP4_q7_q15_WEIGHT {")
new_weight.tofile(outfile,sep=",",format="%d")
outfile.write("}\n\n")
new_weight = convert_to_x4_q15_weights(weight)
outfile.write("#define IP4_WEIGHT_Q15 {")
new_weight.tofile(outfile,sep=",",format="%d")
outfile.write("}\n\n")
outfile.close()
|
from .motifprogram import MotifProgram
import io
import re
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from gimmemotifs.motif import Motif
class Meme(MotifProgram):
"""
Predict motifs using MEME.
Reference:
"""
def __init__(self):
self.name = "MEME"
self.cmd = "meme"
self.use_width = True
def _run_program(self, bin, fastafile, params=None):
"""
Run MEME and predict motifs from a FASTA file.
Parameters
----------
bin : str
Command used to run the tool.
fastafile : str
Name of the FASTA input file.
params : dict, optional
Optional parameters. For some of the tools required parameters
are passed using this dictionary.
Returns
-------
motifs : list of Motif instances
The predicted motifs.
stdout : str
Standard out of the tool.
stderr : str
Standard error of the tool.
"""
default_params = {"width": 10, "single": False, "number": 10}
if params is not None:
default_params.update(params)
tmp = NamedTemporaryFile(dir=self.tmpdir)
strand = "-revcomp"
width = default_params["width"]
number = default_params["number"]
cmd = [
bin,
fastafile,
"-text",
"-dna",
"-nostatus",
"-mod",
"zoops",
"-nmotifs",
"%s" % number,
"-w",
"%s" % width,
"-maxsize",
"10000000",
]
if not default_params["single"]:
cmd.append(strand)
# sys.stderr.write(" ".join(cmd) + "\n")
p = Popen(cmd, bufsize=1, stderr=PIPE, stdout=PIPE)
stdout, stderr = p.communicate()
        motifs = self.parse(io.StringIO(stdout.decode()))
# Delete temporary files
tmp.close()
return motifs, stdout, stderr
def parse(self, fo):
"""
Convert MEME output to motifs
Parameters
----------
fo : file-like
File object containing MEME output.
Returns
-------
motifs : list
List of Motif instances.
"""
motifs = []
nucs = {"A": 0, "C": 1, "G": 2, "T": 3}
p = re.compile(r"MOTIF.+MEME-(\d+)\s*width\s*=\s*(\d+)\s+sites\s*=\s*(\d+)")
pa = re.compile(r"\)\s+([A-Z]+)")
line = fo.readline()
while line:
m = p.search(line)
align = []
pfm = None
if m:
# print(m.group(0))
id = "%s_%s_w%s" % (self.name, m.group(1), m.group(2))
while not line.startswith("//"):
ma = pa.search(line)
if ma:
# print(ma.group(0))
match = ma.group(1)
align.append(match)
if not pfm:
pfm = [[0 for x in range(4)] for x in range(len(match))]
for pos in range(len(match)):
if match[pos] in nucs:
pfm[pos][nucs[match[pos]]] += 1
else:
for i in range(4):
pfm[pos][i] += 0.25
line = fo.readline()
motifs.append(Motif(pfm[:]))
motifs[-1].id = id
motifs[-1].align = align[:]
line = fo.readline()
return motifs
|
import src.util.type_helper as th
class Converters:
def __init__(self, *args, **kwargs):
self.verbose = False
self.debug = False
if 'verbose' in kwargs:
value = kwargs.get('verbose')
th.validate(name_of_value='verbose', value_to_check=value, d_type=bool)
self.verbose = value
if 'debug' in kwargs:
value = kwargs.get('debug')
th.validate(name_of_value='debug', value_to_check=value, d_type=bool)
            self.debug = value
def extract(self, options):
return None
|
from subprocess import Popen, PIPE
from config_parser import getModuleIntervals
from config_parser import getTimestampsFile
import sys
import pickle
import time
import myexceptions
def execute(command):
output = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
return output.stdout.readlines()
def getExecTimestamps(timestamps_file):
    try:
        with open(timestamps_file, 'rb') as f:
            timestamps = pickle.load(f)
        if not isinstance(timestamps, dict):
            raise myexceptions.BadSyntaxError("[Error] Bad syntax in timestamps file")
        return timestamps
    except IOError as args:
        print(args)
        print("Considering no previous executions")
        return {'memory': 0.0, 'acls': 0.0, 'checksums': 0.0, 'connections': 0.0, 'logs': 0.0}
except IndexError:
raise myexceptions.BadSyntaxError("[Error] Bad syntax in timestamps file")
def putExecTimestamps(timestamps_file, timestamps):
    try:
        with open(timestamps_file, 'wb') as f:
            pickle.dump(timestamps, f)
    except IOError as args:
        print(args)
        sys.exit()
def update_timestamps(timestamps, turns):
for module, turn in turns.items():
if turn:
timestamps[module] = time.time() / 60.0
return timestamps
def calculate_turns(intervals, timestamps, minutes_from_epoch):
turns = {}
for module,interval in intervals.items():
last_exec = timestamps[module]
if (last_exec + interval) < minutes_from_epoch:
turns[module] = True
timestamps[module] = minutes_from_epoch
else:
turns[module] = False
return turns
def getModuleTurns(config_file):
timestamps_file = getTimestampsFile(config_file)
intervals = getModuleIntervals(config_file)
timestamps = getExecTimestamps(timestamps_file)
turns = calculate_turns(intervals, timestamps, time.time()/60.0)
timestamps = update_timestamps(timestamps, turns)
putExecTimestamps(timestamps_file, timestamps)
return turns
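# A minimal sketch of the scheduling logic above: timestamps are expressed in
# minutes-from-epoch and a module is due when last_exec + interval < now.
# (Module names and values below are hypothetical.)
if __name__ == '__main__':
    now = time.time() / 60.0
    intervals = {'memory': 5.0, 'logs': 60.0}
    stamps = {'memory': now - 10.0, 'logs': now - 30.0}
    print(calculate_turns(intervals, stamps, now))  # {'memory': True, 'logs': False}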
|
# Generated by Django 3.2.5 on 2021-08-10 21:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0014_corereinvitation'),
]
operations = [
migrations.AddField(
model_name='historicalmanuscript',
name='additional_info',
field=models.CharField(blank=True, default='', help_text='Additional info about the manuscript (e.g., approved exemptions, restricted data, etc).', max_length=1024, null=True, verbose_name='Additional Info'),
),
migrations.AddField(
model_name='manuscript',
name='additional_info',
field=models.CharField(blank=True, default='', help_text='Additional info about the manuscript (e.g., approved exemptions, restricted data, etc).', max_length=1024, null=True, verbose_name='Additional Info'),
),
migrations.AlterField(
model_name='historicalmanuscript',
name='contact_first_name',
field=models.CharField(blank=True, help_text='Given name of the publication contact that will be stored in Dataverse', max_length=150, verbose_name='Contact Given Name'),
),
migrations.AlterField(
model_name='historicalmanuscript',
name='contact_last_name',
field=models.CharField(blank=True, help_text='Surname of the publication contact that will be stored in Dataverse', max_length=150, verbose_name='Contact Surname'),
),
migrations.AlterField(
model_name='historicalmanuscript',
name='description',
field=models.CharField(blank=True, default='', help_text='The abstract for the manuscript', max_length=1024, null=True, verbose_name='Abstract'),
),
migrations.AlterField(
model_name='historicalmanuscript',
name='pub_id',
field=models.CharField(blank=True, db_index=True, default='', help_text='The internal ID from the publication', max_length=200, null=True, verbose_name='Manuscript #'),
),
migrations.AlterField(
model_name='historicalmanuscript',
name='qual_analysis',
field=models.BooleanField(blank=True, default=False, help_text='Whether this manuscript includes qualitative analysis', null=True, verbose_name='Qualitative Analysis'),
),
migrations.AlterField(
model_name='manuscript',
name='contact_first_name',
field=models.CharField(blank=True, help_text='Given name of the publication contact that will be stored in Dataverse', max_length=150, verbose_name='Contact Given Name'),
),
migrations.AlterField(
model_name='manuscript',
name='contact_last_name',
field=models.CharField(blank=True, help_text='Surname of the publication contact that will be stored in Dataverse', max_length=150, verbose_name='Contact Surname'),
),
migrations.AlterField(
model_name='manuscript',
name='description',
field=models.CharField(blank=True, default='', help_text='The abstract for the manuscript', max_length=1024, null=True, verbose_name='Abstract'),
),
migrations.AlterField(
model_name='manuscript',
name='pub_id',
field=models.CharField(blank=True, db_index=True, default='', help_text='The internal ID from the publication', max_length=200, null=True, verbose_name='Manuscript #'),
),
migrations.AlterField(
model_name='manuscript',
name='qual_analysis',
field=models.BooleanField(blank=True, default=False, help_text='Whether this manuscript includes qualitative analysis', null=True, verbose_name='Qualitative Analysis'),
),
]
|
# coding: utf-8
"""
Cherwell REST API
Unofficial Python Cherwell REST API library. # noqa: E501
The version of the OpenAPI document: 9.3.2
Contact: See AUTHORS.
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from pycherwell.configuration import Configuration
class SaveApiClientSettingRequest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'allow_anonymous_access': 'bool',
'api_access_is_enabled': 'bool',
'create_new_client_key': 'bool',
'culture': 'str',
'description': 'str',
'name': 'str',
'refresh_token_lifespan_minutes': 'int',
'stand_in_key': 'str',
'token_lifespan_minutes': 'int'
}
attribute_map = {
'allow_anonymous_access': 'allowAnonymousAccess',
'api_access_is_enabled': 'apiAccessIsEnabled',
'create_new_client_key': 'createNewClientKey',
'culture': 'culture',
'description': 'description',
'name': 'name',
'refresh_token_lifespan_minutes': 'refreshTokenLifespanMinutes',
'stand_in_key': 'standInKey',
'token_lifespan_minutes': 'tokenLifespanMinutes'
}
def __init__(self, allow_anonymous_access=None, api_access_is_enabled=None, create_new_client_key=None, culture=None, description=None, name=None, refresh_token_lifespan_minutes=None, stand_in_key=None, token_lifespan_minutes=None, local_vars_configuration=None): # noqa: E501
"""SaveApiClientSettingRequest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._allow_anonymous_access = None
self._api_access_is_enabled = None
self._create_new_client_key = None
self._culture = None
self._description = None
self._name = None
self._refresh_token_lifespan_minutes = None
self._stand_in_key = None
self._token_lifespan_minutes = None
self.discriminator = None
if allow_anonymous_access is not None:
self.allow_anonymous_access = allow_anonymous_access
if api_access_is_enabled is not None:
self.api_access_is_enabled = api_access_is_enabled
if create_new_client_key is not None:
self.create_new_client_key = create_new_client_key
if culture is not None:
self.culture = culture
if description is not None:
self.description = description
if name is not None:
self.name = name
if refresh_token_lifespan_minutes is not None:
self.refresh_token_lifespan_minutes = refresh_token_lifespan_minutes
if stand_in_key is not None:
self.stand_in_key = stand_in_key
if token_lifespan_minutes is not None:
self.token_lifespan_minutes = token_lifespan_minutes
@property
def allow_anonymous_access(self):
"""Gets the allow_anonymous_access of this SaveApiClientSettingRequest. # noqa: E501
:return: The allow_anonymous_access of this SaveApiClientSettingRequest. # noqa: E501
:rtype: bool
"""
return self._allow_anonymous_access
@allow_anonymous_access.setter
def allow_anonymous_access(self, allow_anonymous_access):
"""Sets the allow_anonymous_access of this SaveApiClientSettingRequest.
:param allow_anonymous_access: The allow_anonymous_access of this SaveApiClientSettingRequest. # noqa: E501
:type: bool
"""
self._allow_anonymous_access = allow_anonymous_access
@property
def api_access_is_enabled(self):
"""Gets the api_access_is_enabled of this SaveApiClientSettingRequest. # noqa: E501
:return: The api_access_is_enabled of this SaveApiClientSettingRequest. # noqa: E501
:rtype: bool
"""
return self._api_access_is_enabled
@api_access_is_enabled.setter
def api_access_is_enabled(self, api_access_is_enabled):
"""Sets the api_access_is_enabled of this SaveApiClientSettingRequest.
:param api_access_is_enabled: The api_access_is_enabled of this SaveApiClientSettingRequest. # noqa: E501
:type: bool
"""
self._api_access_is_enabled = api_access_is_enabled
@property
def create_new_client_key(self):
"""Gets the create_new_client_key of this SaveApiClientSettingRequest. # noqa: E501
:return: The create_new_client_key of this SaveApiClientSettingRequest. # noqa: E501
:rtype: bool
"""
return self._create_new_client_key
@create_new_client_key.setter
def create_new_client_key(self, create_new_client_key):
"""Sets the create_new_client_key of this SaveApiClientSettingRequest.
:param create_new_client_key: The create_new_client_key of this SaveApiClientSettingRequest. # noqa: E501
:type: bool
"""
self._create_new_client_key = create_new_client_key
@property
def culture(self):
"""Gets the culture of this SaveApiClientSettingRequest. # noqa: E501
:return: The culture of this SaveApiClientSettingRequest. # noqa: E501
:rtype: str
"""
return self._culture
@culture.setter
def culture(self, culture):
"""Sets the culture of this SaveApiClientSettingRequest.
:param culture: The culture of this SaveApiClientSettingRequest. # noqa: E501
:type: str
"""
self._culture = culture
@property
def description(self):
"""Gets the description of this SaveApiClientSettingRequest. # noqa: E501
:return: The description of this SaveApiClientSettingRequest. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this SaveApiClientSettingRequest.
:param description: The description of this SaveApiClientSettingRequest. # noqa: E501
:type: str
"""
self._description = description
@property
def name(self):
"""Gets the name of this SaveApiClientSettingRequest. # noqa: E501
:return: The name of this SaveApiClientSettingRequest. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this SaveApiClientSettingRequest.
:param name: The name of this SaveApiClientSettingRequest. # noqa: E501
:type: str
"""
self._name = name
@property
def refresh_token_lifespan_minutes(self):
"""Gets the refresh_token_lifespan_minutes of this SaveApiClientSettingRequest. # noqa: E501
:return: The refresh_token_lifespan_minutes of this SaveApiClientSettingRequest. # noqa: E501
:rtype: int
"""
return self._refresh_token_lifespan_minutes
@refresh_token_lifespan_minutes.setter
def refresh_token_lifespan_minutes(self, refresh_token_lifespan_minutes):
"""Sets the refresh_token_lifespan_minutes of this SaveApiClientSettingRequest.
:param refresh_token_lifespan_minutes: The refresh_token_lifespan_minutes of this SaveApiClientSettingRequest. # noqa: E501
:type: int
"""
self._refresh_token_lifespan_minutes = refresh_token_lifespan_minutes
@property
def stand_in_key(self):
"""Gets the stand_in_key of this SaveApiClientSettingRequest. # noqa: E501
:return: The stand_in_key of this SaveApiClientSettingRequest. # noqa: E501
:rtype: str
"""
return self._stand_in_key
@stand_in_key.setter
def stand_in_key(self, stand_in_key):
"""Sets the stand_in_key of this SaveApiClientSettingRequest.
:param stand_in_key: The stand_in_key of this SaveApiClientSettingRequest. # noqa: E501
:type: str
"""
self._stand_in_key = stand_in_key
@property
def token_lifespan_minutes(self):
"""Gets the token_lifespan_minutes of this SaveApiClientSettingRequest. # noqa: E501
:return: The token_lifespan_minutes of this SaveApiClientSettingRequest. # noqa: E501
:rtype: int
"""
return self._token_lifespan_minutes
@token_lifespan_minutes.setter
def token_lifespan_minutes(self, token_lifespan_minutes):
"""Sets the token_lifespan_minutes of this SaveApiClientSettingRequest.
:param token_lifespan_minutes: The token_lifespan_minutes of this SaveApiClientSettingRequest. # noqa: E501
:type: int
"""
self._token_lifespan_minutes = token_lifespan_minutes
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SaveApiClientSettingRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SaveApiClientSettingRequest):
return True
return self.to_dict() != other.to_dict()
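# A minimal usage sketch of the generated model above (field values are
# hypothetical; unset attributes simply remain None in to_dict()):
#
#   req = SaveApiClientSettingRequest(name='integration-client',
#                                     culture='en-US',
#                                     token_lifespan_minutes=60)
#   req.to_dict()['name']  # 'integration-client'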
|
"""info_variables.py
Used to provide basic variable information in a way that
can be useful for beginners without overwhelming them.
"""
import ast
import builtins
import sys
from . import utils
from . import debug_helper
from . import token_utils
from .path_info import path_utils
from .my_gettext import current_lang
# third-party
try:
from asttokens import ASTTokens # noqa
from pure_eval import Evaluator, group_expressions # noqa
except ImportError:
pass # ignore errors when processed by Sphinx
INDENT = " "
MAX_LENGTH = 65
def convert_type(short_form):
_ = current_lang.translate
forms = {
"complex": _("a complex number"),
"dict": _("a dictionary (`dict`)"),
"float": _("a number (`float`)"),
"frozenset": _("a `frozenset`"),
"int": _("an integer (`int`)"),
"list": _("a `list`"),
"NoneType": _("a variable equal to `None` (`NoneType`)"),
"set": _("a `set`"),
"str": _("a string (`str`)"),
"string": _("a string (`str`)"),
"tuple": _("a `tuple`"),
}
return forms.get(short_form, short_form)
def get_all_objects(line, frame):
"""Given a (partial) line of code and a frame,
obtains a dict containing all the relevant information about objects
found on that line so that they can be formatted as part of the
answer to "where()" or they can be used during the analysis
of the cause of the exception.
    The dict returned has five keys.
    The first three, 'locals', 'globals', 'builtins',
    each contain a list of tuples, each tuple being of the form
    (name, repr(obj), obj) where name --> obj.
    The fourth key, 'expressions', contains a list of tuples of the form
    ('name', obj). It is only occasionally used in helping to make
    suggestions regarding the cause of some exception.
    The fifth key, 'name, obj', collects (name, obj) pairs for every object
    found, whatever its scope.
    """
objects = {
"locals": [],
"globals": [],
"builtins": [],
"expressions": [],
"name, obj": [],
}
scopes = (
("locals", frame.f_locals), # always have locals before globals
("globals", frame.f_globals),
)
names = set()
tokens = token_utils.get_significant_tokens(line)
for tok in tokens:
if tok.is_identifier():
name = tok.string
if name in names:
continue
for scope, scope_dict in scopes:
if name in scope_dict:
names.add(name)
obj = scope_dict[name]
objects[scope].append((name, repr(obj), obj))
objects["name, obj"].append((name, obj))
break
else:
if name in dir(builtins):
names.add(name)
obj = getattr(builtins, name)
objects["builtins"].append((name, repr(obj), obj))
objects["name, obj"].append((name, obj))
dotted_names = get_dotted_names(line)
for name in dotted_names:
for scope, scope_dict in scopes:
if name not in scope_dict:
continue
obj = scope_dict[name]
if (name, obj) not in objects["name, obj"]:
objects[scope].append((name, repr(obj), obj))
objects["name, obj"].append((name, obj))
try:
atok = ASTTokens(line, parse=True)
except SyntaxError: # this should not happen
return objects
if atok is not None:
evaluator = Evaluator.from_frame(frame)
for nodes, obj in group_expressions(
pair for pair in evaluator.find_expressions(atok.tree)
):
name = atok.get_text(nodes[0])
if name in names:
continue
names.add(name)
objects["name, obj"].append((name, obj))
try:
# We're not interested in showing literals in the list of variables
ast.literal_eval(name)
except Exception: # noqa
objects["expressions"].append((name, obj))
return objects
def get_dotted_names(line):
"""Retrieve dotted names, i.e. something like A.x or A.x.y, etc.
In principle, pure_eval/ASTTokens used above should be able to
retrieve dotted names. However, I have not (yet) been able to do so
without this hack.
"""
names = []
prev_token = token_utils.tokenize("")[0] # convenient guard
dot_found = False
tokens = token_utils.get_significant_tokens(line)
for tok in tokens:
if tok == ".":
dot_found = True
continue
if tok.is_identifier():
if prev_token.is_identifier() and dot_found:
previous_name = names[-1]
names.append(previous_name + "." + tok.string)
dot_found = False
continue
names.append(tok.string)
prev_token = tok
dot_found = False
# remove duplicate and names without "."
dotted_names = []
for name in names:
if name not in dotted_names and "." in name:
dotted_names.append(name)
return dotted_names
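# For example (hypothetical input), get_dotted_names("a.b + a.b.c - d") would
# return ["a.b", "a.b.c"]: names without a dot are filtered out at the end,
# and duplicates are removed.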
def get_object_from_name(name, frame):
"""Given the name of an object, for example 'str', or 'A' for
class A, returns a basic object of that type found in a frame,
or None.
"""
# We must guard against people defining their own type with a
# standard name by checking standard types last.
if name in frame.f_locals:
return frame.f_locals[name]
if name in frame.f_globals:
return frame.f_globals[name]
if name in dir(builtins): # Do this last
return getattr(builtins, name)
return None
def get_variables_in_frame_by_scope(frame, scope):
"""Returns a list of variables based on the provided scope, which must
be one of 'local', 'global', or 'nonlocal'.
"""
if scope == "local":
return frame.f_locals
if scope == "global":
return frame.f_globals
if scope == "nonlocal":
globals_ = frame.f_globals
non_locals = {}
while frame.f_back is not None:
frame = frame.f_back
# By creating a new list here, we prevent a failure when
# running with pytest.
for key in list(frame.f_locals):
if key in globals_ or key in non_locals:
continue
non_locals[key] = frame.f_locals[key]
return non_locals
debug_helper.log("Internal error in get_variable_in_frame_by_scope()")
debug_helper.log(f"unknown scope '{scope}'")
debug_helper.log_error()
return {}
def get_definition_scope(variable_name, frame):
"""Returns a list of scopes ('local', 'global', 'nonlocal')
in which a variable is defined.
"""
scopes = []
for scope in ["local", "global", "nonlocal"]:
in_scope = get_variables_in_frame_by_scope(frame, scope)
if variable_name in in_scope:
scopes.append(scope)
return scopes
def get_var_info(line, frame):
"""Given a frame object, it obtains the value (repr) of the names
found in the logical line (which may span many lines in the file)
where the exception occurred.
We ignore values found *only* in nonlocal scope as they should not
be relevant.
"""
names_info = []
objects = get_all_objects(line.strip(), frame)
for name, value, obj in objects["locals"]:
result = format_var_info(name, value, obj)
names_info.append(result)
for name, value, obj in objects["globals"]:
result = format_var_info(name, value, obj, "globals")
names_info.append(result)
for name, value, obj in objects["builtins"]:
result = format_var_info(name, value, obj)
names_info.append(result)
for name, obj in objects["expressions"]:
result = format_var_info(name, repr(obj), obj)
names_info.append(result)
if names_info:
names_info.append("")
return "\n".join(names_info)
def simplify_name(name):
"""Remove irrelevant memory location information from functions, etc."""
# There are two reasons to do this:
# 1. this information is essentially of no value for beginners
# 2. Removing this information ensures that consecutive runs of
# script to create tracebacks for the documentation will yield
# exactly the same results. This makes it easier to spot changes/regressions.
if " at " in name:
name = name.split(" at ")[0] + ">"
elif " from " in name: # example: module X from stdlib_path
obj_repr, path = name.split(" from ")
path = path_utils.shorten_path(path[:-1]) # -1 removes >
# Avoid lines that are too long
if len(obj_repr) + len(path) < MAX_LENGTH:
name = obj_repr + "> from " + path
else:
name = obj_repr + f">\n{INDENT}from " + path
# The following is done so that, when using rich, pygments
# does not style the - and 'in' in a weird way.
name = name.replace("built-in", "builtin")
if name.startswith("<"):
name = name.replace("'", "")
if ".<locals>." in name:
file_name, obj_name = name.split(".<locals>.")
if name.startswith("<function "):
start = "<function "
elif name.startswith("<class "):
start = "<class "
else:
start = "<"
file_name = file_name.replace(start, "")
name = start + obj_name + " from " + file_name
if "__main__." in name:
name = name.replace("__main__.", "") + " from __main__"
return name
def format_var_info(name, value, obj, _global=""):
"""Formats the variable information so that it fits on a single line
for each variable.
The format we want is something like the following:
[global] name: repr(name)
However, if repr(name) exceeds a certain value, it is truncated.
    When that is the case, if len(obj) is defined, as is the case for
    lists, tuples, dicts, etc., then len(obj) is shown on a separate line.
This can be useful information in case of IndexError and possibly
others.
"""
_ = current_lang.translate
length_info = ""
if _global:
_global = "global "
if value.startswith("<") and value.endswith(">"):
value = simplify_name(value)
if len(value) > MAX_LENGTH and not value.startswith("<"):
# We reduce the length of the repr, indicate this by ..., but we
# also keep the last character so that the repr of a list still
# ends with ], that of a tuple still ends with ), etc.
if "," in value: # try to truncate at a natural place
parts = value.split(", ")
length = 0
new_parts = []
for part in parts:
if len(part) + length > MAX_LENGTH:
break
new_parts.append(part + ", ")
length += len(part) + 2
if new_parts:
value = "".join(new_parts) + "..." + value[-1]
else:
value = value[0 : MAX_LENGTH - 5] + "..." + value[-1]
else:
value = value[0 : MAX_LENGTH - 5] + "..." + value[-1]
try:
length_info = len(obj)
except TypeError:
pass
result = f" {_global}{name}: {value}"
if length_info:
result += f"\n{INDENT}len({name}): {length_info}"
return result
def get_similar_names(name, frame):
"""This function looks for objects with names similar to 'name' in
either the current locals() and globals() as well as in
Python's builtins.
"""
similar = {}
locals_ = get_variables_in_frame_by_scope(frame, "local")
similar["locals"] = utils.get_similar_words(name, locals_)
globals_ = get_variables_in_frame_by_scope(frame, "global")
names = utils.get_similar_words(name, globals_)
similar["globals"] = [name for name in names if name not in similar["locals"]]
similar["builtins"] = utils.get_similar_words(name, dir(builtins))
all_similar = similar["locals"] + similar["globals"] + similar["builtins"]
if all_similar:
most_similar = utils.get_similar_words(name, all_similar)
similar["best"] = most_similar[0]
else:
        # utils.get_similar_words() used above only looks for relatively
        # minor letter mismatches in making suggestions.
# Here we add a few additional hard-coded cases.
if name in ["length", "lenght"]:
similar["builtins"] = ["len"]
similar["best"] = "len"
else:
similar["best"] = None
return similar
def name_has_type_hint(name, frame):
"""Identifies if a variable name has a type hint associated with it.
    This can be useful if a user writes something like::
name : something
use(name)
instead of::
name = something
use(name)
and sees a NameError.
HOWEVER, when an exception is raised, it seems that the only type hints
that are picked up correctly are those found in the global scope.
"""
_ = current_lang.translate
type_hint_found_in_scope = _(
"A type hint found for `{name}` in the {scope} scope.\n"
"Perhaps you had used a colon instead of an equal sign and wrote\n\n"
" {name} : {hint}\n\n"
"instead of\n\n"
" {name} = {hint}\n"
)
nonlocals = get_variables_in_frame_by_scope(frame, "nonlocal")
scopes = (
("local", frame.f_locals),
("global", frame.f_globals),
("nonlocal", nonlocals),
)
for scope, scope_dict in scopes:
if "__annotations__" in scope_dict and name in scope_dict["__annotations__"]:
hint = scope_dict["__annotations__"][name]
# For Python 3.10+, all type hints are strings
if (
isinstance(hint, str)
and sys.version_info.major == 3
and sys.version_info.minor < 10
):
hint = repr(hint)
return type_hint_found_in_scope.format(name=name, scope=scope, hint=hint)
return ""
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
__version__ = '3.9.0'
|
from pytest_mock import MockFixture
from opta.pre_check import dependency_check
class TestPreCheck:
def test_dependency_check(self, mocker: MockFixture) -> None:
validate_version = mocker.patch("opta.pre_check.Terraform.validate_version")
dependency_check()
validate_version.assert_called_once()
|
#!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from ConfigParser import SafeConfigParser
from subprocess import *
from os import path
import time
import os
class StatusCodes:
SUCCESS = 0
FAILED = 1
INVALID_INP = 2
RUNNING = 3
STOPPED = 4
STARTING = 5
class Log:
INFO = 'INFO'
ALERT = 'ALERT'
CRIT = 'CRIT'
NOTIF = 'NOTIF'
class Config:
MONIT_AFTER_MINS = 30
SLEEP_SEC = 1
RETRY_ITERATIONS = 10
RETRY_FOR_RESTART = 5
MONITOR_LOG = '/var/log/monitor.log'
UNMONIT_PS_FILE = '/etc/unmonit_psList.txt'
def getConfig( config_file_path = "/etc/monitor.conf" ):
"""
Reads the process configuration from the config file.
Config file contains the processes to be monitored.
"""
process_dict = {}
parser = SafeConfigParser()
parser.read( config_file_path )
for section in parser.sections():
process_dict[section] = {}
for name, value in parser.items(section):
process_dict[section][name] = value
# printd (" %s = %r" % (name, value))
return process_dict
def printd(msg):
    """
    Prints debug messages to the monitor log.
    Disabled by default; drop the early return below to enable logging.
    """
    #for debug
    #print msg
    return 0
    # unreachable while the early return above is in place
    f = open(Config.MONITOR_LOG, 'r+')
    f.seek(0, 2)
    f.write(str(msg) + "\n")
    f.close()
def raisealert(severity, msg, process_name=None):
""" Writes the alert message"""
#timeStr=str(time.ctime())
if process_name is not None:
log = '['+severity +']'+" " + '['+process_name+']' + " " + msg +"\n"
else:
log = '['+severity+']' + " " + msg +"\n"
msg = 'logger -t monit '+ log
pout = Popen(msg, shell=True, stdout=PIPE)
def isPidMatchPidFile(pidfile, pids):
""" Compares the running process pid with the pid in pid file.
If a process with multiple pids then it matches with pid file
"""
if pids is None or isinstance(pids,list) != True or len(pids) == 0:
printd ("Invalid Arguments")
return StatusCodes.FAILED
if not path.isfile(pidfile):
#It seems there is no pid file for this service
printd("The pid file "+pidfile+" is not there for this process")
return StatusCodes.FAILED
fd=None
try:
fd = open(pidfile,'r')
except:
printd("pid file: "+ pidfile +" open failed")
return StatusCodes.FAILED
inp = fd.read()
if not inp:
fd.close()
return StatusCodes.FAILED
printd("file content "+str(inp))
printd(pids)
tocheck_pid = inp.strip()
for item in pids:
if str(tocheck_pid) == item.strip():
printd("pid file matched")
fd.close()
return StatusCodes.SUCCESS
fd.close()
return StatusCodes.FAILED
def checkProcessRunningStatus(process_name, pidFile):
printd("checking the process " + process_name)
cmd = ''
pids = []
cmd = 'pidof ' + process_name
printd(cmd)
#cmd = 'service ' + process_name + ' status'
pout = Popen(cmd, shell=True, stdout=PIPE)
exitStatus = pout.wait()
temp_out = pout.communicate()[0]
#check there is only one pid or not
if exitStatus == 0:
pids = temp_out.split(' ')
printd("pid(s) of process %s are %s " %(process_name, pids))
#there is more than one process so match the pid file
#if not matched set pidFileMatched=False
printd("Checking pid file")
if isPidMatchPidFile(pidFile, pids) == StatusCodes.SUCCESS:
return True,pids
printd("pid of exit status %s" %exitStatus)
return False,pids
def restartService(service_name):
cmd = 'service ' + service_name + ' restart'
cout = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT)
return_val = cout.wait()
if return_val == 0:
printd("The service " + service_name +" recovered successfully ")
msg="The process " +service_name+" is recovered successfully "
raisealert(Log.INFO,msg,service_name)
return True
else:
printd("process restart failed ....")
return False
def checkProcessStatus( process ):
"""
Check the process running status, if not running tries to restart
"""
process_name = process.get('processname')
service_name = process.get('servicename')
pidfile = process.get('pidfile')
#temp_out = None
restartFailed=False
pidFileMatched=False
pids=''
cmd=''
if process_name is None:
printd ("\n Invalid Process Name")
return StatusCodes.INVALID_INP
status, pids = checkProcessRunningStatus(process_name, pidfile)
if status == True:
printd("The process is running ....")
return StatusCodes.RUNNING
else:
printd("Process %s is not running trying to recover" %process_name)
#Retry the process state for few seconds
for i in range(1, Config.RETRY_ITERATIONS):
time.sleep(Config.SLEEP_SEC)
if i < Config.RETRY_FOR_RESTART: # this is just for trying few more times
status, pids = checkProcessRunningStatus(process_name, pidfile)
if status == True:
raisealert(Log.ALERT, "The process detected as running", process_name)
break
else:
printd("Process %s is not running checking the status again..." %process_name)
continue
else:
msg="The process " +process_name+" is not running trying recover "
raisealert(Log.INFO,process_name,msg)
                if service_name == 'apache2':
                    # apache2 leaves stale worker processes behind; kill them
                    # first, otherwise the main service will not start
for pid in pids:
cmd = 'kill -9 '+pid
printd(cmd)
Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT)
if restartService(service_name) == True:
break
else:
restartFailed = True
continue
#for end here
if restartFailed == True:
msg="The process %s recover failed "%process_name
raisealert(Log.ALERT,process_name,msg)
printd("Restart failed after number of retries")
return StatusCodes.STOPPED
return StatusCodes.RUNNING
def monitProcess( processes_info ):
"""
Monitors the processes which got from the config file
"""
if len( processes_info ) == 0:
printd("Invalid Input")
return StatusCodes.INVALID_INP
dict_unmonit={}
umonit_update={}
unMonitPs=False
if not path.isfile(Config.UNMONIT_PS_FILE):
printd('Unmonit File not exist')
else:
#load the dictionary with unmonit process list
dict_unmonit = loadPsFromUnMonitFile()
#time for noting process down time
csec = repr(time.time()).split('.')[0]
for process,properties in processes_info.items():
        #skip the process if its time stamp is less than Config.MONIT_AFTER_MINS
printd ("checking the service %s \n" %process)
        if not is_empty(dict_unmonit):
            if process in dict_unmonit:
                ts = dict_unmonit[process]
                if checkPsTimeStampForMonitor(csec, ts, process) == False:
                    unMonitPs = True
                    continue
if checkProcessStatus( properties) != StatusCodes.RUNNING:
printd( "\n Service %s is not Running"%process)
#add this process into unmonit list
printd ("updating the service for unmonit %s\n" %process)
umonit_update[process]=csec
#if dict is not empty write to file else delete it
    if not is_empty(umonit_update):
writePsListToUnmonitFile(umonit_update)
else:
        if is_empty(umonit_update) and unMonitPs == False:
#delete file it is there
removeFile(Config.UNMONIT_PS_FILE)
def checkPsTimeStampForMonitor(csec, ts, process):
    printd("Time difference=%s" % str(int(csec) - int(ts)))
    tmin = (int(csec) - int(ts)) / 60
    if tmin < Config.MONIT_AFTER_MINS:
        raisealert(Log.ALERT, "The process %s will only be monitored again after %s minutes" % (process, Config.MONIT_AFTER_MINS), process)
        printd('process will be monitored after %s min' % (str(int(Config.MONIT_AFTER_MINS) - tmin)))
        return False
    return True
def removeFile(fileName):
if path.isfile(fileName):
printd("Removing the file %s" %fileName)
os.remove(fileName)
def loadPsFromUnMonitFile():
dict_unmonit = {}
try:
fd = open(Config.UNMONIT_PS_FILE)
except:
printd("Failed to open file %s " %(Config.UNMONIT_PS_FILE))
return StatusCodes.FAILED
ps = fd.read()
if not ps:
printd("File %s content is empty " %Config.UNMONIT_PS_FILE)
return StatusCodes.FAILED
printd(ps)
plist = ps.split(',')
plist.remove('')
for i in plist:
dict_unmonit[i.split(':')[0]] = i.split(':')[1]
fd.close()
return dict_unmonit
def writePsListToUnmonitFile(umonit_update):
printd("Write updated unmonit list to file")
line=''
for i in umonit_update:
line+=str(i)+":"+str(umonit_update[i])+','
printd(line)
try:
fd=open(Config.UNMONIT_PS_FILE,'w')
except:
printd("Failed to open file %s " %Config.UNMONIT_PS_FILE)
return StatusCodes.FAILED
fd.write(line)
fd.close()
def is_empty(struct):
    """
    Checks whether the given struct is empty or not
    """
    return not struct
def main():
'''
Step1 : Get Config
'''
printd("monitoring started")
temp_dict = getConfig()
'''
Step2: Monitor and Raise Alert
'''
monitProcess( temp_dict )
if __name__ == "__main__":
main()
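# Example /etc/monitor.conf consumed by getConfig() above. Section names are
# arbitrary process labels; the keys match the lookups in checkProcessStatus()
# (the values shown are hypothetical):
#
#   [apache2]
#   processname = apache2
#   servicename = apache2
#   pidfile = /var/run/apache2.pid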
|
import json
import subprocess
import arrow
import click
import yaml
from tasklib import Task
from tasklib.serializing import SerializingObject
from .cmd import quiet, task, tw
def rewrite_task(backend, data):
dates_cols = ("due", "wait")
for col in dates_cols:
if col in data:
serializer = SerializingObject(backend)
due = arrow.get(data[col])
data[col] = serializer.timestamp_serializer(due)
if "priority" in data:
data["priority"] = str(data["priority"])
@task.command()
@click.argument("task_id", required=True)
@quiet
def edit(task_id, quiet):
"""
Opens an editor to modify the file in yaml
"""
try:
t = tw.tasks.get(id=task_id)
except Task.DoesNotExist as ex:
click.secho(f"Task id='{task_id}' does not exist", fg="red")
raise click.Abort() from ex
as_dict = json.loads(t.export_data())
immutable_keys = t.read_only_fields + ["status"]
    for key in immutable_keys:
        if key in as_dict:
            del as_dict[key]
if "due" in as_dict:
due = arrow.get(as_dict["due"]).shift(days=1)
as_dict["due"] = due.strftime("%Y-%m-%d")
result = click.edit("---\n" + yaml.dump(as_dict), extension=".yaml")
if not result:
raise click.Abort()
modified = yaml.load(result, yaml.FullLoader)
rewrite_task(t.backend, modified)
t._update_data(modified) # pylint: disable=protected-access
t.save()
if not quiet:
subprocess.Popen(["task", "information", task_id])
|
print('Python module loaded')
import numpy as np
def sum(i, j):
    # add element-wise via numpy, then convert back to a plain Python list;
    # the parentheses matter: without them .tolist() applies only to the
    # second operand and an ndarray is returned
    return (np.array(i) + np.array(j)).tolist()  # using numpy arrays as return types would require eigen
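# A minimal usage sketch:
if __name__ == '__main__':
    print(sum([1, 2], [3, 4]))  # [4, 6]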
|
import cv2
from screen import Screen
from typing import Tuple, Union, List
from dataclasses import dataclass
import numpy as np
from logger import Logger
import time
import os
from config import Config
from utils.misc import load_template, list_files_in_folder, alpha_to_mask
@dataclass
class TemplateMatch:
name: str = None
score: float = -1.0
position: Tuple[float, float] = None
valid: bool = False
class TemplateFinder:
"""
Loads images from assets/templates and assets/npc and provides search functions
to find these assets within another image
"""
    def __init__(self, screen: Screen, template_paths: List[str] = ["assets\\templates", "assets\\npc", "assets\\item_properties", "assets\\chests"]):
        self._screen = screen
        self._config = Config()
        self.last_res = None
        # load templates with their filename as key in the dict
        paths = []
        for path in template_paths:
            paths += list_files_in_folder(path)
        self._templates = {}
        for file_path in paths:
file_name: str = os.path.basename(file_path)
if file_name.endswith('.png'):
key = file_name[:-4].upper()
template_img = load_template(file_path, 1.0, True)
mask = alpha_to_mask(template_img)
self._templates[key] = [
cv2.cvtColor(template_img, cv2.COLOR_BGRA2BGR),
cv2.cvtColor(template_img, cv2.COLOR_BGRA2GRAY),
1.0,
mask
]
    def get_template(self, key):
        # templates are stored already converted to BGR in __init__
        return self._templates[key][0]
def search(
self,
ref: Union[str, np.ndarray, List[str]],
inp_img: np.ndarray,
threshold: float = 0.68,
roi: List[float] = None,
normalize_monitor: bool = False,
best_match: bool = False,
use_grayscale: bool = False,
) -> TemplateMatch:
"""
Search for a template in an image
        :param ref: Either the key of an already loaded template, a list of such keys, or an image which is used as template
:param inp_img: Image in which the template will be searched
:param threshold: Threshold which determines if a template is found or not
:param roi: Region of Interest of the inp_img to restrict search area. Format [left, top, width, height]
:param normalize_monitor: If True will return positions in monitor coordinates. Otherwise in coordinates of the input image.
:param best_match: If list input, will search for list of templates by best match. Default behavior is first match.
:param use_grayscale: Use grayscale template matching for speed up
        :return: Returns a TemplateMatch object with a valid flag
"""
if roi is None:
# if no roi is provided roi = full inp_img
roi = [0, 0, inp_img.shape[1], inp_img.shape[0]]
rx, ry, rw, rh = roi
inp_img = inp_img[ry:ry + rh, rx:rx + rw]
if type(ref) == str:
templates = [self._templates[ref][0]]
templates_gray = [self._templates[ref][1]]
scales = [self._templates[ref][2]]
masks = [self._templates[ref][3]]
names = [ref]
best_match = False
elif type(ref) == list:
templates = [self._templates[i][0] for i in ref]
templates_gray = [self._templates[i][1] for i in ref]
scales = [self._templates[i][2] for i in ref]
masks = [self._templates[i][3] for i in ref]
names = ref
        else:
            templates = [ref]
            templates_gray = [cv2.cvtColor(ref, cv2.COLOR_BGRA2GRAY)]
            scales = [1.0]
            masks = [None]
            names = [None]
            best_match = False
scores = [0] * len(ref)
ref_points = [(0, 0)] * len(ref)
for count, template in enumerate(templates):
template_match = TemplateMatch()
scale = scales[count]
mask = masks[count]
img: np.ndarray = cv2.resize(inp_img, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
rx *= scale
ry *= scale
rw *= scale
rh *= scale
if img.shape[0] > template.shape[0] and img.shape[1] > template.shape[1]:
if use_grayscale:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
template = templates_gray[count]
self.last_res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED, mask=mask)
np.nan_to_num(self.last_res, copy=False, nan=0.0, posinf=0.0, neginf=0.0)
_, max_val, _, max_pos = cv2.minMaxLoc(self.last_res)
if max_val > threshold:
ref_point = (max_pos[0] + int(template.shape[1] * 0.5) + rx, max_pos[1] + int(template.shape[0] * 0.5) + ry)
ref_point = (int(ref_point[0] * (1.0 / scale)), int(ref_point[1] * (1.0 / scale)))
if normalize_monitor:
ref_point = self._screen.convert_screen_to_monitor(ref_point)
if best_match:
scores[count]=max_val
ref_points[count]=ref_point
else:
                    template_match.name = names[count]
template_match.position = ref_point
template_match.score = max_val
template_match.valid = True
return template_match
if max(scores) > 0:
idx=scores.index(max(scores))
            template_match.name = names[idx]
template_match.position = ref_points[idx]
template_match.score = scores[idx]
template_match.valid = True
return template_match
def search_and_wait(
self,
ref: Union[str, List[str]],
roi: List[float] = None,
time_out: float = None,
threshold: float = 0.68,
best_match: bool = False,
take_ss: bool = True,
use_grayscale: bool = False
) -> TemplateMatch:
"""
Helper function that will loop and keep searching for a template
        :param time_out: After this amount of time the search will stop and return the last (invalid) TemplateMatch
:param take_ss: Bool value to take screenshot on timeout or not (flag must still be set in params!)
Other params are the same as for TemplateFinder.search()
"""
if type(ref) is str:
ref = [ref]
Logger.debug(f"Waiting for Template {ref}")
start = time.time()
while 1:
img = self._screen.grab()
template_match = self.search(ref, img, roi=roi, threshold=threshold, best_match=best_match, use_grayscale=use_grayscale)
is_loading_black_roi = np.average(img[:, 0:self._config.ui_roi["loading_left_black"][2]]) < 1.0
if not is_loading_black_roi or "LOADING" in ref:
if template_match.valid:
Logger.debug(f"Found Match: {template_match.name} ({template_match.score*100:.1f}% confidence)")
return template_match
if time_out is not None and (time.time() - start) > time_out:
if self._config.general["info_screenshots"] and take_ss:
cv2.imwrite(f"./info_screenshots/info_wait_for_{ref}_time_out_" + time.strftime("%Y%m%d_%H%M%S") + ".png", img)
if take_ss:
Logger.debug(f"Could not find any of the above templates")
return template_match
# Testing: Have whatever you want to find on the screen
if __name__ == "__main__":
from screen import Screen
from config import Config
config = Config()
screen = Screen(config.general["monitor"])
template_finder = TemplateFinder(screen)
search_templates = ["ARC_END_1", "ARC_END_2", "ARC_ALTAR", "ARC_ALTAR2", "ARC_STASH4"]
while 1:
# img = cv2.imread("")
img = screen.grab()
display_img = img.copy()
start = time.time()
for key in search_templates:
template_match = template_finder.search(key, img, best_match=True, threshold=0.35, use_grayscale=True)
if template_match.valid:
cv2.putText(display_img, str(template_match.name), template_match.position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.circle(display_img, template_match.position, 7, (255, 0, 0), thickness=5)
x, y = template_match.position
print(f"Name: {template_match.name} Pos: {template_match.position}, Dist: {625-x, 360-y}, Score: {template_match.score}")
# print(time.time() - start)
display_img = cv2.resize(display_img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_NEAREST)
cv2.imshow('test', display_img)
key = cv2.waitKey(1)
|
from typing import List, Tuple
import tensorflow as tf
import random
AUTOTUNE = tf.data.experimental.AUTOTUNE
class DataLoader(object):
"""A TensorFlow Dataset API based loader for semantic segmentation problems."""
def __init__(self, image_paths: List[str], mask_paths: List[str], image_size: Tuple[int],
channels: Tuple[int] = (3, 3), crop_percent: float = None, seed: int = None,
augment: bool = True, compose: bool = False, one_hot_encoding: bool = False, palette=None):
"""
Initializes the data loader object
Args:
image_paths: List of paths of train images.
mask_paths: List of paths of train masks (segmentation masks)
image_size: Tuple, the final height, width of the loaded images.
channels: Tuple of ints, first element is number of channels in images,
second is the number of channels in the mask image (needed to
correctly read the images into tensorflow and apply augmentations)
crop_percent: Float in the range 0-1, defining percentage of image
to randomly crop.
palette: A list of RGB pixel values in the mask. If specified, the mask
will be one hot encoded along the channel dimension.
seed: An int, if not specified, chosen randomly. Used as the seed for
the RNG in the data pipeline.
"""
self.image_paths = image_paths
self.mask_paths = mask_paths
self.palette = palette
self.image_size = image_size
self.augment = augment
self.compose = compose
self.one_hot_encoding = one_hot_encoding
if crop_percent is not None:
if 0.0 < crop_percent <= 1.0:
self.crop_percent = tf.constant(crop_percent, tf.float32)
elif 0 < crop_percent <= 100:
self.crop_percent = tf.constant(crop_percent / 100., tf.float32)
            else:
                raise ValueError("Invalid value entered for crop size. Please use an "
                                 "integer between 0 and 100, or a float between 0 and 1.0")
else:
self.crop_percent = None
self.channels = channels
if seed is None:
self.seed = random.randint(0, 1000)
else:
self.seed = seed
def _corrupt_brightness(self, image, mask):
"""
        Randomly applies a random brightness change.
"""
cond_brightness = tf.cast(tf.random.uniform(
[], maxval=2, dtype=tf.int32), tf.bool)
image = tf.cond(cond_brightness, lambda: tf.image.random_brightness(
image, 0.1), lambda: tf.identity(image))
return image, mask
def _corrupt_contrast(self, image, mask):
"""
Randomly applies a random contrast change.
"""
cond_contrast = tf.cast(tf.random.uniform(
[], maxval=2, dtype=tf.int32), tf.bool)
image = tf.cond(cond_contrast, lambda: tf.image.random_contrast(
image, 0.1, 0.8), lambda: tf.identity(image))
return image, mask
def _corrupt_saturation(self, image, mask):
"""
Randomly applies a random saturation change.
"""
cond_saturation = tf.cast(tf.random.uniform(
[], maxval=2, dtype=tf.int32), tf.bool)
image = tf.cond(cond_saturation, lambda: tf.image.random_saturation(
image, 0.1, 0.8), lambda: tf.identity(image))
return image, mask
def _crop_random(self, image, mask):
"""
Randomly crops image and mask in accord.
"""
cond_crop_image = tf.cast(tf.random.uniform(
[], maxval=2, dtype=tf.int32, seed=self.seed), tf.bool)
shape = tf.cast(tf.shape(image), tf.float32)
h = tf.cast(shape[0] * self.crop_percent, tf.int32)
w = tf.cast(shape[1] * self.crop_percent, tf.int32)
comb_tensor = tf.concat([image, mask], axis=2)
comb_tensor = tf.cond(cond_crop_image, lambda: tf.image.random_crop(
comb_tensor, [h, w, self.channels[0] + self.channels[1]], seed=self.seed), lambda: tf.identity(comb_tensor))
image, mask = tf.split(comb_tensor, [self.channels[0], self.channels[1]], axis=2)
return image, mask
def _flip_left_right(self, image, mask):
"""
Randomly flips image and mask left or right in accord.
"""
comb_tensor = tf.concat([image, mask], axis=2)
comb_tensor = tf.image.random_flip_left_right(comb_tensor, seed=self.seed)
image, mask = tf.split(comb_tensor, [self.channels[0], self.channels[1]], axis=2)
return image, mask
def _resize_data(self, image, mask):
"""
Resizes images to specified size.
"""
image = tf.image.resize(image, self.image_size)
mask = tf.image.resize(mask, self.image_size, method="nearest")
return image, mask
def _parse_data(self, image_paths, mask_paths):
"""
        Reads image and mask files depending on the
        specified extension.
"""
image_content = tf.io.read_file(image_paths)
mask_content = tf.io.read_file(mask_paths)
images = tf.image.decode_jpeg(image_content, channels=self.channels[0])
masks = tf.image.decode_jpeg(mask_content, channels=self.channels[1])
return images, masks
def _one_hot_encode(self, image, mask):
"""
Converts mask to a one-hot encoding specified by the semantic map.
"""
one_hot_map = []
for colour in self.palette:
class_map = tf.reduce_all(tf.equal(mask, colour), axis=-1)
one_hot_map.append(class_map)
one_hot_map = tf.stack(one_hot_map, axis=-1)
one_hot_map = tf.cast(one_hot_map, tf.float32)
return image, one_hot_map
@tf.function
def _map_function(self, images_path, masks_path):
image, mask = self._parse_data(images_path, masks_path)
def _augmentation_func(image_f, mask_f):
if self.augment:
if self.compose:
image_f, mask_f = self._corrupt_brightness(image_f, mask_f)
image_f, mask_f = self._corrupt_contrast(image_f, mask_f)
image_f, mask_f = self._corrupt_saturation(image_f, mask_f)
image_f, mask_f = self._crop_random(image_f, mask_f)
image_f, mask_f = self._flip_left_right(image_f, mask_f)
else:
options = [self._corrupt_brightness,
self._corrupt_contrast,
self._corrupt_saturation,
self._crop_random,
self._flip_left_right]
augment_func = random.choice(options)
image_f, mask_f = augment_func(image_f, mask_f)
if self.one_hot_encoding:
if self.palette is None:
                    raise ValueError('No palette for one-hot encoding specified in the data loader! '
                                     'Please specify one when initializing the loader.')
image_f, mask_f = self._one_hot_encode(image_f, mask_f)
image_f, mask_f = self._resize_data(image_f, mask_f)
return image_f, mask_f
return tf.py_function(_augmentation_func, [image, mask], [tf.float32, tf.uint8])
def data_batch(self, batch_size, shuffle=False):
"""
Reads data, normalizes it, shuffles it, then batches it, returns a
the next element in dataset op and the dataset initializer op.
Inputs:
batch_size: Number of images/masks in each batch returned.
augment: Boolean, whether to augment data or not.
shuffle: Boolean, whether to shuffle data in buffer or not.
one_hot_encode: Boolean, whether to one hot encode the mask image or not.
Encoding will done according to the palette specified when
initializing the object.
Returns:
data: A tf dataset object.
"""
# Create dataset out of the 2 files:
data = tf.data.Dataset.from_tensor_slices((self.image_paths, self.mask_paths))
# Parse images and labels
data = data.map(self._map_function, num_parallel_calls=AUTOTUNE)
if shuffle:
# Prefetch, shuffle then batch
data = data.prefetch(AUTOTUNE).shuffle(random.randint(0, len(self.image_paths))).batch(batch_size)
else:
# Batch and prefetch
data = data.batch(batch_size).prefetch(AUTOTUNE)
return data
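# A minimal usage sketch (the paths below are hypothetical placeholders; real
# image/mask files are needed for the pipeline to actually produce batches):
#
#   loader = DataLoader(image_paths=['images/0.jpg'], mask_paths=['masks/0.png'],
#                       image_size=(256, 256), channels=(3, 1),
#                       augment=False, seed=42)
#   dataset = loader.data_batch(batch_size=1)
#   for image, mask in dataset.take(1):
#       print(image.shape, mask.shape)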
|
"""Forex Controller."""
__docformat__ = "numpy"
import argparse
import logging
import os
from datetime import datetime, timedelta
from typing import List
import pandas as pd
from prompt_toolkit.completion import NestedCompleter
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.forex import av_view, forex_helper
from gamestonk_terminal.forex.forex_helper import FOREX_SOURCES, SOURCES_INTERVALS
from gamestonk_terminal.helper_funcs import parse_known_args_and_warn, valid_date
from gamestonk_terminal.menu import session
from gamestonk_terminal.parent_classes import BaseController
from gamestonk_terminal.rich_config import console
from gamestonk_terminal.decorators import check_api_key
# pylint: disable=R1710,import-outside-toplevel
logger = logging.getLogger(__name__)
class ForexController(BaseController):
"""Forex Controller class."""
CHOICES_COMMANDS = ["to", "from", "load", "quote", "candle", "resources"]
CHOICES_MENUS = ["ta", "oanda"]
PATH = "/forex/"
FILE_PATH = os.path.join(os.path.dirname(__file__), "README.md")
def __init__(self, queue: List[str] = None):
"""Construct Data."""
super().__init__(queue)
self.from_symbol = "USD"
self.to_symbol = ""
self.source = "yf"
self.data = pd.DataFrame()
if session and gtff.USE_PROMPT_TOOLKIT:
choices: dict = {c: {} for c in self.controller_choices}
choices["to"] = {c: None for c in forex_helper.YF_CURRENCY_LIST}
choices["from"] = {c: None for c in forex_helper.YF_CURRENCY_LIST}
choices["load"]["--source"] = {c: None for c in FOREX_SOURCES}
self.completer = NestedCompleter.from_nested_dict(choices)
def print_help(self):
"""Print help."""
has_symbols_start = "" if self.from_symbol and self.to_symbol else "[dim]"
has_symbols_end = "" if self.from_symbol and self.to_symbol else "[/dim]"
help_text = f"""[cmds]
from select the "from" currency in a forex pair
to select the "to" currency in a forex pair[/cmds]
[param]From: [/param]{None or self.from_symbol}
[param]To: [/param]{None or self.to_symbol}
[param]Source: [/param]{None or FOREX_SOURCES[self.source]}[cmds]{has_symbols_start}
[cmds]
quote get last quote [src][AlphaVantage][/src]
load get historical data
candle show candle plot for loaded data[/cmds]
[menu]
> ta technical analysis for loaded pair, e.g.: ema, macd, rsi, adx, bbands, obv
[/menu]{has_symbols_end}
[info]Forex brokerages:[/info][menu]
> oanda Oanda menu[/menu][/cmds]
"""
console.print(text=help_text, menu="Forex")
def custom_reset(self):
"""Class specific component of reset command"""
set_from_symbol = f"from {self.from_symbol}" if self.from_symbol else ""
set_to_symbol = f"to {self.to_symbol}" if self.to_symbol else ""
if set_from_symbol and set_to_symbol:
return ["forex", set_from_symbol, set_to_symbol]
return []
@log_start_end(log=logger)
def call_to(self, other_args: List[str]):
"""Process 'to' command."""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="to",
description='Select the "to" currency symbol in a forex pair',
)
parser.add_argument(
"-n",
"--name",
help="To currency",
type=forex_helper.check_valid_yf_forex_currency,
dest="to_symbol",
)
if (
other_args
and "-n" not in other_args[0]
and "--name" not in other_args[0]
and "-h" not in other_args
):
other_args.insert(0, "-n")
ns_parser = parse_known_args_and_warn(parser, other_args)
if ns_parser:
self.to_symbol = ns_parser.to_symbol
console.print(
f"\nSelected pair\nFrom: {self.from_symbol}\n"
f"To: {self.to_symbol}\n"
f"Source: {FOREX_SOURCES[self.source]}\n\n"
)
@log_start_end(log=logger)
def call_from(self, other_args: List[str]):
"""Process 'from' command."""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="to",
description='Select the "from" currency symbol in a forex pair',
)
parser.add_argument(
"-n",
"--name",
help="From currency",
type=forex_helper.check_valid_yf_forex_currency,
dest="from_symbol",
)
if (
other_args
and "-n" not in other_args[0]
and "--name" not in other_args[0]
and "-h" not in other_args
):
other_args.insert(0, "-n")
ns_parser = parse_known_args_and_warn(parser, other_args)
if ns_parser:
self.from_symbol = ns_parser.from_symbol
console.print(
f"\nSelected pair\nFrom: {self.from_symbol}\n"
f"To: {self.to_symbol}\n"
f"Source: {FOREX_SOURCES[self.source]}\n\n"
)
@log_start_end(log=logger)
def call_load(self, other_args: List[str]):
"""Process select command."""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="load",
description="Load historical exchange rate data."
"Available data sources are Alpha Advantage and YahooFinance"
"By default main source used for analysis is YahooFinance (yf). To change it use --source av",
)
parser.add_argument(
"--source",
help="Source of historical data",
dest="source",
choices=("yf", "av"),
default="yf",
required=False,
)
parser.add_argument(
"-r",
"--resolution",
choices=["i", "d", "w", "m"],
default="d",
help="Resolution of data. Can be intraday, daily, weekly or monthly",
dest="resolution",
)
parser.add_argument(
"-i",
"--interval",
choices=SOURCES_INTERVALS["yf"],
default="5min",
help="""Interval of intraday data. Options:
[YahooFinance] 1min, 2min, 5min, 15min, 30min, 60min, 90min, 1hour, 1day, 5day, 1week, 1month, 3month.
[AlphaAdvantage] 1min, 5min, 15min, 30min, 60min""",
dest="interval",
)
parser.add_argument(
"-s",
"--start_date",
default=(datetime.now() - timedelta(days=59)),
type=valid_date,
help="Start date of data.",
dest="start_date",
)
ns_parser = parse_known_args_and_warn(parser, other_args)
if ns_parser:
if self.to_symbol and self.from_symbol:
self.data = forex_helper.load(
to_symbol=self.to_symbol,
from_symbol=self.from_symbol,
resolution=ns_parser.resolution,
interval=ns_parser.interval,
start_date=ns_parser.start_date.strftime("%Y-%m-%d"),
source=ns_parser.source,
)
if self.data.empty:
console.print(
"\n[red]"
+ "No historical data loaded.\n"
+ f"Make sure you have appropriate access for the '{ns_parser.source}' data source "
+ f"and that '{ns_parser.source}' supports the requested range."
+ "[/red]\n"
)
else:
self.data.index.name = "date"
self.source = ns_parser.source
console.print(
f"\nSelected pair\nFrom: {self.from_symbol}\n"
f"To: {self.to_symbol}\n"
f"Source: {FOREX_SOURCES[self.source]}\n\n"
)
else:
logger.error(
"Make sure both a to symbol and a from symbol are supplied."
)
console.print(
"\n[red]Make sure both a to symbol and a from symbol are supplied.[/red]\n"
)
@log_start_end(log=logger)
def call_candle(self, other_args: List[str]):
"""Process quote command."""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="candle",
description="Show candle for loaded fx data",
)
ns_parser = parse_known_args_and_warn(parser, other_args)
if ns_parser:
if not self.data.empty:
forex_helper.display_candle(self.data, self.to_symbol, self.from_symbol)
else:
logger.error(
"No forex historical data loaded. Load first using <load>."
)
console.print(
"[red]No forex historical data loaded. Load first using <load>.[/red]\n"
)
@log_start_end(log=logger)
def call_quote(self, other_args: List[str]):
"""Process quote command."""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="quote",
description="Get current exchange rate quote",
)
ns_parser = parse_known_args_and_warn(parser, other_args)
if ns_parser:
if self.to_symbol and self.from_symbol:
av_view.display_quote(self.to_symbol, self.from_symbol)
else:
logger.error(
"Make sure both a 'to' symbol and a 'from' symbol are selected."
)
console.print(
"[red]Make sure both a 'to' symbol and a 'from' symbol are selected.[/red]\n"
)
# MENUS
@log_start_end(log=logger)
@check_api_key(["OANDA_ACCOUNT_TYPE", "OANDA_ACCOUNT", "OANDA_TOKEN"])
def call_oanda(self, _):
"""Enter Oanda menu."""
from gamestonk_terminal.forex.oanda.oanda_controller import OandaController
if self.to_symbol and self.from_symbol:
self.queue = self.load_class(
OandaController,
queue=self.queue,
)
else:
console.print("No currency pair data is loaded. Use 'load' to load data.\n")
@log_start_end(log=logger)
def call_ta(self, _):
"""Process ta command"""
from gamestonk_terminal.forex.technical_analysis.ta_controller import (
TechnicalAnalysisController,
)
# TODO: Play with this to get correct usage
if self.to_symbol and self.from_symbol and not self.data.empty:
self.queue = self.load_class(
TechnicalAnalysisController,
ticker=f"{self.from_symbol}/{self.to_symbol}",
source=self.source,
data=self.data,
start=self.data.index[0],
interval="",
queue=self.queue,
)
else:
console.print("No currency pair data is loaded. Use 'load' to load data.\n")
# HELP WANTED!
# TODO: Add news and reddit commands back
# Behavioural analysis and exploratory data analysis would be useful in the
# forex menu. The existing integrations of the common ba and eda components
# into the stocks context show how this can be done.
# The earlier implementation did not work and was deleted in commit
# d0e51033f7d5d4da6386b9e0b787892979924dce
|
#!/usr/bin/env python
import os
def main():
"""Adds newlines back to every file, to make them PEP8 compliant."""
root_path = os.getcwd()
for dirpath, dirnames, filenames in os.walk(root_path):
for filename in filenames:
path = os.path.join(dirpath, filename)
if path.endswith('.py'):
with open(path, 'rb') as f:
contents = f.read()
                if not contents.endswith(b'\n'):
                    with open(path, 'wb') as f:
                        f.write(contents + b'\n')
if __name__ == '__main__':
main()
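# Usage sketch (assumption: run from the repository root, so os.getcwd() is the
# tree you want to fix; the script filename here is a hypothetical example):
#   python fix_newlines.py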
|
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class SubstanceSourceMaterial_FractionDescriptionSchema:
"""
Source material shall capture information on the taxonomic and anatomical
origins as well as the fraction of a material that can result in or can be
modified to form a substance. This set of data elements shall be used to
define polymer substances isolated from biological matrices. Taxonomic and
anatomical origins shall be described using a controlled vocabulary as
    required. This information is captured for naturally derived polymers (e.g.
    starch) and structurally diverse substances. For Organisms belonging to the
Kingdom Plantae the Substance level defines the fresh material of a single
species or infraspecies, the Herbal Drug and the Herbal preparation. For
Herbal preparations, the fraction information will be captured at the
Substance information level and additional information for herbal extracts
will be captured at the Specified Substance Group 1 information level. See for
further explanation the Substance Class: Structurally Diverse and the herbal
annex.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = None,
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
include_modifierExtension: Optional[bool] = False,
use_date_for: Optional[List[str]] = None,
parent_path: Optional[str] = "",
) -> Union[StructType, DataType]:
"""
Source material shall capture information on the taxonomic and anatomical
origins as well as the fraction of a material that can result in or can be
modified to form a substance. This set of data elements shall be used to
define polymer substances isolated from biological matrices. Taxonomic and
anatomical origins shall be described using a controlled vocabulary as
        required. This information is captured for naturally derived polymers (e.g.
        starch) and structurally diverse substances. For Organisms belonging to the
Kingdom Plantae the Substance level defines the fresh material of a single
species or infraspecies, the Herbal Drug and the Herbal preparation. For
Herbal preparations, the fraction information will be captured at the
Substance information level and additional information for herbal extracts
will be captured at the Specified Substance Group 1 information level. See for
further explanation the Substance Class: Structurally Diverse and the herbal
annex.
id: Unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
fraction: This element is capturing information about the fraction of a plant part, or
human plasma for fractionation.
materialType: The specific type of the material constituting the component. For Herbal
preparations the particulars of the extracts (liquid/dry) is described in
Specified Substance Group 1.
"""
if extension_fields is None:
extension_fields = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueUrl",
"valueReference",
"valueCodeableConcept",
"valueAddress",
]
from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.r4.complex_types.codeableconcept import (
CodeableConceptSchema,
)
if (
max_recursion_limit
and nesting_list.count("SubstanceSourceMaterial_FractionDescription")
>= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + [
"SubstanceSourceMaterial_FractionDescription"
]
my_parent_path = (
parent_path + ".substancesourcematerial_fractiondescription"
if parent_path
else "substancesourcematerial_fractiondescription"
)
schema = StructType(
[
# Unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. To make the use of extensions safe and manageable,
# there is a strict set of governance applied to the definition and use of
# extensions. Though any implementer can define an extension, there is a set of
# requirements that SHALL be met as part of the definition of the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the element and that modifies the understanding of the element
# in which it is contained and/or the understanding of the containing element's
# descendants. Usually modifier elements provide negation or qualification. To
# make the use of extensions safe and manageable, there is a strict set of
# governance applied to the definition and use of extensions. Though any
# implementer can define an extension, there is a set of requirements that SHALL
# be met as part of the definition of the extension. Applications processing a
# resource are required to check for modifier extensions.
#
# Modifier extensions SHALL NOT change the meaning of any elements on Resource
# or DomainResource (including cannot change the meaning of modifierExtension
# itself).
StructField(
"modifierExtension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# This element is capturing information about the fraction of a plant part, or
# human plasma for fractionation.
StructField("fraction", StringType(), True),
# The specific type of the material constituting the component. For Herbal
# preparations the particulars of the extracts (liquid/dry) is described in
# Specified Substance Group 1.
StructField(
"materialType",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
if not include_modifierExtension:
schema.fields = [
c
if c.name != "modifierExtension"
else StructField("modifierExtension", StringType(), True)
for c in schema.fields
]
return schema
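# Usage sketch (illustrative; assumes an active SparkSession and that the
# spark_fhir_schemas package imported above is installed; the JSON filename
# is a hypothetical example):
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.getOrCreate()
#   schema = SubstanceSourceMaterial_FractionDescriptionSchema.get_schema()
#   df = spark.read.schema(schema).json("fraction_descriptions.json")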
|
# encoding=utf-8
import unittest

# explicit imports for np/torch used below, rather than relying on the
# star import to re-export them
import numpy as np
import torch
from numpy.testing import assert_allclose

from ..similarities import *
from .. import similarities as sims
class TestSimilarities(unittest.TestCase):
def setUp(self):
self.X = [
[1, 0, 1, 0],
[1, 1, 0, 1],
[0, 1, 0, 0]
]
self.Y = [
[1, 0, 1, 0],
[1, 1, 0, 1],
[0, 1, 0, 0],
[0, 0, 0, 0]
]
def test_cosine_similarity(self):
result = [
[1 , 0.40824829, 0 , 0],
[0.40824829, 1 , 0.57735027, 0],
[0 , 0.57735027, 1 , 0]
]
similarities = cosine_similarity(self.X, self.Y)
assert_allclose(result, similarities)
def test_euclidean_similarity(self):
result = [
[np.inf, 0.57735027, 0.57735027, 0.70710678],
            [0.57735027, np.inf, 0.70710678, 0.57735027],
[0.57735027, 0.70710678, np.inf, 1]
]
similarities = euclidean_similarity(self.X, self.Y)
assert_allclose(result, similarities)
def test_jaccard_similarity(self):
result = [
[1, 0.25, 0, 0],
[0.25, 1, 0.33333333, 0],
[0, 0.33333333, 1, 0]
]
similarities = jaccard_similarity(self.X, self.Y)
assert_allclose(result, similarities)
def test_correlation_similarity(self):
# array([[1. , 0.21132487, 0.21132487, 0.5 ],
# [0.21132487, 1. , 0.66666667, 0.5 ],
# [0.21132487, 0.66666667, 1. , 0.5 ]])
result = sims._compute_similarities(correlation_similarity_1D, self.X, self.Y)
result[:, -1] = np.array([0.5, 0.5, 0.5])
X = np.array(self.X)
Y = np.array(self.Y)
similarities = correlation_similarity(X, Y)
assert_allclose(result, similarities)
def test_torch_cosine_similarity(self):
a = torch.randn(3, 2).double()
b = torch.randn(4, 2).double()
sim1 = torch_cosine_similarity(a, b)
sim2 = cosine_similarity(a.numpy(), b.numpy())
assert_allclose(sim1, sim2)
def test_calculate_doc_partial_similarities(self):
# a = [torch.DoubleTensor([[-0.66280855, -0.40342563]]),
# torch.DoubleTensor([[ 1.42738917, -2.08969945],
# [-1.20685194, -0.19919694]])]
# b = [torch.DoubleTensor([[-0.77491406, -0.39541403]]),
# torch.DoubleTensor([[ 0.79141788, 0.14895806],
# [ 0.38944222, -1.21501918]]),
# torch.DoubleTensor([[ 0.95401881, 0.7989287 ],
# [ 1.43606952, -0.20911604],
# [ 0.24571258, -2.08214752]])]
# cosine similarities
# [
# [
# tensor([[0.9972]], dtype=torch.float64),
# tensor([[-0.9356, 0.2344]], dtype=torch.float64),
# tensor([[-0.9887, -0.7704, 0.4162]], dtype=torch.float64)
# ],
# [
# tensor([[-0.1271], [ 0.9529]], dtype=torch.float64),
# tensor([[ 0.4016, 0.9585], [-0.9997, -0.1461]], dtype=torch.float64),
# tensor([[-0.0977, 0.6771, 0.8862], [-0.8610, -0.9529, 0.0461]], dtype=torch.float64)
# ]
# ]
a = [np.array([[-0.66280855, -0.40342563]]),
np.array([[ 1.42738917, -2.08969945],
[-1.20685194, -0.19919694]])]
b = [np.array([[-0.77491406, -0.39541403]]),
np.array([[ 0.79141788, 0.14895806],
[ 0.38944222, -1.21501918]]),
np.array([[ 0.95401881, 0.7989287 ],
[ 1.43606952, -0.20911604],
[ 0.24571258, -2.08214752]])]
c = [[1.0] * 1, [0.5] * 2]
# doc_sims = [
# [0.9972, 0.2344, 0.4162],
# [0.4129, 0.4062, 0.46615]
# ]
doc_sims = [
[0.99719368, 0.23438481, 0.41623218],
[0.41288687, 0.40621558, 0.46613011]
]
result = calculate_doc_partial_similarities(a, b, c, "cuda:0")
assert_allclose(doc_sims, result)
if __name__ == "__main__":
unittest.main()
|
from torch.utils.data import Dataset, DataLoader, Subset
from zipfile import BadZipFile
import os
from process_data import files_utils, mesh_utils, points_utils
import options
from constants import DATASET
from custom_types import *
import json
class MeshDataset(Dataset):
@property
def transforms(self):
return self.opt.transforms
@property
def recon(self) -> bool:
return self.opt.recon
def cache_path(self, idx) -> str:
return os.path.join(DATASET, self.opt.tag, f'{self.opt.info}_{idx:04d}.npy')
def data_path(self, idx) -> str:
return os.path.join(DATASET, self.opt.tag, f'{self.opt.tag}_{idx:04d}.npz')
def delete_cache(self):
if self.cache_length > 0:
for idx in range(len(self)):
cache_path = self.cache_path(idx)
if os.path.isfile(cache_path):
os.remove(cache_path)
def __del__(self):
self.delete_cache()
@staticmethod
def join2root(sub_name) -> str:
return os.path.join(DATASET, sub_name)
def get_taxonomy_models_paths(self):
with open(self.join2root('taxonomy.json'), 'r') as f:
metadata = json.load(f)
for info in metadata:
class_name = info['name'].split(',')[0].replace(' ', '_')
if class_name == self.opt.tag:
taxonomy_dir = self.join2root(info['synsetId'])
if os.path.isdir(taxonomy_dir):
return files_utils.collect(taxonomy_dir, '.obj', '.off')
def sample_sub_points(self, data: mesh_utils.MeshWrap):
p = self.opt.partial_range[0] + np.random.random() * (self.opt.partial_range[1] - self.opt.partial_range[0])
sub_mesh, sub_areas = mesh_utils.split_mesh_side(data.mesh(), data['face_centers'], data['face_areas'],
data['total_area'], p)
sub_points = mesh_utils.sample_on_mesh(sub_mesh, sub_areas, self.opt.partial_samples[1])
return sub_points
def get_sub_points(self, idx: int, data: mesh_utils.MeshWrap) -> V:
if self.cache_length < 1:
return self.sample_sub_points(data)
else:
cache_path = self.cache_path(idx)
if not os.path.isfile(cache_path):
sub_pc_data = [np.expand_dims(self.sample_sub_points(data), axis=0) for _ in range(self.cache_length)]
sub_pc_data = np.concatenate(sub_pc_data, axis=0)
np.save(cache_path[:-4], sub_pc_data)
else:
sub_pc_data = np.load(cache_path)
pc_idx = int(np.random.randint(0, self.cache_length))
return sub_pc_data[pc_idx]
def get_transformed_pc(self, idx: int, data: mesh_utils.MeshWrap, base_points: V) -> Tuple[VS, VS]:
partial = self.get_sub_points(idx, data)
partial, transforms = points_utils.apply_transforms(self.transforms, base_points, partial)
return partial, transforms
def __getitem__(self, idx):
data = self.load_mesh(idx)
mesh, face_areas = data.mesh(), data['face_areas']
points = mesh_utils.sample_on_mesh(mesh, face_areas, self.opt.partial_samples[0])
if self.recon or len(self.transforms) > 0:
pc_trans, transforms = self.get_transformed_pc(idx, data, points)
return [points] + [pc.astype(np.float32) for pc in pc_trans] + [tr.astype(np.float32) for tr in transforms]
else:
return points
def __len__(self):
return len(self.data_paths)
@staticmethod
def first_load(mesh_path: str, data_path: str) -> mesh_utils.MeshWrap:
vs, faces = mesh_utils.load_mesh(mesh_path)
hold = vs[:, 1].copy()
# swapping y and z
vs[:, 1] = vs[:, 2]
vs[:, 2] = hold
mesh = (vs, faces)
mesh = mesh_utils.to_unit(mesh)
face_areas, face_normals = mesh_utils.compute_face_areas(mesh)
face_centers = mesh_utils.compute_faces_centers(mesh)
data = {'vs': mesh[0], 'faces': mesh[1], 'face_areas': face_areas, 'face_normals': face_normals,
'total_area': face_areas.sum(), 'face_ne': mesh_utils.compute_face_ne(mesh),
'face_centers': face_centers}
np.savez_compressed(data_path, **data)
return mesh_utils.MeshWrap(data)
def load_mesh(self, idx: int) -> mesh_utils.MeshWrap:
if self.all_data[idx] is None:
requested_att = ['vs', 'faces', 'face_areas', 'face_normals', 'face_ne', 'total_area', 'face_centers']
base_path = self.data_paths[idx]
mesh_path = os.path.join(base_path[0], f'{base_path[1]}{base_path[2]}')
data_path = self.data_path(idx)
            if os.path.isfile(data_path):
                try:
                    data = np.load(data_path)
                    if all(att in data for att in requested_att):
                        self.all_data[idx] = mesh_utils.MeshWrap(dict(data))
                except BadZipFile:
                    print("BadZipFile")
                # fall back to a fresh load if the cached file was corrupt or incomplete
                if self.all_data[idx] is None:
                    self.all_data[idx] = self.first_load(mesh_path, data_path)
            else:
                self.all_data[idx] = self.first_load(mesh_path, data_path)
return self.all_data[idx]
    def __init__(self, opt: options.Options, cache_length: int):
super(MeshDataset, self).__init__()
self.opt = opt
files_utils.init_folders(self.data_path(0))
self.data_paths = self.get_taxonomy_models_paths()
self.cache_length = cache_length
self.all_data: List[Union[N, mesh_utils.MeshWrap]] = [None] * len(self)
self.delete_cache()
class AnotherLoaderWrap:
def __init__(self, base_loader, batch_size):
self.base_loader = base_loader
self.batch_size = batch_size
self.choices = np.arange(len(self.base_loader.dataset))
self.wrap_iter, self.counter = self.init_iter()
def __iter__(self):
return self.base_loader.__iter__()
def init_iter(self):
return self.__iter__(), len(self.base_loader.dataset)
def __next__(self):
if self.counter < 0:
self.wrap_iter, self.counter = self.init_iter()
self.counter = self.counter - self.batch_size
return next(self.wrap_iter)
def get_random_batch(self):
indices = np.random.choice(self.choices, self.batch_size, replace=False)
batch = [self.base_loader.dataset[idx] for idx in indices]
return indices, self.base_loader.collate_fn(batch)
def get_by_ids(self, *indices):
batch = [self.base_loader.dataset[idx] for idx in indices]
return self.base_loader.collate_fn(batch)
def __getitem__(self, idx):
return self.base_loader.dataset[idx]
def __len__(self):
return len(self.base_loader.dataset)
def get_loader(opt: options.Options, train=True) -> DataLoader:
dataset = MeshDataset(opt, 20)
ds_length = len(dataset)
if 'vae' not in opt.task:
splits_file = f'{DATASET}/{opt.tag}/{opt.tag}_split'
if os.path.isfile(splits_file + '.npy'):
ds_inds = np.load(splits_file + '.npy')
else:
ds_inds = np.arange(ds_length)
np.random.shuffle(ds_inds)
np.save(splits_file, ds_inds)
inds = {True: ds_inds[int(0.1 * ds_length):], False: ds_inds[:int(0.1 * ds_length)]}
dataset = Subset(dataset, inds[train])
    loader = DataLoader(dataset, batch_size=opt.batch_size, num_workers=1 + (2 * train), shuffle=train,
                        drop_last=train)
print(f"{opt.tag}- {'train' if train else 'test'} dataset length is: {len(dataset)}")
return loader
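# Minimal usage sketch (assumes an options.Options instance whose `tag`,
# `task` and `batch_size` fields are configured as the code above expects):
#   opt = options.Options()
#   train_loader = get_loader(opt, train=True)
#   wrapped = AnotherLoaderWrap(train_loader, opt.batch_size)
#   indices, batch = wrapped.get_random_batch()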
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import os
from flask import Blueprint, render_template, request, redirect
from requests.exceptions import SSLError
from werkzeug.utils import secure_filename
from qpylib import qpylib
# pylint: disable=invalid-name
viewsbp = Blueprint('viewsbp', __name__, url_prefix='/')
CERTS_DIRECTORY = '/opt/app-root/store/certs'
@viewsbp.route('/')
@viewsbp.route('/index')
def index():
cert_app = get_certificate_management_app()
url = request.args.get('address')
if url is not None:
try:
response = requests.get(url)
except SSLError as ssl_error:
return render_template('index.html',
url=url,
result='An SSL error occurred!',
                                   status_code=str(ssl_error),
cert_app=cert_app)
return render_template('index.html',
url=url,
status_code=response.status_code,
result=response.text, cert_app=cert_app
)
return render_template('index.html', cert_app=cert_app)
@viewsbp.route('/upload_cert', methods=['POST'])
def upload_cert():
if 'cert' not in request.files:
qpylib.log('no certificate file in upload request')
return redirect('/', code=303)
file = request.files['cert']
# If the user does not select a file, the browser also submits an empty part without filename
if file.filename == '':
qpylib.log('no certificate file in upload request')
return redirect('/', code=303)
filename = secure_filename(file.filename)
file.save(os.path.join(CERTS_DIRECTORY, filename))
refresh_certs()
return redirect('/', code=303)
def get_certificate_management_app():
params = {'filter': 'manifest(name)="QRadar Certificate Management" and application_state(status)="RUNNING"',
'fields': 'application_state'}
response = qpylib.REST(rest_action='GET',
request_url='/api/gui_app_framework/applications',
params=params)
    if response.status_code != 200:
        qpylib.log('Failed to get Certificate Management App')
        return ""
    json_result = response.json()
    address = ""
    for app_id in json_result:
cert_management_id = app_id['application_state']['application_id']
console_ip = qpylib.get_console_address()
address = "https://{0}/console/plugins/{1}/app_proxy/#/browse/uploadRoot".format(console_ip, cert_management_id)
return address
def refresh_certs():
os.system('sudo /opt/app-root/bin/update_ca_bundle.sh')
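# Illustrative client calls against this blueprint (paths follow the routes
# defined above; the host is a deployment-specific assumption):
#   curl "http://<app-host>/index?address=https://example.com"
#   curl -X POST -F "cert=@my_ca.pem" "http://<app-host>/upload_cert"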
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import re
import subprocess
import time
from typing import Any, Dict, Iterator, List, Optional, Union
from airflow.configuration import conf as airflow_conf
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.security.kerberos import renew_from_kt
from airflow.utils.log.logging_mixin import LoggingMixin
try:
    from airflow.kubernetes import kube_client
except ImportError:
    kube_client = None  # kubernetes extras are not installed
# pylint: disable=too-many-instance-attributes
class SparkSubmitHook(BaseHook, LoggingMixin):
"""
This hook is a wrapper around the spark-submit binary to kick off a spark-submit job.
It requires that the "spark-submit" binary is in the PATH or the spark_home to be
supplied.
:param conf: Arbitrary Spark configuration properties
:type conf: dict
:param conn_id: The connection id as configured in Airflow administration. When an
invalid connection_id is supplied, it will default to yarn.
:type conn_id: str
:param files: Upload additional files to the executor running the job, separated by a
comma. Files will be placed in the working directory of each executor.
For example, serialized objects.
:type files: str
:param py_files: Additional python files used by the job, can be .zip, .egg or .py.
:type py_files: str
    :param archives: Archives that spark should unzip (and possibly tag with #ALIAS) into
        the application working directory.
    :type archives: str
:param driver_class_path: Additional, driver-specific, classpath settings.
:type driver_class_path: str
:param jars: Submit additional jars to upload and place them in executor classpath.
:type jars: str
:param java_class: the main class of the Java application
:type java_class: str
:param packages: Comma-separated list of maven coordinates of jars to include on the
driver and executor classpaths
:type packages: str
:param exclude_packages: Comma-separated list of maven coordinates of jars to exclude
while resolving the dependencies provided in 'packages'
:type exclude_packages: str
:param repositories: Comma-separated list of additional remote repositories to search
for the maven coordinates given with 'packages'
:type repositories: str
:param total_executor_cores: (Standalone & Mesos only) Total cores for all executors
(Default: all the available cores on the worker)
:type total_executor_cores: int
:param executor_cores: (Standalone, YARN and Kubernetes only) Number of cores per
executor (Default: 2)
:type executor_cores: int
:param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
:type executor_memory: str
:param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G) (Default: 1G)
:type driver_memory: str
:param keytab: Full path to the file that contains the keytab
:type keytab: str
:param principal: The name of the kerberos principal used for keytab
:type principal: str
:param proxy_user: User to impersonate when submitting the application
:type proxy_user: str
    :param name: Name of the job (default: default-name)
:type name: str
:param num_executors: Number of executors to launch
:type num_executors: int
:param status_poll_interval: Seconds to wait between polls of driver status in cluster
mode (Default: 1)
:type status_poll_interval: int
:param application_args: Arguments for the application being submitted
:type application_args: list
:param env_vars: Environment variables for spark-submit. It
supports yarn and k8s mode too.
:type env_vars: dict
:param verbose: Whether to pass the verbose flag to spark-submit process for debugging
:type verbose: bool
:param spark_binary: The command to use for spark submit.
Some distros may use spark2-submit.
:type spark_binary: str
"""
# pylint: disable=too-many-arguments,too-many-locals,too-many-branches
def __init__(self,
conf: Optional[Dict[str, Any]] = None,
conn_id: str = 'spark_default',
files: Optional[str] = None,
py_files: Optional[str] = None,
archives: Optional[str] = None,
driver_class_path: Optional[str] = None,
jars: Optional[str] = None,
java_class: Optional[str] = None,
packages: Optional[str] = None,
exclude_packages: Optional[str] = None,
repositories: Optional[str] = None,
total_executor_cores: Optional[int] = None,
executor_cores: Optional[int] = None,
executor_memory: Optional[str] = None,
driver_memory: Optional[str] = None,
keytab: Optional[str] = None,
principal: Optional[str] = None,
proxy_user: Optional[str] = None,
name: str = 'default-name',
num_executors: Optional[int] = None,
status_poll_interval: int = 1,
application_args: Optional[List[Any]] = None,
env_vars: Optional[Dict[str, Any]] = None,
verbose: bool = False,
spark_binary: Optional[str] = None
) -> None:
super().__init__()
self._conf = conf or {}
self._conn_id = conn_id
self._files = files
self._py_files = py_files
self._archives = archives
self._driver_class_path = driver_class_path
self._jars = jars
self._java_class = java_class
self._packages = packages
self._exclude_packages = exclude_packages
self._repositories = repositories
self._total_executor_cores = total_executor_cores
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._driver_memory = driver_memory
self._keytab = keytab
self._principal = principal
self._proxy_user = proxy_user
self._name = name
self._num_executors = num_executors
self._status_poll_interval = status_poll_interval
self._application_args = application_args
self._env_vars = env_vars
self._verbose = verbose
self._submit_sp: Optional[Any] = None
self._yarn_application_id: Optional[str] = None
self._kubernetes_driver_pod: Optional[str] = None
self._spark_binary = spark_binary
self._connection = self._resolve_connection()
self._is_yarn = 'yarn' in self._connection['master']
self._is_kubernetes = 'k8s' in self._connection['master']
        if self._is_kubernetes and kube_client is None:
            raise RuntimeError(
                "Master {} set for Kubernetes, but the kubernetes dependencies "
                "are not installed!".format(self._connection['master']))
self._should_track_driver_status = self._resolve_should_track_driver_status()
self._driver_id: Optional[str] = None
self._driver_status: Optional[str] = None
self._spark_exit_code: Optional[int] = None
self._env: Optional[Dict[str, Any]] = None
def _resolve_should_track_driver_status(self) -> bool:
"""
Determines whether or not this hook should poll the spark driver status through
subsequent spark-submit status requests after the initial spark-submit request
:return: if the driver status should be tracked
"""
return ('spark://' in self._connection['master'] and
self._connection['deploy_mode'] == 'cluster')
def _resolve_connection(self) -> Dict[str, Any]:
# Build from connection master or default to yarn if not available
conn_data = {'master': 'yarn',
'queue': None,
'deploy_mode': None,
'spark_home': None,
'spark_binary': self._spark_binary or "spark-submit",
'namespace': None}
try:
# Master can be local, yarn, spark://HOST:PORT, mesos://HOST:PORT and
# k8s://https://<HOST>:<PORT>
conn = self.get_connection(self._conn_id)
if conn.port:
conn_data['master'] = "{}:{}".format(conn.host, conn.port)
else:
conn_data['master'] = conn.host
# Determine optional yarn queue from the extra field
extra = conn.extra_dejson
conn_data['queue'] = extra.get('queue', None)
conn_data['deploy_mode'] = extra.get('deploy-mode', None)
conn_data['spark_home'] = extra.get('spark-home', None)
conn_data['spark_binary'] = self._spark_binary or \
extra.get('spark-binary', "spark-submit")
conn_data['namespace'] = extra.get('namespace')
except AirflowException:
self.log.info(
"Could not load connection string %s, defaulting to %s",
self._conn_id, conn_data['master']
)
if 'spark.kubernetes.namespace' in self._conf:
conn_data['namespace'] = self._conf['spark.kubernetes.namespace']
return conn_data
def get_conn(self) -> Any:
pass
def _get_spark_binary_path(self) -> List[str]:
# If the spark_home is passed then build the spark-submit executable path using
# the spark_home; otherwise assume that spark-submit is present in the path to
# the executing user
if self._connection['spark_home']:
connection_cmd = [os.path.join(self._connection['spark_home'], 'bin',
self._connection['spark_binary'])]
else:
connection_cmd = [self._connection['spark_binary']]
return connection_cmd
def _mask_cmd(self, connection_cmd: Union[str, List[str]]) -> str:
# Mask any password related fields in application args with key value pair
# where key contains password (case insensitive), e.g. HivePassword='abc'
connection_cmd_masked = re.sub(
r"("
r"\S*?" # Match all non-whitespace characters before...
r"(?:secret|password)" # ...literally a "secret" or "password"
# word (not capturing them).
r"\S*?" # All non-whitespace characters before either...
r"(?:=|\s+)" # ...an equal sign or whitespace characters
# (not capturing them).
r"(['\"]?)" # An optional single or double quote.
r")" # This is the end of the first capturing group.
r"(?:(?!\2\s).)*" # All characters between optional quotes
# (matched above); if the value is quoted,
# it may contain whitespace.
r"(\2)", # Optional matching quote.
r'\1******\3',
' '.join(connection_cmd),
flags=re.I,
)
return connection_cmd_masked
def _build_spark_submit_command(self, application: str) -> List[str]:
"""
Construct the spark-submit command to execute.
:param application: command to append to the spark-submit command
:type application: str
:return: full command to be executed
"""
connection_cmd = self._get_spark_binary_path()
# The url of the spark master
connection_cmd += ["--master", self._connection['master']]
for key in self._conf:
connection_cmd += ["--conf", "{}={}".format(key, str(self._conf[key]))]
if self._env_vars and (self._is_kubernetes or self._is_yarn):
if self._is_yarn:
tmpl = "spark.yarn.appMasterEnv.{}={}"
# Allow dynamic setting of hadoop/yarn configuration environments
self._env = self._env_vars
else:
tmpl = "spark.kubernetes.driverEnv.{}={}"
for key in self._env_vars:
connection_cmd += [
"--conf",
tmpl.format(key, str(self._env_vars[key]))]
elif self._env_vars and self._connection['deploy_mode'] != "cluster":
self._env = self._env_vars # Do it on Popen of the process
elif self._env_vars and self._connection['deploy_mode'] == "cluster":
raise AirflowException(
"SparkSubmitHook env_vars is not supported in standalone-cluster mode.")
if self._is_kubernetes and self._connection['namespace']:
connection_cmd += ["--conf", "spark.kubernetes.namespace={}".format(
self._connection['namespace'])]
if self._files:
connection_cmd += ["--files", self._files]
if self._py_files:
connection_cmd += ["--py-files", self._py_files]
if self._archives:
connection_cmd += ["--archives", self._archives]
if self._driver_class_path:
connection_cmd += ["--driver-class-path", self._driver_class_path]
if self._jars:
connection_cmd += ["--jars", self._jars]
if self._packages:
connection_cmd += ["--packages", self._packages]
if self._exclude_packages:
connection_cmd += ["--exclude-packages", self._exclude_packages]
if self._repositories:
connection_cmd += ["--repositories", self._repositories]
if self._num_executors:
connection_cmd += ["--num-executors", str(self._num_executors)]
if self._total_executor_cores:
connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)]
if self._executor_cores:
connection_cmd += ["--executor-cores", str(self._executor_cores)]
if self._executor_memory:
connection_cmd += ["--executor-memory", self._executor_memory]
if self._driver_memory:
connection_cmd += ["--driver-memory", self._driver_memory]
if self._keytab:
connection_cmd += ["--keytab", self._keytab]
if self._principal:
connection_cmd += ["--principal", self._principal]
if self._proxy_user:
connection_cmd += ["--proxy-user", self._proxy_user]
if self._name:
connection_cmd += ["--name", self._name]
if self._java_class:
connection_cmd += ["--class", self._java_class]
if self._verbose:
connection_cmd += ["--verbose"]
if self._connection['queue']:
connection_cmd += ["--queue", self._connection['queue']]
if self._connection['deploy_mode']:
connection_cmd += ["--deploy-mode", self._connection['deploy_mode']]
# The actual script to execute
connection_cmd += [application]
# Append any application arguments
if self._application_args:
connection_cmd += self._application_args
self.log.info("Spark-Submit cmd: %s", self._mask_cmd(connection_cmd))
return connection_cmd
def _build_track_driver_status_command(self) -> List[str]:
"""
Construct the command to poll the driver status.
:return: full command to be executed
"""
curl_max_wait_time = 30
spark_host = self._connection['master']
        if spark_host.endswith(':6066'):
            spark_host = spark_host.replace("spark://", "http://")
            # The driver id is embedded in the REST URL, so it must be known
            if not self._driver_id:
                raise AirflowException(
                    "Invalid status: attempted to poll driver "
                    "status but no driver id is known. Giving up.")
            connection_cmd = [
                "/usr/bin/curl",
                "--max-time",
                str(curl_max_wait_time),
                "{host}/v1/submissions/status/{submission_id}".format(
                    host=spark_host,
                    submission_id=self._driver_id)]
            self.log.info(connection_cmd)
else:
connection_cmd = self._get_spark_binary_path()
# The url to the spark master
connection_cmd += ["--master", self._connection['master']]
# The driver id so we can poll for its status
if self._driver_id:
connection_cmd += ["--status", self._driver_id]
else:
raise AirflowException(
"Invalid status: attempted to poll driver " +
"status but no driver id is known. Giving up.")
self.log.debug("Poll driver status cmd: %s", connection_cmd)
return connection_cmd
def submit(self, application: str = "", **kwargs: Any) -> None:
"""
Remote Popen to execute the spark-submit job
:param application: Submitted application, jar or py file
:type application: str
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_submit_cmd = self._build_spark_submit_command(application)
if self._env:
env = os.environ.copy()
env.update(self._env)
kwargs["env"] = env
self._submit_sp = subprocess.Popen(spark_submit_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=-1,
universal_newlines=True,
**kwargs)
self._process_spark_submit_log(iter(self._submit_sp.stdout)) # type: ignore
returncode = self._submit_sp.wait()
# Check spark-submit return code. In Kubernetes mode, also check the value
# of exit code in the log, as it may differ.
if returncode or (self._is_kubernetes and self._spark_exit_code != 0):
if self._is_kubernetes:
raise AirflowException(
"Cannot execute: {}. Error code is: {}. Kubernetes spark exit code is: {}".format(
self._mask_cmd(spark_submit_cmd), returncode, self._spark_exit_code
)
)
else:
raise AirflowException(
"Cannot execute: {}. Error code is: {}.".format(
self._mask_cmd(spark_submit_cmd), returncode
)
)
self.log.debug("Should track driver: %s", self._should_track_driver_status)
# We want the Airflow job to wait until the Spark driver is finished
if self._should_track_driver_status:
if self._driver_id is None:
raise AirflowException(
"No driver id is known: something went wrong when executing " +
"the spark submit command"
)
# We start with the SUBMITTED status as initial status
self._driver_status = "SUBMITTED"
# Start tracking the driver status (blocking function)
self._start_driver_status_tracking()
if self._driver_status != "FINISHED":
raise AirflowException(
"ERROR : Driver {} badly exited with status {}"
.format(self._driver_id, self._driver_status)
)
def _process_spark_submit_log(self, itr: Iterator[Any]) -> None:
"""
        Processes the spark-submit log output and extracts useful information from it.
If the deploy-mode is 'client', log the output of the submit command as those
are the output logs of the Spark worker directly.
Remark: If the driver needs to be tracked for its status, the log-level of the
spark deploy needs to be at least INFO (log4j.logger.org.apache.spark.deploy=INFO)
:param itr: An iterator which iterates over the input of the subprocess
"""
# Consume the iterator
for line in itr:
line = line.strip()
# If we run yarn cluster mode, we want to extract the application id from
# the logs so we can kill the application when we stop it unexpectedly
if self._is_yarn and self._connection['deploy_mode'] == 'cluster':
match = re.search('(application[0-9_]+)', line)
if match:
self._yarn_application_id = match.groups()[0]
self.log.info("Identified spark driver id: %s",
self._yarn_application_id)
# If we run Kubernetes cluster mode, we want to extract the driver pod id
# from the logs so we can kill the application when we stop it unexpectedly
elif self._is_kubernetes:
match = re.search(r'\s*pod name: ((.+?)-([a-z0-9]+)-driver)', line)
if match:
self._kubernetes_driver_pod = match.groups()[0]
self.log.info("Identified spark driver pod: %s",
self._kubernetes_driver_pod)
# Store the Spark Exit code
match_exit_code = re.search(r'\s*[eE]xit code: (\d+)', line)
if match_exit_code:
self._spark_exit_code = int(match_exit_code.groups()[0])
# if we run in standalone cluster mode and we want to track the driver status
# we need to extract the driver id from the logs. This allows us to poll for
# the status using the driver id. Also, we can kill the driver when needed.
elif self._should_track_driver_status and not self._driver_id:
match_driver_id = re.search(r'(driver-[0-9\-]+)', line)
if match_driver_id:
self._driver_id = match_driver_id.groups()[0]
self.log.info("identified spark driver id: %s", self._driver_id)
self.log.info(line)
def _process_spark_status_log(self, itr: Iterator[Any]) -> None:
"""
Parses the logs of the spark driver status query process
:param itr: An iterator which iterates over the input of the subprocess
"""
driver_found = False
# Consume the iterator
for line in itr:
line = line.strip()
# Check if the log line is about the driver status and extract the status.
if "driverState" in line:
self._driver_status = line.split(' : ')[1] \
.replace(',', '').replace('\"', '').strip()
driver_found = True
self.log.debug("spark driver status log: %s", line)
if not driver_found:
self._driver_status = "UNKNOWN"
def _start_driver_status_tracking(self) -> None:
"""
Polls the driver based on self._driver_id to get the status.
Finish successfully when the status is FINISHED.
Finish failed when the status is ERROR/UNKNOWN/KILLED/FAILED.
Possible status:
SUBMITTED
Submitted but not yet scheduled on a worker
RUNNING
Has been allocated to a worker to run
FINISHED
Previously ran and exited cleanly
RELAUNCHING
Exited non-zero or due to worker failure, but has not yet
started running again
UNKNOWN
The status of the driver is temporarily not known due to
master failure recovery
KILLED
A user manually killed this driver
FAILED
The driver exited non-zero and was not supervised
ERROR
Unable to run or restart due to an unrecoverable error
(e.g. missing jar file)
"""
        # When your Spark Standalone cluster is not performing well
        # due to misconfiguration or heavy load,
        # it is possible that the polling request will time out.
        # Therefore we use a simple retry mechanism.
missed_job_status_reports = 0
max_missed_job_status_reports = 10
# Keep polling as long as the driver is processing
while self._driver_status not in ["FINISHED", "UNKNOWN",
"KILLED", "FAILED", "ERROR"]:
# Sleep for n seconds as we do not want to spam the cluster
time.sleep(self._status_poll_interval)
self.log.debug("polling status of spark driver with id %s", self._driver_id)
            poll_driver_status_cmd = self._build_track_driver_status_command()
            status_process: Any = subprocess.Popen(poll_driver_status_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=-1,
universal_newlines=True
)
self._process_spark_status_log(iter(status_process.stdout))
returncode = status_process.wait()
if returncode:
if missed_job_status_reports < max_missed_job_status_reports:
missed_job_status_reports += 1
else:
raise AirflowException(
"Failed to poll for the driver status {} times: returncode = {}"
.format(max_missed_job_status_reports, returncode)
)
def _build_spark_driver_kill_command(self) -> List[str]:
"""
Construct the spark-submit command to kill a driver.
:return: full command to kill a driver
"""
# If the spark_home is passed then build the spark-submit executable path using
# the spark_home; otherwise assume that spark-submit is present in the path to
# the executing user
if self._connection['spark_home']:
connection_cmd = [os.path.join(self._connection['spark_home'],
'bin',
self._connection['spark_binary'])]
else:
connection_cmd = [self._connection['spark_binary']]
# The url to the spark master
connection_cmd += ["--master", self._connection['master']]
# The actual kill command
if self._driver_id:
connection_cmd += ["--kill", self._driver_id]
self.log.debug("Spark-Kill cmd: %s", connection_cmd)
return connection_cmd
def on_kill(self) -> None:
"""
Kill Spark submit command
"""
self.log.debug("Kill Command is being called")
if self._should_track_driver_status:
if self._driver_id:
self.log.info('Killing driver %s on cluster', self._driver_id)
kill_cmd = self._build_spark_driver_kill_command()
driver_kill = subprocess.Popen(kill_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.log.info("Spark driver %s killed with return code: %s",
self._driver_id, driver_kill.wait())
if self._submit_sp and self._submit_sp.poll() is None:
self.log.info('Sending kill signal to %s', self._connection['spark_binary'])
self._submit_sp.kill()
if self._yarn_application_id:
kill_cmd = "yarn application -kill {}" \
.format(self._yarn_application_id).split()
env = None
if self._keytab is not None and self._principal is not None:
# we are ignoring renewal failures from renew_from_kt
# here as the failure could just be due to a non-renewable ticket,
# we still attempt to kill the yarn application
renew_from_kt(self._principal, self._keytab, exit_on_fail=False)
env = os.environ.copy()
env["KRB5CCNAME"] = airflow_conf.get('kerberos', 'ccache')
yarn_kill = subprocess.Popen(kill_cmd,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.log.info("YARN app killed with return code: %s", yarn_kill.wait())
if self._kubernetes_driver_pod:
self.log.info('Killing pod %s on Kubernetes', self._kubernetes_driver_pod)
# Currently only instantiate Kubernetes client for killing a spark pod.
try:
import kubernetes
client = kube_client.get_kube_client()
api_response = client.delete_namespaced_pod(
self._kubernetes_driver_pod,
self._connection['namespace'],
body=kubernetes.client.V1DeleteOptions(),
pretty=True)
self.log.info("Spark on K8s killed with response: %s", api_response)
except kube_client.ApiException as e:
self.log.error("Exception when attempting to kill Spark on K8s:")
self.log.exception(e)
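# Minimal usage sketch (illustrative; assumes an Airflow deployment with a
# 'spark_default' connection configured and spark-submit on the PATH; the
# application path is a hypothetical example):
#   hook = SparkSubmitHook(conn_id='spark_default', name='example-job', verbose=True)
#   hook.submit(application='/path/to/app.py')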
|
#!/usr/bin/python2
'''
Perform basic ELF security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
Needs `readelf` (for ELF) and `objdump` (for PE).
'''
from __future__ import division, print_function
import subprocess
import sys
import os
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
def check_ELF_PIE(executable):
'''
Check for position independent executable (PIE), allowing for address space randomization.
'''
p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.split('\n'):
line = line.split()
if len(line)>=2 and line[0] == 'Type:' and line[1] == 'DYN':
ok = True
return ok
def get_ELF_program_headers(executable):
'''Return type and flags for ELF program headers'''
p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
in_headers = False
count = 0
headers = []
for line in stdout.split('\n'):
if line.startswith('Program Headers:'):
in_headers = True
if line == '':
in_headers = False
if in_headers:
if count == 1: # header line
ofs_typ = line.find('Type')
ofs_offset = line.find('Offset')
ofs_flags = line.find('Flg')
ofs_align = line.find('Align')
if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
                    raise ValueError('Cannot parse readelf -lW output')
elif count > 1:
typ = line[ofs_typ:ofs_offset].rstrip()
flags = line[ofs_flags:ofs_align].rstrip()
headers.append((typ, flags))
count += 1
return headers
def check_ELF_NX(executable):
'''
Check that no sections are writable and executable (including the stack)
'''
have_wx = False
have_gnu_stack = False
for (typ, flags) in get_ELF_program_headers(executable):
if typ == 'GNU_STACK':
have_gnu_stack = True
if 'W' in flags and 'E' in flags: # section is both writable and executable
have_wx = True
return have_gnu_stack and not have_wx
def check_ELF_RELRO(executable):
'''
Check for read-only relocations.
GNU_RELRO program header must exist
Dynamic section must have BIND_NOW flag
'''
have_gnu_relro = False
for (typ, flags) in get_ELF_program_headers(executable):
# Note: not checking flags == 'R': here as linkers set the permission differently
# This does not affect security: the permission flags of the GNU_RELRO program header are ignored, the PT_LOAD header determines the effective permissions.
        # However, the dynamic linker needs to write to this area so these are RW.
# Glibc itself takes care of mprotecting this area R after relocations are finished.
# See also http://permalink.gmane.org/gmane.comp.gnu.binutils/71347
if typ == 'GNU_RELRO':
have_gnu_relro = True
have_bindnow = False
p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
for line in stdout.split('\n'):
tokens = line.split()
        if (len(tokens) > 1 and tokens[1] == '(BIND_NOW)') or (len(tokens) > 2 and tokens[1] == '(FLAGS)' and 'BIND_NOW' in tokens[2]):
have_bindnow = True
return have_gnu_relro and have_bindnow
def check_ELF_Canary(executable):
'''
Check for use of stack canary
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.split('\n'):
if '__stack_chk_fail' in line:
ok = True
return ok
def get_PE_dll_characteristics(executable):
'''
Get PE DllCharacteristics bits
'''
p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
for line in stdout.split('\n'):
tokens = line.split()
if len(tokens)>=2 and tokens[0] == 'DllCharacteristics':
return int(tokens[1],16)
return 0
def check_PE_PIE(executable):
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
return bool(get_PE_dll_characteristics(executable) & 0x40)
def check_PE_NX(executable):
'''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
return bool(get_PE_dll_characteristics(executable) & 0x100)
CHECKS = {
'ELF': [
('PIE', check_ELF_PIE),
('NX', check_ELF_NX),
('RELRO', check_ELF_RELRO),
('Canary', check_ELF_Canary)
],
'PE': [
('PIE', check_PE_PIE),
('NX', check_PE_NX)
]
}
def identify_executable(executable):
    # use the function argument, not the global 'filename' from the main loop
    with open(executable, 'rb') as f:
        magic = f.read(4)
    if magic.startswith(b'MZ'):
        return 'PE'
    elif magic.startswith(b'\x7fELF'):
        return 'ELF'
    return None
if __name__ == '__main__':
retval = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print('%s: unknown format' % filename)
retval = 1
continue
failed = []
for (name, func) in CHECKS[etype]:
if not func(filename):
failed.append(name)
if failed:
print('%s: failed %s' % (filename, ' '.join(failed)))
retval = 1
except IOError:
print('%s: cannot open' % filename)
retval = 1
exit(retval)
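# Usage sketch (illustrative; the script and binary names are hypothetical
# examples): check freshly built binaries, overriding the readelf binary via
# the environment variable honoured above. Exit status is 0 if all checks pass.
#   READELF=/usr/bin/readelf python2 security-check.py ./myprog ./myprog.exe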
|
import os
from dcos import constants
import pytest
from .common import config_set, exec_command, update_config
@pytest.fixture
def env():
r = os.environ.copy()
r.update({
constants.PATH_ENV: os.environ[constants.PATH_ENV],
constants.DCOS_CONFIG_ENV: os.path.join("tests", "data", "dcos.toml"),
})
return r
@pytest.yield_fixture(autouse=True)
def setup_env(env):
config_set("core.dcos_url", "https://dcos.snakeoil.mesosphere.com", env)
try:
yield
finally:
config_set("core.dcos_url", "http://dcos.snakeoil.mesosphere.com", env)
def test_dont_verify_ssl_with_env_var(env):
env[constants.DCOS_SSL_VERIFY_ENV] = 'false'
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'list'], env)
assert returncode == 0
assert stderr == b''
env.pop(constants.DCOS_SSL_VERIFY_ENV)
def test_dont_verify_ssl_with_config(env):
with update_config('core.ssl_verify', 'false', env):
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'list'], env)
assert returncode == 0
assert stderr == b''
def test_verify_ssl_without_cert_env_var(env):
env[constants.DCOS_SSL_VERIFY_ENV] = 'true'
with update_config('core.ssl_verify', None, env):
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'list'], env)
assert returncode == 1
assert stderr.decode('utf-8') == _ssl_error_msg()
env.pop(constants.DCOS_SSL_VERIFY_ENV)
def test_verify_ssl_without_cert_config(env):
with update_config('core.ssl_verify', 'true', env):
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'list'], env)
assert returncode == 1
assert stderr.decode('utf-8') == _ssl_error_msg()
def test_verify_ssl_with_bad_cert_env_var(env):
env[constants.DCOS_SSL_VERIFY_ENV] = 'tests/data/ssl/fake.pem'
with update_config('core.ssl_verify', None, env):
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'list'], env)
assert returncode == 1
assert stderr.decode('utf-8') == _ssl_error_msg()
env.pop(constants.DCOS_SSL_VERIFY_ENV)
def test_verify_ssl_with_bad_cert_config(env):
with update_config('core.ssl_verify', 'tests/data/ssl/fake.pem', env):
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'list'], env)
assert returncode == 1
assert stderr.decode('utf-8') == _ssl_error_msg()
def test_verify_ssl_with_good_cert_env_var(env):
env[constants.DCOS_SSL_VERIFY_ENV] = '/dcos-cli/adminrouter/snakeoil.crt'
with update_config('core.ssl_verify', None, env):
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'list'], env)
assert returncode == 0
assert stderr == b''
env.pop(constants.DCOS_SSL_VERIFY_ENV)
def test_verify_ssl_with_good_cert_config(env):
with update_config(
'core.ssl_verify', '/dcos-cli/adminrouter/snakeoil.crt', env):
returncode, stdout, stderr = exec_command(
['dcos', 'marathon', 'app', 'list'], env)
assert returncode == 0
assert stderr == b''
def _ssl_error_msg():
return (
"An SSL error occurred. To configure your SSL settings, please run: "
"`dcos config set core.ssl_verify <value>`\n"
"<value>: Whether to verify SSL certs for HTTPS or path to certs\n")
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import unittest
from tdda.constraints.testbase import *
from tdda.referencetest import ReferenceTestCase
try:
from tdda.constraints.pd.testpdconstraints import *
except ImportError:
print('Skipping Pandas tests', file=sys.stderr)
try:
from tdda.constraints.db.testdbconstraints import *
# The individual imports of the database driver libraries
# are now all protected with try...except blocks,
# so this try...except is probably now unnecessary.
except ImportError:
print('Skipping Database tests', file=sys.stderr)
if __name__ == '__main__':
ReferenceTestCase.main()
|
# coding: utf-8
# /*##########################################################################
# Copyright (C) 2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
__authors__ = ["V. Valls"]
__license__ = "MIT"
__date__ = "31/07/2017"
from numpy.distutils.misc_util import Configuration
def configuration(parent_package='', top_path=None):
config = Configuration('utils', parent_package, top_path)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(configuration=configuration)
|
from nltk.corpus.reader import CHILDESCorpusReader, NS, string_types
from nltk.corpus.reader.xmldocs import ElementTree
sent_node = './/{%s}u' % NS
word_node = './/{%s}w' % NS
word_pos_tag_node = './/{%s}c' % NS
replacement_node = './/{%s}w/{%s}replacement' % (NS, NS)
replaced_word_node = './/{%s}w/{%s}replacement/{%s}w' % (NS, NS, NS)
replaced_word_node2 = './/{%s}w/{%s}wk' % (NS, NS)
stem_node = './/{%s}stem' % NS
relation_node = './/{%s}s' % NS
inflection_node = './/{%s}mor/{%s}mw/{%s}mk' % (NS, NS, NS)
clitic_node = './/{%s}mor/{%s}mor-post/{%s}mw/{%s}stem' % (NS, NS, NS, NS)
clitic_pos_tag_node = './/{%s}mor/{%s}mor-post/{%s}gra' % (NS, NS, NS)
def add_inflection(xmlword, word):
xmlinfl = xmlword.find(inflection_node)
if xmlinfl.get('type') == 'sfx': # regular inflection
word += '-' + xmlinfl.text
elif xmlinfl.get('type') == 'sfxf': # irregular inflection
word += '&' + xmlinfl.text
return word
def get_pos_tag(xmlword):
xmlpos = xmlword.findall(word_pos_tag_node) # word's POS tag
xmlpos2 = xmlword.findall(relation_node) # relational information (e.g. "SUB" for subject)
if xmlpos2:
tag = xmlpos[0].text + ":" + xmlpos2[0].text
else:
tag = xmlpos[0].text
return tag
def get_replaced_word(xmlsent, xmlword):
    # Element truthiness is unreliable in ElementTree (an element with no
    # children tests falsy), so compare the find() result against None.
    if xmlsent.find(replacement_node) is not None:
        xmlword = xmlsent.find(replaced_word_node)
    elif xmlsent.find(replaced_word_node2) is not None:
        xmlword = xmlsent.find(replaced_word_node2)
return xmlword
class ModifiedCHILDESCorpusReader(CHILDESCorpusReader):
"""
Modified (and somewhat cleaner than the NLTK version) '_get_words' method of the CHILDES corpus reader from the NLTK.
Modified to fetch clitics as separate lexical entries if you retrieve stemmed sentences,
e.g. by calling 'corpus_reader.tagged_sents(stem=True)'.
"""
def _get_words(self, fileid, speaker, sent, stem, relation, pos, strip_space, replace):
# ensure we have a list of speakers
if isinstance(speaker, string_types) and speaker != 'ALL':
speaker = [speaker]
xmldoc = ElementTree.parse(fileid).getroot()
# processing each sentence in xml doc
results = []
for xmlsent in xmldoc.findall(sent_node):
sents = []
# select speakers
if speaker == 'ALL' or xmlsent.get('who') in speaker:
# process each word
for xml_word in xmlsent.findall(word_node):
clitic_stem = None
# get replaced words
if replace:
xml_word = get_replaced_word(xmlsent, xml_word)
# get text
if xml_word.text:
word = xml_word.text
else:
word = ''
                    # strip trailing space
if strip_space:
word = word.strip()
# get stemmed words
if stem:
try:
xmlstem = xml_word.find(stem_node)
word = xmlstem.text
except AttributeError:
pass
# if there is an inflection
try:
word = add_inflection(xml_word, word)
                        except (AttributeError, TypeError):  # no inflection element present
pass
# if there is a clitic
try:
xmlclitic = xml_word.find(clitic_node)
clitic_stem = xmlclitic.text
except AttributeError:
clitic_stem = ''
# get pos
if pos:
try:
tag = get_pos_tag(xml_word)
word = (word, tag)
except (AttributeError, IndexError):
word = (word, None)
if clitic_stem:
# add clitic's pos tag if there is one
# in the parent class method, this branch does not fetch the clitic -- this is changed here
clitic_pos = xml_word.find(clitic_pos_tag_node)
if clitic_pos is not None:
clitic_stem = (clitic_stem, clitic_pos.get('relation'))
else:
clitic_stem = (clitic_stem, None)
sents.append(word)
if clitic_stem:
sents.append(clitic_stem)
if sent:
results.append(sents)
else:
results.extend(sents)
return results
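# Minimal usage sketch. Hedged: the corpus root and file pattern below are
# hypothetical placeholders -- point them at a local CHILDES XML corpus.
if __name__ == '__main__':
    corpus_root = '/path/to/childes/Eng-NA/Brown/'  # hypothetical path
    reader = ModifiedCHILDESCorpusReader(corpus_root, 'Adam/.*\\.xml')
    # Stemmed, POS-tagged sentences; clitics come back as separate entries.
    for tagged_sent in reader.tagged_sents(stem=True)[:3]:
        print(tagged_sent)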
|
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch
from torch.nn import functional as F
models_urls = {
'101_voc': 'https://cloudstor.aarnet.edu.au/plus/s/Owmttk9bdPROwc6/download',
'18_imagenet': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'34_imagenet': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'50_imagenet': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'152_imagenet': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'101_imagenet': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
}
def maybe_download(model_name, model_url, model_dir=None, map_location=None):
import os, sys
from six.moves import urllib
if model_dir is None:
torch_home = os.path.expanduser(os.getenv('TORCH_HOME', '~/.torch'))
model_dir = os.getenv('TORCH_MODEL_ZOO', os.path.join(torch_home, 'models'))
if not os.path.exists(model_dir):
os.makedirs(model_dir)
filename = '{}.pth.tar'.format(model_name)
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
url = model_url
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
urllib.request.urlretrieve(url, cached_file)
return torch.load(cached_file, map_location=map_location)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
def conv3x3_bn(in_channel, out_channel):
return nn.Sequential(nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=1, padding=1, bias=False),
nn.ReLU(inplace=True))
class MultiResolutionFuse(nn.Module):
def __init__(self, in_size, out_size):
super(MultiResolutionFuse, self).__init__()
self.in_size=in_size
self.out_size=out_size
self.conv = nn.Conv2d(in_size, out_size, kernel_size=1, stride=1, bias=False)
def forward(self, input_low, input_high):
high_size = input_high.size()[2:]
        # the low-resolution input usually has more channels than the high-resolution one
if self.in_size != self.out_size:
input_low = self.conv(input_low)
upsample_low = F.upsample(input_low, high_size, mode='bilinear')
cat = torch.cat([upsample_low, input_high], dim=1)
return cat
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class RefineBlock(nn.Module):
def __init__(self, in_channel):
super(RefineBlock, self).__init__()
self.c1 = nn.Conv2d(in_channel, 512,kernel_size=1, stride=1, padding=0, bias=False)
self.c3_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)
self.bn = nn.BatchNorm2d(512)
self.relu = nn.ReLU(inplace=True)
self.c3_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)
def forward(self, x):
x1 = self.c1(x)
x = self.c3_1(x1)
x = self.bn(x)
x = self.relu(x)
x = self.c3_2(x)
out = x1 + x
return out
class FPA(nn.Module):
def __init__(self, in_channel, out_channel):
super(FPA, self).__init__()
self.c7_1 = nn.Conv2d(in_channel, out_channel, kernel_size=7, stride=1, padding=3, bias=False)
self.c5_1 = nn.Conv2d(in_channel, out_channel, kernel_size=5, stride=1, padding=2, bias=False)
self.c3_1 = nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=1, padding=1, bias=False)
self.c7_2 = nn.Conv2d(out_channel, out_channel, kernel_size=7, stride=1, padding=3, bias=False)
self.c5_2 = nn.Conv2d(out_channel, out_channel, kernel_size=5, stride=1, padding=2, bias=False)
self.c3_2 = nn.Conv2d(out_channel, out_channel, kernel_size=3, stride=1, padding=1, bias=False)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.c1_gpb = nn.Conv2d(in_channel, out_channel, kernel_size=1, bias=False)
self.bn = nn.BatchNorm2d(out_channel)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
input_size = x.size()[2:]
x7_1 = self.c7_1(x)
x7_1 = self.bn(x7_1)
x7_1 = self.relu(x7_1)
x7_2 = self.c7_2(x7_1)
x7_2 = self.bn(x7_2)
x5_1 = self.c5_1(x)
x5_1 = self.bn(x5_1)
x5_1 = self.relu(x5_1)
x5_2 = self.c5_2(x5_1)
x5_2 = self.bn(x5_2)
x3_1 = self.c3_1(x)
x3_1 = self.bn(x3_1)
x3_1 = self.relu(x3_1)
x3_2 = self.c3_2(x3_1)
x3_2 = self.bn(x3_2)
x_gp = self.avg_pool(x)
x_gp = self.c1_gpb(x_gp)
x_gp = self.bn(x_gp)
x_gp = F.upsample(x_gp, size=input_size, mode='bilinear')
out = torch.cat([x_gp, x7_2, x5_2, x3_2], dim=1)
return out
# mv2_5+ l3/l4/classifier dropout
class MV2_5_1_ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
super(MV2_5_1_ResNet, self).__init__()
self.do = nn.Dropout(p=0.5)
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.rb1_1 = RefineBlock(256)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.rb2_1 = RefineBlock(512)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.rb3_1 = RefineBlock(1024)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.rb4_1 = RefineBlock(2048)
# self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(512 * block.expansion, num_classes)
# only for >=res50
# self.fpa=FPA(2048,512)
self.fpa = FPA(512, 512)
self.rb4_2 = RefineBlock(512 * 4)
self.fuse43 = MultiResolutionFuse(512, 512)
# self.post_proc43 = conv3x3_bn(512*2,512)
self.rb3_2 = RefineBlock(512 * 2)
self.fuse32 = MultiResolutionFuse(512, 512)
self.rb2_2 = RefineBlock(512 * 2)
# self.post_proc32 = conv3x3_bn(512)
self.fuse21 = MultiResolutionFuse(512, 512)
self.rb1_2 = RefineBlock(512 * 2)
# self.post_proc21 = conv3x3_bn(512)
self.class_conv = nn.Conv2d(512, num_classes, kernel_size=3, stride=1,
padding=1, bias=True)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
ori_size = x.size()[2:]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
l1 = self.layer1(x)
l2 = self.layer2(l1)
l3 = self.layer3(l2)
l4 = self.layer4(l3)
l3=self.do(l3)
l4=self.do(l4)
l1 = self.rb1_1(l1)
l2 = self.rb2_1(l2)
l3 = self.rb3_1(l3)
l4 = self.rb4_1(l4)
l4 = self.fpa(l4)
l4=self.rb4_2(l4)
x_fuse43 = self.fuse43(l4, l3)
x_fuse43=self.rb3_2(x_fuse43)
x_fuse32 = self.fuse32(x_fuse43, l2)
x_fuse32=self.rb2_2(x_fuse32)
x_fuse21 = self.fuse21(x_fuse32, l1)
x_fuse21=self.rb1_2(x_fuse21)
x_fuse21=self.do(x_fuse21)
x = self.class_conv(x_fuse21)
x = F.upsample(x, ori_size, mode='bilinear')
return x
def MV2_5_1_ResNet18(num_classes, pretrained=False, **kwargs):
"""Constructs a MV1_ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MV2_5_1_ResNet(BasicBlock, [2, 2, 2, 2], **kwargs, num_classes=num_classes)
if pretrained:
key = '18_imagenet'
url = models_urls[key]
model.load_state_dict(maybe_download(key, url), strict=False)
return model
def MV2_5_1_ResNet34(num_classes, pretrained=False, **kwargs):
"""Constructs a MV1_ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MV2_5_1_ResNet(BasicBlock, [3, 4, 6, 3], **kwargs, num_classes=num_classes)
if pretrained:
key = '34_imagenet'
url = models_urls[key]
model.load_state_dict(maybe_download(key, url), strict=False)
return model
def MV2_5_1_ResNet50(num_classes, pretrained=True, **kwargs):
"""Constructs a MV1_ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MV2_5_1_ResNet(Bottleneck, [3, 4, 6, 3], **kwargs, num_classes=num_classes)
if pretrained:
key = '50_imagenet'
url = models_urls[key]
model.load_state_dict(maybe_download(key, url), strict=False)
print("load imagenet res50")
return model
def MV2_5_1_ResNet101(num_classes, pretrained=False, **kwargs):
"""Constructs a MV1_ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MV2_5_1_ResNet(Bottleneck, [3, 4, 23, 3], **kwargs, num_classes=num_classes)
if pretrained:
key = '101_imagenet'
url = models_urls[key]
model.load_state_dict(maybe_download(key, url), strict=False)
return model
def MV2_5_1_ResNet152(num_classes, pretrained=False, **kwargs):
"""Constructs a MV1_ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MV2_5_1_ResNet(Bottleneck, [3, 8, 36, 3], **kwargs, num_classes=num_classes)
if pretrained:
key = '152_imagenet'
url = models_urls[key]
model.load_state_dict(maybe_download(key, url), strict=False)
return model
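# Minimal smoke-test sketch. Hedged: the input size and class count are
# arbitrary, and pretrained=False avoids the checkpoint download in
# maybe_download(). Note the RefineBlock widths (256/512/1024/2048) match the
# Bottleneck variants, so the ResNet-50/101/152 constructors run as-is, as the
# "only for >=res50" comment above suggests.
if __name__ == '__main__':
    net = MV2_5_1_ResNet50(num_classes=21, pretrained=False)
    net.eval()
    with torch.no_grad():
        out = net(torch.randn(1, 3, 224, 224))
    print(out.shape)  # per-pixel scores upsampled back to (1, 21, 224, 224)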
|
from django.conf.urls import url
from AiSHABot import views
urlpatterns = [
url(r'^7a8bc5f20d6c86b3021a74a4a1bca1bbe411ea6b9f04628f6a/?$', views.AiSHAView.as_view()),
url(r'^privacy', views.privacy)
]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
static = pd.read_csv("CCG_AIS_Static_Data_2018-05-01.csv")
cs_countries = pd.read_csv("CallSignSeriesRanges-1cc49d48-935c-4514-9ba2-3aabef92c7aa.csv")
cs_countries[cs_countries['Series'].str.contains("VGdd")].shape[0]
static = static.drop_duplicates(["Region","Station_Location","AIS_Channel","AIS_Class","Message_Type","Repeat_Indicator","MMSI","IMO_number","Call_Sign","Vessel_Name","Type_of_Ship_and_Cargo","Dimension_to_Bow_meters","Dimension_to_Stern_meters","Dimension_to_Port_meters","Dimension_to_Starboard_meters","Vessel_Length_meters","Vessel_Width_meters","Draught_decimeters","Destination"], keep="last")
# allocate vessel to country by call sign
def identify_country(cs_countries,cs):
cs = cs[0:2]
if cs.isalnum():
match = cs_countries[(cs_countries['Series'].str[0:2]).str.match(str(cs))]
if match.shape[0] != 0:
return match["Allocated to"].iloc[0]
else:
return 'undefined'
return 'undefined'
static['country'] = static.apply(lambda x: identify_country(cs_countries, str(x['Call_Sign'])), axis=1)
identify_country(cs_countries,'Z3Z')
|
import pandas as pd
import numpy as np
def unprocessed(csv_file):
    # read the raw data from the path passed in (this was hardcoded to
    # '../data/raw/mars-weather.csv', which silently ignored csv_file)
    df = pd.read_csv(csv_file)
    return df
def load_and_process(csv_file):
    df = pd.read_csv(csv_file)
    df1 = (df.copy()
           .drop(['atmo_opacity', 'wind_speed', 'id'], axis=1)
           .rename(columns={"terrestrial_date": "earth_date"})
           .dropna(axis=0))
conditions = [
(df1['ls'] >= 0)&(df1['ls'] < 90),
(df1['ls'] >= 90) & (df1['ls'] < 180),
(df1['ls'] >= 180) & (df1['ls'] < 270),
(df1['ls'] >= 270) & (df1['ls'] <=360)
]
vals= ['Autumn','Winter','Spring','Summer']
df1['Season'] = np.select(conditions, vals)
return df1
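# Minimal usage sketch. Hedged: the CSV path is the raw-data location already
# referenced above and may differ in other checkouts.
if __name__ == '__main__':
    processed = load_and_process('../data/raw/mars-weather.csv')
    print(processed[['earth_date', 'ls', 'Season']].head())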
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import SubmitFiles
|
#!/usr/bin/env python
#
# Copyright (c) 2016, Nest Labs, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class SED_6_1_2(HarnessCase):
suite = 4
case = '6 1 2'
golden_devices_required = 2
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
import os
import subprocess
import errno
import argparse
#NSE Documentation
#Running
#dns-cache-snoop: Performs DNS cache snooping against DNS dns-cache-snoop.mode=timed,dns-cache-snoop.domains={host1,host2,host3}
#dns-check-zone: Checks DNS zone config against best practices dns-check-zone.domain=example.com
#dns-ip6-arpa-scan: Performs reverse lookup of IPv6 using special technique prefix=2001:0DB8::/48
#dns-nsec-enum: Enumerate DNS using the DNSSEC NSEC-walking technique dns-nsec-enum.domains=example.com
#dns-nsec3-enum: Tries to enum domain names from DNS server that supports DNSSEC NSEC3 dns-nsec3-enum.domains=example.com
#dns-nsid: Retrieves information from DNS by requesting nameserver
# ID and asking for its id.server and version.bind values
#dns-random-srcport: Check DNS for predictable-port recursion Vuln
#dns-random-txid: Check DNS for predictable TXID DNS recursion Vuln
#dns-recursion: Checks if DNS allows queries for third-party names
#dns-service-discovery: Attempts to discover target hosts services using DNS
#dns-srv-enum: Enumerates various common SRV records for a given domain name dns-srv-enum.domain='example.com'
#dns-update: Perform dynamic DNS update without authentication dns-update.hostname=foo.example.com,dns-update.ip=192.0.2.1
#dns-zone-transfer: Requests a zone transfer from DNS server
# dns-zone-transfer.domain
# dns-zone-transfer.server
# dns-zone-transfer.port
#whois-domain: Queries whois.iana.org,
#Not running
#dns-blacklist: Checks target IP addresses against multiple DNS anti-spam and other lists
#dns-brute: Enum DNS by brute force
#dns-client-subnet-scan: Perform domain lookup using the edns-client-subnet option
#dns-fuzz: Launch DNS fuzzing attack against DNS
#dns-zeustracker: Check if IP range is part of Zeus
def doDNSNmapTCP():
    print("INFO: Starting nmap dnsrecon for %s and TCP %s" % (ip_address, port))
    subprocess.check_output(['nmap','-n','-sV','-Pn','-vv','-sT','-p',port,'--script','dns-cache-snoop,dns-check-zone,dns-ip6-arpa-scan,dns-nsec-enum,dns-nsec3-enum,dns-nsid,dns-random-srcport,dns-random-txid,dns-recursion,dns-service-discovery,dns-srv-enum,dns-update,dns-zone-transfer,whois-domain','--script-args','dns-check-zone.domain=%s,dns-nsec-enum.domains=%s,dns-nsec3-enum.domains=%s,dns-srv-enum.domain=%s,dns-zone-transfer.domain=%s' % (domain, domain, domain, domain, domain),'-oA','%s/%s_dns_TCP' % (BASE, ip_address),ip_address])
    print("INFO: Finished nmap dnsrecon for %s and TCP %s" % (ip_address, port))
    return
def doDNSNmapUDP():
    print("INFO: Starting nmap dnsrecon for %s and UDP %s" % (ip_address, port))
    subprocess.check_output(['nmap','-n','-sV','-Pn','-vv','-sU','-p',port,'--script','dns-cache-snoop,dns-check-zone,dns-ip6-arpa-scan,dns-nsec-enum,dns-nsec3-enum,dns-nsid,dns-random-srcport,dns-random-txid,dns-recursion,dns-service-discovery,dns-srv-enum,dns-update,dns-zone-transfer,whois-domain','--script-args','dns-check-zone.domain=%s,dns-nsec-enum.domains=%s,dns-nsec3-enum.domains=%s,dns-srv-enum.domain=%s,dns-zone-transfer.domain=%s' % (domain, domain, domain, domain, domain),'-oA','%s/%s_dns_UDP' % (BASE, ip_address),ip_address])
    print("INFO: Finished nmap dnsrecon for %s and UDP %s" % (ip_address, port))
    return
# def doNMBLookup():
# HOSTNAME = "nmblookup -A %s | grep '<00>' | grep -v '<GROUP>' | cut -d' ' -f1" % (ip_address) #grab the hostname
# host = subprocess.check_output(HOSTNAME, shell=True).strip()
# print "INFO: Attempting Domain Transfer on " + host
# ZT = "dig @%s.thinc.local thinc.local axfr" % (host)
# ztresults = subprocess.check_output(ZT, shell=True)
# if "failed" in ztresults:
# print "INFO: Zone Transfer failed for " + host
# else:
# print "[*] Zone Transfer successful for " + host + "(" + ip_address + ")!!! [see output file]"
# outfile = "results/exam/" + ip_address+ "_zonetransfer.txt"
# dnsf = open(outfile, "w")
# dnsf.write(ztresults)
# dnsf.close
#makedir function from https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
#Compatible with Python >2.5, but there is a more advanced function for python 3.5
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: #Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Rough script to handle simple dns enumeration. Usage: dnsrecon.py {--domain} target')
parser.add_argument('target', help="Target IP")
parser.add_argument('--domain', help="Target Domain")
parser.add_argument('--port', default='53', help= "Port, default 53")
args = parser.parse_args()
ip_address = args.target
port = args.port
domain = args.domain
BASE = '/root/scripts/recon_enum/results/exam/dns'
mkdir_p(BASE)
doDNSNmapTCP()
doDNSNmapUDP()
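# Example invocation (hypothetical target values):
#   ./dnsrecon.py 192.0.2.10 --domain example.com --port 53
# nmap's -oA writes the results under BASE as <ip>_dns_TCP.* and <ip>_dns_UDP.*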
|
from fastai.text import *
import fire
BOS = 'xbos' # beginning-of-sentence tag
FLD = 'xfld' # data field tag
BOS_LABEL = '_bos_'
PAD = '_pad_'
re1 = re.compile(r' +')
def read_file(filepath):
assert os.path.exists(filepath)
sentences = []
labels = []
with open(filepath, encoding='utf-8') as f:
sentence = [BOS]
sentence_labels = [BOS_LABEL]
for line in f:
if line == '\n':
sentences.append(sentence)
labels.append(sentence_labels)
sentence = [BOS] # use xbos as the start of sentence token
sentence_labels = [BOS_LABEL]
else:
sentence.append(line.split()[0].lower())
# label is generally in the last column
sentence_labels.append(line.split()[-1])
        if len(sentence) > 1:  # flush the final sentence; guards against a [BOS]-only leftover when the file ends on an empty line
sentences.append(sentence)
labels.append(sentence_labels)
return sentences, labels
def create_toks(prefix, max_vocab=30000, min_freq=1):
PATH = f'data/nlp_seq/{prefix}/'
names = {}
if prefix == 'ner':
names['train'] = 'train.txt'
names['val'] = 'valid.txt'
names['test'] = 'test.txt'
else:
raise ValueError(f'Filenames for {prefix} have to be added first.')
paths = {}
for split in ['train', 'val', 'test']:
paths[split] = f'{PATH}{names[split]}'
print(f'prefix {prefix} max_vocab {max_vocab} min_freq {min_freq}')
os.makedirs(f'{PATH}tmp', exist_ok=True)
trn_tok, trn_labels = read_file(paths['train'])
val_tok, val_labels = read_file(paths['val'])
test_tok, test_labels = read_file(paths['test'])
for trn_t, trn_l in zip(trn_tok[:5], trn_labels[:5]):
print('Sentence:', trn_t, 'labels:', trn_l)
    print(f'# of train: {len(trn_tok)}, # of val: {len(val_tok)}, '
          f'# of test: {len(test_tok)}')
freq = Counter(p for o in trn_tok for p in o)
print(freq.most_common(25))
itos = [o for o, c in freq.most_common(max_vocab) if c > min_freq]
itos.insert(0, PAD)
itos.insert(0, '_unk_')
stoi = collections.defaultdict(lambda: 0,
{v: k for k, v in enumerate(itos)})
print(len(itos))
trn_ids = np.array([[stoi[o] for o in p] for p in trn_tok])
val_ids = np.array([[stoi[o] for o in p] for p in val_tok])
test_ids = np.array([[stoi[o] for o in p] for p in test_tok])
# map the labels to ids
freq = Counter(p for o in trn_labels for p in o)
print(freq)
itol = [l for l, c in freq.most_common()]
itol.insert(1, PAD) # insert padding label at index 1
print(itol)
ltoi = {l: i for i, l in enumerate(itol)}
trn_lbl_ids = np.array([[ltoi[o] for o in p] for p in trn_labels])
val_lbl_ids = np.array([[ltoi[o] for o in p] for p in val_labels])
test_lbl_ids = np.array([[ltoi[o] for o in p] for p in test_labels])
ids_joined = np.array([[stoi[o] for o in p] for p in trn_tok + val_tok + test_tok])
val_ids_joined = ids_joined[int(len(ids_joined)*0.9):]
ids_joined = ids_joined[:int(len(ids_joined)*0.9)]
np.save(f'{PATH}tmp/trn_ids.npy', trn_ids)
np.save(f'{PATH}tmp/val_ids.npy', val_ids)
np.save(f'{PATH}tmp/test_ids.npy', test_ids)
np.save(f'{PATH}tmp/lbl_trn.npy', trn_lbl_ids)
np.save(f'{PATH}tmp/lbl_val.npy', val_lbl_ids)
np.save(f'{PATH}tmp/lbl_test.npy', test_lbl_ids)
pickle.dump(itos, open(f'{PATH}tmp/itos.pkl', 'wb'))
pickle.dump(itol, open(f'{PATH}tmp/itol.pkl', 'wb'))
np.save(f'{PATH}tmp/trn_lm_ids.npy', ids_joined)
np.save(f'{PATH}tmp/val_lm_ids.npy', val_ids_joined)
if __name__ == '__main__': fire.Fire(create_toks)
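# Example invocation via the fire CLI. Assumes data/nlp_seq/ner/{train,valid,test}.txt
# in CoNLL-style "token ... label" format with blank lines between sentences:
#   python create_toks.py ner --max_vocab 30000 --min_freq 1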
|
# -*- coding: utf-8 -*-
#
# BGPStream documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 22 09:31:35 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'breathe'
]
breathe_projects = { "bgpstream": "doxygen/xml/" }
#breathe_projects_source = {
# "bgpstream" : ( "../lib", ["bgpstream.h"] )
# }
breathe_default_project = "bgpstream"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['sphinx/templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'BGPStream'
copyright = u'2015, The Regents of the University of California.'
author = u'Alistair King, Chiara Orsini'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['sphinx/build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'BGPStreamdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'BGPStream.tex', u'BGPStream Documentation',
u'Alistair King, Chiara Orsini', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'bgpstream', u'BGPStream Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'BGPStream', u'BGPStream Documentation',
author, 'BGPStream', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
from drf_yasg import openapi
import winter
from winter_openapi import QueryParametersInspector
from winter.web.routing import get_route
class ControllerForQueryParameter:
@winter.route_get('{?valid_query_param,mapped_query_param}')
@winter.map_query_parameter('mapped_query_param', to='invalid_query_param')
def simple_method(
self,
valid_query_param: int,
invalid_query_param: object,
):
pass
def test_query_parameter_inspector():
inspector = QueryParametersInspector()
route = get_route(ControllerForQueryParameter.simple_method)
# Act
parameters = inspector.inspect_parameters(route)
# Assert
assert len(parameters) == 2, parameters
parameter_by_name = {parameter.name: parameter for parameter in parameters}
valid_parameter = parameter_by_name['valid_query_param']
assert valid_parameter.type == openapi.TYPE_INTEGER
assert valid_parameter.description == ''
invalid_parameter = parameter_by_name['mapped_query_param']
assert invalid_parameter.type == openapi.TYPE_STRING
assert invalid_parameter.description == '(Note: parameter type can be wrong)'
|
"""Initial Migration
Revision ID: 90a0a2f4afc9
Revises:
Create Date: 2021-11-14 15:14:33.638202
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '90a0a2f4afc9'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=255), nullable=True),
sa.Column('firstname', sa.String(length=255), nullable=True),
sa.Column('lastname', sa.String(length=255), nullable=True),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('bio', sa.String(length=5000), nullable=True),
sa.Column('pass_secure', sa.String(length=255), nullable=True),
sa.Column('date_joined', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=False)
op.create_table('pitches',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('pitch_title', sa.String(), nullable=True),
sa.Column('pitch_content', sa.String(length=1000), nullable=True),
sa.Column('category', sa.String(), nullable=True),
sa.Column('posted', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('likes', sa.Integer(), nullable=True),
sa.Column('dislikes', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('comment', sa.String(length=1000), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('pitch', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['pitch'], ['pitches.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('comments')
op.drop_table('pitches')
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
# ### end Alembic commands ###
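# Typical usage for this revision (a sketch, run from the directory that
# contains alembic.ini):
#   alembic upgrade head     # creates users, pitches, comments
#   alembic downgrade base   # drops them again in reverse dependency order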
|
import logging
import os
import subprocess
import numpy as np
from grakel import Graph
try:
from clogging.CustomFormatter import CustomFormatter
except ImportError:
from ..clogging.CustomFormatter import CustomFormatter
class Classifier():
def __init__(self,path, name, threshold):
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(CustomFormatter())
self.log = logging.getLogger("Classifier")
self.log.setLevel(logging.INFO)
self.log.addHandler(ch)
self.log.propagate = False
self.name = name
self.threshold = threshold
self.gspan_path = path.replace("ToolChainClassifier","submodules/SEMA-quickspan/build/")
        self.dico_precomputed = {}  # dict keyed by graph-hash strings (a list would break the key assignments below)
self.dataset_len = 0
self.train_dataset = None
self.val_dataset = None
self.test_dataset = None
self.stat_dataset = None
    # Classify malware using the model.
    # in: path = list of samples to classify
    # TODO: this is not a good way to structure this:
    #   if path is not None -> load the saved model and classify the samples under path
    #   else                -> evaluate on the test split of the training set
    # The same applies to detection().
def classify(self,path=None):
"""
        Sort by family
"""
pass
def detection(self,path=None):
"""
Malware vs cleanware
"""
pass
# Train the model
def train(self,path):
pass
def read_mapping(self,path):
map_file = open(path,'r')
mapping = {}
for line in map_file:
tab = line.split('\n')[0].split(' ')
mapping[int(tab[0])] = tab[1]
map_file.close()
return mapping
def manual_kernel(self,g1, g2):
return self.merge_in_gs(g1,g2,'test.gs')
def manual_kernel_gram_matrix(self,G1, G2):
gram_matrix = np.zeros((len(G1), len(G2)))
for i, g1 in enumerate(G1):
for j, g2 in enumerate(G2):
gram_matrix[i, j] = self.manual_kernel(g1, g2)
#self.log.info(gram_matrix[i, j])
return gram_matrix
def read_mapping_inverse(self,path):
map_file = open(path,'r')
mapping = {}
for line in map_file:
tab = line.split('\n')[0].split(' ')
mapping[tab[1]] = int(tab[0])
map_file.close()
return mapping
def read_gs(self,path,mapping,lonely=True):
f = open(path,'r')
vertices = {}
nodes = {}
edges = {}
edge_labels = {}
c_edges = 1
for line in f:
if line.startswith("t"):
pass
if line.startswith("v"):
sp = line.split(" ")
v = int(sp[1])
vertices[v] = []
v_label = int(sp[2])
nodes[v] = mapping[v_label]
if line.startswith("e"):
#self.log.info(line)
sp = line.split(" ")
v1 = int(sp[1])
v2 = int(sp[2])
edges[tuple((v1,v2))] = 1
edge_labels[tuple((v1,v2))] = sp[3].replace('\n','')
c_edges = c_edges + 1
vertices[v1].append(v2)
vertices[v2].append(v1)
if not lonely:
            # The block below deletes lonely (degree-0) nodes and re-indexes the rest
de = []
count = 0
vertices_ok = {}
nodes_ok = {}
map_clean = {}
# find index of lonely node
for key in vertices:
if not vertices[key]:
de.append(key)
else:
map_clean[key] = count
count = count +1
#delete them
for key in de:
del vertices[key]
for key in vertices:
local_dic = {}
for v in vertices[key]:
local_dic[map_clean[v]] = 1.0
#self.log.info(local_dic)
vertices_ok[map_clean[key]] = local_dic
nodes_ok[map_clean[key]] = nodes[key]
if len(vertices_ok) <= 1:
self.log.info(vertices_ok)
G = Graph(vertices_ok,node_labels=nodes_ok,edge_labels=edge_labels)
else:
G = Graph(vertices,node_labels=nodes,edge_labels=edge_labels)
f.close()
return G
# TODO check if refactor possible with WL
def merge_in_gs(self,g1,g2,filename):
#import pdb; pdb.set_trace()
if g1==g2:
return 1
sameG = False
common_edges = 0
key_dic = str(g1.__hash__()) +'-'+ str(g2.__hash__())
rev_key_dic = str(g2.__hash__()) +'-'+ str(g1.__hash__())
self.log.info("key_dic : " + key_dic)
if key_dic in self.dico_precomputed :
self.log.info(self.dico_precomputed)
#self.log.info(Dico_precomputed[g1.__hash__()])
common_edges = self.dico_precomputed[key_dic]['common_edges']
elif rev_key_dic in self.dico_precomputed :
common_edges = self.dico_precomputed[rev_key_dic]['common_edges']
else:
f = open(filename,'w')
f.write('t # 0\n')
#self.log.info(g1.node_labels.items())
#self.log.info(g1.edge_labels.items())
for key,value in g1.node_labels.items():
f.write("v "+str(key)+' '+str(self.mapping_inv[value])+'\n')
#self.log.info("v "+str(key)+' '+str(mapping_inv[value])+'\n')
for (v1,v2),l in g1.edge_labels.items():
f.write("e "+str(v1)+' '+str(v2)+' '+str(l)+'\n')
#self.log.info("e "+str(v1)+' '+str(v2)+' '+str(l)+'\n')
#import time
#time.sleep(100)
f.write('t # 1\n')
for key,value in g2.node_labels.items():
f.write("v "+str(key)+' '+str(self.mapping_inv[value])+'\n')
for (v1,v2),l in g2.edge_labels.items():
f.write("e "+str(v1)+' '+str(v2)+' '+str(l)+'\n')
f.close()
command = self.gspan_path + "gspan"
tab_arg = command + ' --input_file '+filename+' --output_file temp2.gs --pattern --biggest_subgraphs 1 --threads 3 --timeout 1 --support 1.0'
process = subprocess.Popen(tab_arg.split(' '))
try:
process.wait(timeout=90)
except:
process.kill()
len_edges= [0]
i=0
for j in [0,1,2]:
try:
res2 = open('temp2.gs.t'+str(j),'r')
for line in res2 :
if 't #' in line:
len_edges.append(0)
elif 'x: 0 1 ' in line:
i += 1
elif 'x: 0' in line :
len_edges[i] = 0
i += 1
elif 'e ' in line :
len_edges[i] += 1
else :
pass
res2.close()
len_edges.append(0)
common_edges = max(len_edges)
os.system("rm temp2.gs.t"+str(j))
except:
self.log.info("error common edges")
common_edges=max(len_edges)
self.dico_precomputed[key_dic] = {'common_edges':common_edges}
self.log.info("common edges : "+str(common_edges))
#### g1
counter1 =0
counter2=0
common_nodes =0
node1 = {}
if g1.__hash__() in self.dico_precomputed:
#self.log.info(Dico_precomputed)
#self.log.info(Dico_precomputed[g1.__hash__()])
g1comp = self.dico_precomputed[g1.__hash__()]['gcomp']
counter1 = self.dico_precomputed[g1.__hash__()]['counter']
else:
f = open(filename,'w')
f.write('t # 0\n')
for key,value in g1.node_labels.items():
f.write("v "+str(key)+' '+str(self.mapping_inv[value])+'\n')
counter1 = counter1 + 1
if value in node1:
node1[value] = node1[value] +1
else:
node1[value] = 1
for (v1,v2),l in g1.edge_labels.items():
f.write("e "+str(v1)+' '+str(v2)+' '+str(l)+'\n')
f.write('t # 1\n')
for key,value in g1.node_labels.items():
f.write("v "+str(key)+' '+str(self.mapping_inv[value])+'\n')
for (v1,v2),l in g1.edge_labels.items():
f.write("e "+str(v1)+' '+str(v2)+' '+str(l)+'\n')
f.close()
command = self.gspan_path + "gspan"
tab_arg = command + ' --input_file '+filename+' --output_file temp2.gs --pattern --biggest_subgraphs 1 --threads 4 --timeout 1 --support 1.0'
process = subprocess.Popen(tab_arg.split(' '))
try:
process.wait(timeout=60)
except:
process.kill()
len_edges= [0]
i=0
for j in [0,1,2,3]:
try:
res2 = open('temp2.gs.t'+str(j),'r')
for line in res2 :
if 't #' in line:
len_edges.append(0)
elif 'x: 0 1 ' in line:
i += 1
elif 'x: 0' in line :
len_edges[i] = 0
i += 1
elif 'e ' in line :
len_edges[i] += 1
else :
pass
res2.close()
len_edges.append(0)
g1comp = max(len_edges)
os.system("rm temp2.gs.t"+str(j))
except:
g1comp=len(g1.edge_labels)
self.dico_precomputed[g1.__hash__()] = {'gcomp':g1comp,'counter':counter1}
self.log.info("edges g1 : "+str(g1comp))
#### g2
if g2.__hash__() in self.dico_precomputed:
g2comp = self.dico_precomputed[g2.__hash__()]['gcomp']
counter2 = self.dico_precomputed[g2.__hash__()]['counter']
else:
f = open(filename,'w')
f.write('t # 0\n')
for key,value in g2.node_labels.items():
f.write("v "+str(key)+' '+str(self.mapping_inv[value])+'\n')
counter2 = counter2 + 1
if value in node1 and node1[value] > 0:
common_nodes = common_nodes +1
node1[value] = node1[value] - 1
for (v1,v2),l in g2.edge_labels.items():
f.write("e "+str(v1)+' '+str(v2)+' '+str(l)+'\n')
f.write('t # 1\n')
for key,value in g2.node_labels.items():
f.write("v "+str(key)+' '+str(self.mapping_inv[value])+'\n')
for (v1,v2),l in g2.edge_labels.items():
f.write("e "+str(v1)+' '+str(v2)+' '+str(l)+'\n')
f.close()
command = self.gspan_path + "gspan"
tab_arg = command + ' --input_file '+filename+' --output_file temp2.gs --pattern --biggest_subgraphs 1 --threads 4 --timeout 1 --support 1.0'
process = subprocess.Popen(tab_arg.split(' '))
try:
process.wait(timeout=60)
except:
process.kill()
len_edges= [0]
i=0
for j in [0,1,2,3]:
try:
res2 = open('temp2.gs.t'+str(j),'r')
for line in res2 :
if 't #' in line:
len_edges.append(0)
elif 'x: 0 1 ' in line:
i += 1
elif 'x: 0' in line :
len_edges[i] = 0
i += 1
elif 'e ' in line :
len_edges[i] += 1
else :
pass
len_edges.append(0)
g2comp = max(len_edges)
os.system("rm temp2.gs.t"+str(j))
res2.close()
except:
g2comp=len(g2.edge_labels)
self.dico_precomputed[g2.__hash__()] = {'gcomp':g2comp,'counter':counter2}
self.log.info("edges g2 : "+str(g2comp))
nef = 0.25
try:
nodes_factor = common_nodes/(min(counter1,counter2))
edges_factor = common_edges/(min(g1comp,g2comp))
return min(1,nef * nodes_factor + (1-nef) * edges_factor)
except:
return 0
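# Minimal usage sketch for the gSpan-based graph kernel. Hedged: every file
# path below is hypothetical, a SEMA-quickspan build is assumed at
# self.gspan_path, and note that merge_in_gs() reads self.mapping_inv, which
# __init__ never sets -- it must be assigned before computing kernels.
if __name__ == '__main__':
    clf = Classifier('/path/to/ToolChainClassifier/', 'demo', threshold=0.45)
    mapping = clf.read_mapping('mapping.txt')                   # id -> label
    clf.mapping_inv = clf.read_mapping_inverse('mapping.txt')   # label -> id
    g1 = clf.read_gs('sample1.gs', mapping)
    g2 = clf.read_gs('sample2.gs', mapping)
    print(clf.manual_kernel_gram_matrix([g1, g2], [g1, g2]))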
|
import numpy as np
import math
import grid_generate as GridGen
import estimate_method as EstMeth
import frequency_oracle as FreOra
import itertools
import choose_granularity
class AG_Uniform_Grid_1_2_way_optimal:
def __init__(self, args = None):
self.args = args
self.group_attribute_num = 2 # to construct 2-D grids
self.group_num = 0
self.AG = [] # attribute_group
self.Grid_set = []
self.answer_list = []
self.weighted_update_answer_list = []
self.granularity = None # granularity g2
self.granularity_1_way = None # granularity g1
self.LDP_mechanism_list_divide_user = [] # LDP mechanism for each attribute group
self.set_granularity_1_2_way()
def set_granularity_1_2_way(self):
chooseGran = choose_granularity.choose_granularity_beta(args= self.args)
tmp_g1 = chooseGran.get_1_way_granularity_for_HDG(ep= self.args.epsilon)
tmp_g2 = chooseGran.get_2_way_granularity_for_HDG(ep= self.args.epsilon)
self.granularity_1_way = chooseGran.get_rounding_to_pow_2(gran= tmp_g1)
self.granularity = chooseGran.get_rounding_to_pow_2(gran= tmp_g2)
self.args.granularity_1_way = self.granularity_1_way
self.args.granularity = self.granularity
def judge_sub_attribute_in_attribute_group(self, sub_attribute = None, attribute_group:list = None):
if sub_attribute in attribute_group:
return True
else:
return False
def get_C_W_list(self, sub_attribute_value = None, sub_attribute = None, relevant_attribute_group_list:list = None):
C_list = np.zeros(self.args.group_num)
C_reci_list = np.zeros(self.args.group_num)
for i in relevant_attribute_group_list:
tmp_grid = self.Grid_set[i]
if len(tmp_grid.attribute_set) == 1:
C_list[i] = self.args.granularity_1_way // self.args.granularity
else:
C_list[i] = self.args.granularity
C_reci_list[i] = 1.0 / C_list[i]
return C_list, C_reci_list
def get_T_A_a(self, sub_attribute_value = None, sub_attribute = None, relevant_attribute_group_list:list = None, C_reci_list = None):
sum_C_reci_list = sum(C_reci_list)
sum_T_V_i_a = 0
for i in relevant_attribute_group_list:
T_V_i_a = 0
tmp_grid = self.Grid_set[i]
if len(tmp_grid.attribute_set) == 1:
left_interval_1_way = sub_attribute_value * (self.args.granularity_1_way // self.args.granularity)
right_interval_1_way = (sub_attribute_value + 1) * (self.args.granularity_1_way // self.args.granularity) - 1
k = left_interval_1_way
while k <= right_interval_1_way:
tmp_cell = tmp_grid.cell_list[k]
T_V_i_a += tmp_cell.consistent_count
k += 1
else:
sub_attribute_index_in_grid = tmp_grid.attribute_set.index(sub_attribute)
for tmp_cell in tmp_grid.cell_list:
if tmp_cell.dimension_index_list[sub_attribute_index_in_grid] == sub_attribute_value:
T_V_i_a += tmp_cell.consistent_count
sum_T_V_i_a += (C_reci_list[i] * T_V_i_a)
T_A_a = sum_T_V_i_a / sum_C_reci_list
return T_A_a
def get_consistency_for_sub_attribute(self, sub_attribute = None):
relevant_attribute_group_list = []
for i in range(self.group_num):
if self.judge_sub_attribute_in_attribute_group(sub_attribute, self.AG[i]):
relevant_attribute_group_list.append(i)
sub_attribute_domain = range(self.args.granularity) # need to be changed for 3-way attribute group
for sub_attribute_value in sub_attribute_domain:
C_list, C_reci_list = self.get_C_W_list(sub_attribute_value, sub_attribute, relevant_attribute_group_list)
T_A_a = self.get_T_A_a(sub_attribute_value, sub_attribute, relevant_attribute_group_list, C_reci_list)
for i in relevant_attribute_group_list: #update T_V_i_c
T_V_i_a = 0
T_V_i_c_cell_list = []
tmp_grid = self.Grid_set[i]
if len(tmp_grid.attribute_set) == 1:
left_interval_1_way = sub_attribute_value * (self.args.granularity_1_way // self.args.granularity)
right_interval_1_way = (sub_attribute_value + 1) * (self.args.granularity_1_way // self.args.granularity) - 1
k = left_interval_1_way
while k <= right_interval_1_way:
tmp_cell = tmp_grid.cell_list[k]
T_V_i_c_cell_list.append(k)
T_V_i_a += tmp_cell.consistent_count
k += 1
else:
sub_attribute_index_in_grid = tmp_grid.attribute_set.index(sub_attribute)
for k in range(len(tmp_grid.cell_list)):
tmp_cell = tmp_grid.cell_list[k]
if tmp_cell.dimension_index_list[sub_attribute_index_in_grid] == sub_attribute_value:
T_V_i_c_cell_list.append(k)
T_V_i_a += tmp_cell.consistent_count
for k in T_V_i_c_cell_list:
tmp_cell = tmp_grid.cell_list[k]
tmp_cell.consistent_count = tmp_cell.consistent_count + (T_A_a - T_V_i_a) * C_reci_list[i]
return
def overall_consistency(self):
for i in range(self.args.attribute_num):
self.get_consistency_for_sub_attribute(i)
return
def get_consistent_Grid_set(self):
for tmp_grid in self.Grid_set:
tmp_grid.get_consistent_grid()
self.overall_consistency()
for i in range(self.args.consistency_iteration_num_max):
for tmp_grid in self.Grid_set:
tmp_grid.get_consistent_grid_iteration()
self.overall_consistency()
# end with the Non-Negativity step
for tmp_grid in self.Grid_set:
tmp_grid.get_consistent_grid_iteration()
return
#*************consistency end*******************************
def weighted_update_iteration(self, grid_1_way_list = None, grid_2_way = None):
# update using 1_way
for tmp_grid_1_way in grid_1_way_list:
tmp_1_way_attribute = tmp_grid_1_way.attribute_set[0]
tmp_1_way_attribute_index = grid_2_way.attribute_set.index(tmp_1_way_attribute)
for i in range(len(tmp_grid_1_way.cell_list)):
tmp_cell = tmp_grid_1_way.cell_list[i]
lower_bound = tmp_cell.left_interval_list[0]
upper_bound = tmp_cell.right_interval_list[0] + 1
if tmp_1_way_attribute_index == 0:
tmp_sum = np.sum(grid_2_way.weighted_update_matrix[lower_bound:upper_bound, :])
if tmp_sum == 0:
continue
grid_2_way.weighted_update_matrix[lower_bound:upper_bound, :] = grid_2_way.weighted_update_matrix[lower_bound:upper_bound, :] / tmp_sum * tmp_cell.consistent_count
else:
tmp_sum = np.sum(grid_2_way.weighted_update_matrix[:, lower_bound:upper_bound])
if tmp_sum == 0:
continue
grid_2_way.weighted_update_matrix[:, lower_bound:upper_bound] = grid_2_way.weighted_update_matrix[:, lower_bound:upper_bound] / tmp_sum * tmp_cell.consistent_count
# normalization
grid_2_way.weighted_update_matrix = grid_2_way.weighted_update_matrix / np.sum(grid_2_way.weighted_update_matrix) * self.args.user_num
# update using 2_way
for tmp_cell in grid_2_way.cell_list:
x_lower_bound = tmp_cell.left_interval_list[0]
x_upper_bound = tmp_cell.right_interval_list[0] + 1
y_lower_bound = tmp_cell.left_interval_list[1]
y_upper_bound = tmp_cell.right_interval_list[1] + 1
tmp_sum = np.sum(grid_2_way.weighted_update_matrix[x_lower_bound:x_upper_bound, y_lower_bound:y_upper_bound])
if tmp_sum == 0:
continue
grid_2_way.weighted_update_matrix[x_lower_bound:x_upper_bound, y_lower_bound:y_upper_bound] = grid_2_way.weighted_update_matrix[x_lower_bound:x_upper_bound, \
y_lower_bound:y_upper_bound] / tmp_sum * tmp_cell.consistent_count
# normalization
grid_2_way.weighted_update_matrix = grid_2_way.weighted_update_matrix / np.sum(grid_2_way.weighted_update_matrix) * self.args.user_num
return
def get_weight_update_for_2_way_group(self):
for tmp_grid in self.Grid_set:
if len(tmp_grid.attribute_set) == 2:
grid_1_way_list = []
for tmp_grid_1_way in self.Grid_set:
if len(tmp_grid_1_way.attribute_set) == 1 and tmp_grid_1_way.attribute_set[0] in tmp_grid.attribute_set:
grid_1_way_list.append(tmp_grid_1_way)
tmp_grid.weighted_update_matrix = np.zeros((self.args.domain_size, self.args.domain_size))
# initialize
tmp_grid.weighted_update_matrix[:,:] = self.args.user_num / (self.args.domain_size * self.args.domain_size)
for i in range(self.args.weighted_update_iteration_num_max):
weighted_update_matrix_before = np.copy(tmp_grid.weighted_update_matrix)
self.weighted_update_iteration(grid_1_way_list, tmp_grid)
weighted_update_matrix_delta = np.sum(np.abs(tmp_grid.weighted_update_matrix - weighted_update_matrix_before))
if weighted_update_matrix_delta < 1:
break
return
def generate_attribute_group(self):
attribute_group_list = []
attribute_list = [i for i in range(self.args.attribute_num)]
for tmp_attribute in attribute_list:
attribute_group_list.append((tmp_attribute,))
attribute_group_2_way_list = list(itertools.combinations(attribute_list, self.group_attribute_num))
for tmp_attribute_group_2_way in attribute_group_2_way_list:
attribute_group_list.append(tmp_attribute_group_2_way)
self.group_num = len(attribute_group_list)
self.args.group_num = self.group_num
self.AG = attribute_group_list
for i in range(len(self.AG)):
self.AG[i] = list(self.AG[i])
def group_attribute(self):
self.generate_attribute_group()
return
def construct_Grid_set(self):
for i in range(self.group_num):
if len(self.AG[i]) == 1:
tmp_Grid = GridGen.UniformGrid(self.AG[i], granularity= self.granularity_1_way, args= self.args)
else:
tmp_Grid = GridGen.UniformGrid(self.AG[i], granularity= self.granularity, args= self.args)
tmp_Grid.Grid_index = i
tmp_Grid.Main()
self.Grid_set.append(tmp_Grid)
return
def get_user_record_in_attribute_group(self, user_record_i, attribute_group: int = None):
user_record_in_attribute_group = []
for tmp in self.AG[attribute_group]:
user_record_in_attribute_group.append(user_record_i[tmp])
return user_record_in_attribute_group
def get_LDP_Grid_set_divide_user(self, user_record):
print("HDG is working...")
        self.LDP_mechanism_list_divide_user = []  # re-initialize on every call so user data can be re-randomized
for j in range(self.group_num): # initialize LDP mechanism for each attribute group
            tmp_Grid = self.Grid_set[j]  # the j-th Grid
tmp_domain_size = len(tmp_Grid.cell_list)
tmp_LDR = FreOra.OUE(domain_size=tmp_domain_size, epsilon= self.args.epsilon, sampling_factor=self.group_num, args=self.args)
# tmp_LDR = FreOra.OLH(domain_size=tmp_domain_size, epsilon= self.args.epsilon, sampling_factor=self.group_num, args=self.args)
self.LDP_mechanism_list_divide_user.append(tmp_LDR)
for i in range(self.args.user_num):
tmp_user_granularity = math.ceil(self.args.user_num / self.group_num)
group_index_of_user = i // tmp_user_granularity
j = group_index_of_user
# to count the user num of each group
self.LDP_mechanism_list_divide_user[j].group_user_num += 1
tmp_Grid = self.Grid_set[j]
user_record_in_attribute_group_j = self.get_user_record_in_attribute_group(user_record[i], j)
tmp_real_cell_index = tmp_Grid.get_cell_index_from_attribute_value_set(user_record_in_attribute_group_j)
tmp_LDP_mechanism = self.LDP_mechanism_list_divide_user[j]
tmp_LDP_mechanism.operation_perturb(tmp_real_cell_index)
# update the perturbed_count of each cell
for j in range(self.group_num):
tmp_LDP_mechanism = self.LDP_mechanism_list_divide_user[j]
tmp_LDP_mechanism.operation_aggregate()
tmp_Grid = self.Grid_set[j] # the j-th Grid
for k in range(len(tmp_Grid.cell_list)):
tmp_Grid.cell_list[k].perturbed_count = tmp_LDP_mechanism.aggregated_count[k]
return
def judge_sub_attribute_list_in_attribute_group(self, sub_attribute_list, attribute_group):
if len(sub_attribute_list) == 1:
return False
flag = True
for sub_attribute in sub_attribute_list:
if sub_attribute not in attribute_group:
flag = False
break
return flag
def get_answer_range_query_attribute_group_list(self, selected_attribute_list):
answer_range_query_attribute_group_index_list = []
answer_range_query_attribute_group_list = []
for tmp_Grid in self.Grid_set:
            # note: check whether tmp_Grid.attribute_set is fully contained in selected_attribute_list
if self.judge_sub_attribute_list_in_attribute_group(tmp_Grid.attribute_set, selected_attribute_list):
answer_range_query_attribute_group_index_list.append(tmp_Grid.Grid_index)
answer_range_query_attribute_group_list.append(tmp_Grid.attribute_set)
return answer_range_query_attribute_group_index_list, answer_range_query_attribute_group_list
def answer_range_query(self, range_query):
t_Grid_ans = []
answer_range_query_attribute_group_index_list, answer_range_query_attribute_group_list = \
self.get_answer_range_query_attribute_group_list(range_query.selected_attribute_list)
for k in answer_range_query_attribute_group_index_list:
tmp_Grid = self.Grid_set[k]
Grid_range_query_attribute_node_list = []
for tmp_attribute in tmp_Grid.attribute_set:
Grid_range_query_attribute_node_list.append(range_query.query_attribute_node_list[tmp_attribute])
t_Grid_ans.append(tmp_Grid.answer_range_query_with_weight_update_matrix(Grid_range_query_attribute_node_list))
if range_query.query_dimension == self.group_attribute_num: # answer the 2-way marginal
tans_weighted_update = t_Grid_ans[0]
else:
tt = EstMeth.EsimateMethod(args= self.args)
tans_weighted_update = tt.weighted_update(range_query, answer_range_query_attribute_group_list, t_Grid_ans)
return tans_weighted_update
def answer_range_query_list(self, range_query_list):
self.weighted_update_answer_list = []
for tmp_range_query in range_query_list:
tans_weighted_update = self.answer_range_query(tmp_range_query)
self.weighted_update_answer_list.append(tans_weighted_update)
return
|
import urllib.parse
import requests
import os
import bs4
######### DO NOT CHANGE THIS CODE #########
def get_request(url):
'''
Open a connection to the specified URL and if successful
read the data.
Inputs:
url: must be an absolute URL
Outputs:
request object or None
Examples:
get_request("http://www.cs.uchicago.edu")
'''
if is_absolute_url(url):
try:
r = requests.get(url)
if r.status_code == 404 or r.status_code == 403:
r = None
except Exception:
# fail on any kind of error
r = None
else:
r = None
return r
def read_request(request):
'''
    Return data from request object. Returns result or "" if the read
    fails.
'''
try:
return request.text.encode('iso-8859-1')
except Exception:
print("read failed: " + request.url)
return ""
def get_request_url(request):
'''
Extract true URL from the request
'''
return request.url
def is_absolute_url(url):
'''
Is url an absolute URL?
'''
if url == "":
return False
return urllib.parse.urlparse(url).netloc != ""
def remove_fragment(url):
'''remove the fragment from a url'''
(url, frag) = urllib.parse.urldefrag(url)
return url
def convert_if_relative_url(current_url, new_url):
'''
Attempt to determine whether new_url is a relative URL and if so,
use current_url to determine the path and create a new absolute
URL. Will add the protocol, if that is all that is missing.
Inputs:
current_url: absolute URL
new_url:
Outputs:
new absolute URL or None, if cannot determine that
new_url is a relative URL.
Examples:
convert_if_relative_url("http://cs.uchicago.edu", "pa/pa1.html") yields
'http://cs.uchicago.edu/pa/pa1.html'
convert_if_relative_url("http://cs.uchicago.edu", "foo.edu/pa.html")
yields 'http://foo.edu/pa.html'
'''
if new_url == "" or not is_absolute_url(current_url):
return None
if is_absolute_url(new_url):
return new_url
parsed_url = urllib.parse.urlparse(new_url)
path_parts = parsed_url.path.split("/")
if len(path_parts) == 0:
return None
ext = path_parts[0][-4:]
if ext in [".edu", ".org", ".com", ".net"]:
return "http://" + new_url
elif new_url[:3] == "www":
return "http://" + new_path
else:
return urllib.parse.urljoin(current_url, new_url)
ARCHIVES = ("https://www.classes.cs.uchicago.edu/archive/2015/winter"
"/12200-1/new.collegecatalog.uchicago.edu/thecollege/archives")
LEN_ARCHIVES = len(ARCHIVES)
def is_url_ok_to_follow(url, limiting_domain):
'''
Inputs:
url: absolute URL
limiting domain: domain name
Outputs:
Returns True if the protocol for the URL is HTTP(s), the domain
is in the limiting domain, and the path is either a directory
or a file that has no extension or ends in .html. URLs
that include an "@" are not OK to follow.
Examples:
is_url_ok_to_follow("http://cs.uchicago.edu/pa/pa1", "cs.uchicago.edu")
yields True
is_url_ok_to_follow("http://cs.cornell.edu/pa/pa1", "cs.uchicago.edu")
yields False
'''
if "mailto:" in url:
return False
if "@" in url:
return False
if url[:LEN_ARCHIVES] == ARCHIVES:
return False
parsed_url = urllib.parse.urlparse(url)
if parsed_url.scheme != "http" and parsed_url.scheme != "https":
return False
if parsed_url.netloc == "":
return False
if parsed_url.fragment != "":
return False
if parsed_url.query != "":
return False
loc = parsed_url.netloc
ld = len(limiting_domain)
trunc_loc = loc[-(ld+1):]
if not (limiting_domain == loc or (trunc_loc == "." + limiting_domain)):
return False
# does it have the right extension
(filename, ext) = os.path.splitext(parsed_url.path)
return (ext == "" or ext == ".html")
def is_subsequence(tag):
'''
Does the tag represent a subsequence?
'''
return isinstance(tag, bs4.element.Tag) and 'class' in tag.attrs \
and tag['class'] == ['courseblock', 'subsequence']
def is_whitespace(tag):
'''
Does the tag represent whitespace?
'''
return isinstance(tag, bs4.element.NavigableString) and (tag.strip() == "")
def find_sequence(tag):
'''
If tag is the header for a sequence, then
find the tags for the courses in the sequence.
'''
rv = []
sib_tag = tag.next_sibling
    while is_subsequence(sib_tag) or is_whitespace(sib_tag):
        if not is_whitespace(sib_tag):
rv.append(sib_tag)
sib_tag = sib_tag.next_sibling
return rv
|
import appdaemon.plugins.hass.hassapi as hass
#
# Listen for presence sensor change state and change alarm control panel state.
#
# Args:
# sensor - home presence 'sensor'
# ha_panel - alarm control panel entity (to arm and disarm).
# constraint - (optional, input_boolean): if turned off, the alarm panel will not be armed/disarmed.
#
# Release Notes
#
# Version 1.0:
# Initial Version
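#
# Example apps.yaml entry (a hypothetical sketch; the module name and entity
# ids are illustrative):
#
#   alarm_panel_by_sensor:
#     module: alarm_panel_by_sensor
#     class: AlarmPanelBySensor
#     sensor: binary_sensor.home_presence
#     ha_panel: alarm_control_panel.home_alarm
#     constraint: input_boolean.alarm_automation
#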
class AlarmPanelBySensor(hass.Hass):
def initialize(self):
if "sensor" not in self.args or "ha_panel" not in self.args:
self.error("Please provide sensor and ha_panel in config!")
return
self.listen_state(self.sensor_trigger, self.args['sensor'])
self.listen_event(self.ha_event, "ha_started")
def ha_event(self, event_name, data, kwargs):
self.log('Starting up!')
state = self.get_state(self.args['sensor'])
self.log('Updating alarm_control_panel state: {}'.format(state))
if state == "off":
self.away_mode()
def sensor_trigger(self, entity, attribute, old, new, kwargs):
self.log("{} turned {}".format(entity, new))
if new == "off" and old == "on":
self.away_mode()
if new == "on" and old == "off":
self.return_home_mode()
def away_mode(self):
if 'constraint' in self.args and not self.constrain_input_boolean(self.args['constraint']):
return
self.call_service("alarm_control_panel/alarm_arm_away", entity_id = self.args['ha_panel'])
def return_home_mode(self):
if 'constraint' in self.args and not self.constrain_input_boolean(self.args['constraint']):
return
self.call_service("alarm_control_panel/alarm_disarm", entity_id = self.args['ha_panel'])
|
# -*- coding: utf-8 -*-
"""
This file contains all jobs that are used in tests. Each of these test
fixtures has slightly different characteristics.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import time
import sys
from rq import Connection, get_current_job, get_current_connection, Queue
from rq.decorators import job
from rq.compat import PY2
from rq.worker import HerokuWorker
def say_pid():
return os.getpid()
def say_hello(name=None):
"""A job with a single argument and a return value."""
if name is None:
name = 'Stranger'
return 'Hi there, %s!' % (name,)
def do_nothing():
"""The best job in the world."""
pass
def div_by_zero(x):
"""Prepare for a division-by-zero exception."""
return x / 0
def some_calculation(x, y, z=1):
"""Some arbitrary calculation with three numbers. Choose z smartly if you
want a division by zero exception.
"""
return x * y / z
def create_file(path):
"""Creates a file at the given path. Actually, leaves evidence that the
job ran."""
with open(path, 'w') as f:
f.write('Just a sentinel.')
def create_file_after_timeout(path, timeout):
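    """Creates a file at the given path after sleeping for the given timeout."""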
time.sleep(timeout)
create_file(path)
def access_self():
assert get_current_connection() is not None
assert get_current_job() is not None
def modify_self(meta):
j = get_current_job()
j.meta.update(meta)
j.save()
def modify_self_and_error(meta):
j = get_current_job()
j.meta.update(meta)
j.save()
return 1 / 0
def echo(*args, **kwargs):
return (args, kwargs)
class Number(object):
def __init__(self, value):
self.value = value
@classmethod
def divide(cls, x, y):
return x * y
def div(self, y):
return self.value / y
class CallableObject(object):
def __call__(self):
return u"I'm callable"
class UnicodeStringObject(object):
def __repr__(self):
if PY2:
return u'é'.encode('utf-8')
else:
return u'é'
with Connection():
@job(queue='default')
def decorated_job(x, y):
return x + y
def black_hole(job, *exc_info):
# Don't fall through to default behaviour (moving to failed queue)
return False
def long_running_job(timeout=10):
time.sleep(timeout)
return 'Done sleeping...'
def run_dummy_heroku_worker(sandbox, _imminent_shutdown_delay):
"""
Run the work horse for a simplified heroku worker where perform_job just
creates two sentinel files 2 seconds apart.
:param sandbox: directory to create files in
:param _imminent_shutdown_delay: delay to use for HerokuWorker
"""
sys.stderr = open(os.path.join(sandbox, 'stderr.log'), 'w')
class TestHerokuWorker(HerokuWorker):
imminent_shutdown_delay = _imminent_shutdown_delay
def perform_job(self, job, queue):
create_file(os.path.join(sandbox, 'started'))
# have to loop here rather than one sleep to avoid holding the GIL
# and preventing signals being received
for i in range(20):
time.sleep(0.1)
create_file(os.path.join(sandbox, 'finished'))
w = TestHerokuWorker(Queue('dummy'))
w.main_work_horse(None, None)
|
# encoding: utf-8
import logging
import os
import zipfile
from .charfields import CharField
from .computed import ComputedFieldMixin
# pyuca only supports version 5.2.0 of the collation algorithm on Python 2.x
COLLATION_FILE = "allkeys-5.2.0.txt"
COLLATION_ZIP_FILE = os.path.join(os.path.dirname(__file__), "allkeys-5.2.0.zip")
logger = logging.getLogger(__file__)
class ZipLoaderMixin(object):
"""
The UCA collation file is massive (nearly 1.5M) but it's all text
so it compresses easily. We ship the file zipped up and then decompress
it on the fly here to save on storage, data transfer, memory etc.
The use of generators on load should be efficient.
"""
def __init__(self, zip_filename, text_filename):
"""
The BaseCollator class __init__ takes a filename and calls
load(filename). Here we pass up the text filename but store the
zip filename, then override load so we can load from the zip instead
of a filesystem.
"""
self.zip_filename = zip_filename
super(ZipLoaderMixin, self).__init__(filename=text_filename)
def load(self, filename):
from pyuca.collator import COLL_ELEMENT_PATTERN, hexstrings2int # pyuca is required for ComputedCollationField
with zipfile.ZipFile(self.zip_filename) as z:
with z.open(filename) as f:
for line in f:
line = line.split("#", 1)[0].rstrip()
if not line or line.startswith("@version"):
continue
a, b = line.split(";", 1)
char_list = hexstrings2int(a.split())
coll_elements = []
for x in COLL_ELEMENT_PATTERN.finditer(b.strip()):
weights = x.groups()
coll_elements.append(hexstrings2int(weights))
self.table.add(char_list, coll_elements)
class ComputedCollationField(ComputedFieldMixin, CharField):
"""
App Engine sorts strings based on the unicode codepoints that make them
up. When you have strings from non-ASCII languages this makes the sort order
incorrect (e.g. Ł will be sorted after Z).
This field uses the pyuca library to calculate a sort key using the
Unicode Collation Algorithm, which can then be used for ordering querysets
correctly.
"""
collator = None
def __init__(self, source_field_name):
import pyuca # Required dependency for ComputedCollationField
from pyuca.collator import Collator_5_2_0
# Instantiate Collator once only to save on memory / processing
if not ComputedCollationField.collator:
class Collator(ZipLoaderMixin, Collator_5_2_0):
pass
ComputedCollationField.collator = Collator(COLLATION_ZIP_FILE, COLLATION_FILE)
def truncate(unicode_str):
encoded = unicode_str.encode("utf-8")[:1500]
# We ignore unrecognized chars as the truncation might
# have split a unicode char down the middle
return encoded.decode("utf-8", "ignore")
def computer(instance):
source_value = getattr(instance, source_field_name) or u""
if not isinstance(source_value, unicode):
source_value = unicode(source_value, "utf-8")
sort_key = self.collator.sort_key(source_value)
sort_key = u"".join([unichr(x) for x in sort_key])
truncated_key = truncate(sort_key)
if truncated_key != sort_key:
                logger.warning(
"Truncated sort key for '%s.%s'", instance._meta.db_table, source_field_name
)
return truncated_key
super(ComputedCollationField, self).__init__(computer)
def deconstruct(self):
name, path, args, kwargs = super(ComputedCollationField, self).deconstruct()
del kwargs["max_length"]
return name, path, args, kwargs
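# A hypothetical usage sketch (model and field names are illustrative):
#
#     class Author(models.Model):
#         name = CharField(max_length=100)
#         name_order = ComputedCollationField("name")
#
# Ordering a queryset on "name_order" then sorts by the Unicode Collation
# Algorithm instead of raw codepoints.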
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2018 Riverbank Computing Limited.
## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Digia Plc and its Subsidiary(-ies) nor the names
## of its contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import QAbstractListModel, QModelIndex, Qt, QUrl, QVariant
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtQuick import QQuickView
import abstractitemmodel_rc
class Animal(object):
def __init__(self, type, size):
self._type = type
self._size = size
def type(self):
return self._type
def size(self):
return self._size
class AnimalModel(QAbstractListModel):
TypeRole = Qt.UserRole + 1
SizeRole = Qt.UserRole + 2
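    # Map the custom role ids to the property names exposed to QML delegates.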
_roles = {TypeRole: b"type", SizeRole: b"size"}
def __init__(self, parent=None):
super(AnimalModel, self).__init__(parent)
self._animals = []
def addAnimal(self, animal):
self.beginInsertRows(QModelIndex(), self.rowCount(), self.rowCount())
self._animals.append(animal)
self.endInsertRows()
def rowCount(self, parent=QModelIndex()):
return len(self._animals)
def data(self, index, role=Qt.DisplayRole):
try:
animal = self._animals[index.row()]
except IndexError:
return QVariant()
if role == self.TypeRole:
return animal.type()
if role == self.SizeRole:
return animal.size()
return QVariant()
def roleNames(self):
return self._roles
if __name__ == '__main__':
import os
import sys
# This is necessary to avoid a possible crash when running from another
# directory by ensuring the compiled version of the embedded QML file
# doesn't get mixed up with another of the same name.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
app = QGuiApplication(sys.argv)
model = AnimalModel()
model.addAnimal(Animal("Wolf", "Medium"))
model.addAnimal(Animal("Polar bear", "Large"))
model.addAnimal(Animal("Quoll", "Small"))
view = QQuickView()
view.setResizeMode(QQuickView.SizeRootObjectToView)
ctxt = view.rootContext()
ctxt.setContextProperty('myModel', model)
view.setSource(QUrl('qrc:view.qml'))
view.show()
sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v6/proto/resources/remarketing_action.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v6.proto.common import tag_snippet_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_common_dot_tag__snippet__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v6/proto/resources/remarketing_action.proto',
package='google.ads.googleads.v6.resources',
syntax='proto3',
serialized_options=b'\n%com.google.ads.googleads.v6.resourcesB\026RemarketingActionProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V6.Resources\312\002!Google\\Ads\\GoogleAds\\V6\\Resources\352\002%Google::Ads::GoogleAds::V6::Resources',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n@google/ads/googleads_v6/proto/resources/remarketing_action.proto\x12!google.ads.googleads.v6.resources\x1a\x36google/ads/googleads_v6/proto/common/tag_snippet.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto\"\xd3\x02\n\x11RemarketingAction\x12I\n\rresource_name\x18\x01 \x01(\tB2\xe0\x41\x05\xfa\x41,\n*googleads.googleapis.com/RemarketingAction\x12\x14\n\x02id\x18\x05 \x01(\x03\x42\x03\xe0\x41\x03H\x00\x88\x01\x01\x12\x11\n\x04name\x18\x06 \x01(\tH\x01\x88\x01\x01\x12\x45\n\x0ctag_snippets\x18\x04 \x03(\x0b\x32*.google.ads.googleads.v6.common.TagSnippetB\x03\xe0\x41\x03:s\xea\x41p\n*googleads.googleapis.com/RemarketingAction\x12\x42\x63ustomers/{customer_id}/remarketingActions/{remarketing_action_id}B\x05\n\x03_idB\x07\n\x05_nameB\x83\x02\n%com.google.ads.googleads.v6.resourcesB\x16RemarketingActionProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V6.Resources\xca\x02!Google\\Ads\\GoogleAds\\V6\\Resources\xea\x02%Google::Ads::GoogleAds::V6::Resourcesb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads__v6_dot_proto_dot_common_dot_tag__snippet__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_REMARKETINGACTION = _descriptor.Descriptor(
name='RemarketingAction',
full_name='google.ads.googleads.v6.resources.RemarketingAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v6.resources.RemarketingAction.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005\372A,\n*googleads.googleapis.com/RemarketingAction', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='id', full_name='google.ads.googleads.v6.resources.RemarketingAction.id', index=1,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='google.ads.googleads.v6.resources.RemarketingAction.name', index=2,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tag_snippets', full_name='google.ads.googleads.v6.resources.RemarketingAction.tag_snippets', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\352Ap\n*googleads.googleapis.com/RemarketingAction\022Bcustomers/{customer_id}/remarketingActions/{remarketing_action_id}',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_id', full_name='google.ads.googleads.v6.resources.RemarketingAction._id',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_name', full_name='google.ads.googleads.v6.resources.RemarketingAction._name',
index=1, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=250,
serialized_end=589,
)
_REMARKETINGACTION.fields_by_name['tag_snippets'].message_type = google_dot_ads_dot_googleads__v6_dot_proto_dot_common_dot_tag__snippet__pb2._TAGSNIPPET
_REMARKETINGACTION.oneofs_by_name['_id'].fields.append(
_REMARKETINGACTION.fields_by_name['id'])
_REMARKETINGACTION.fields_by_name['id'].containing_oneof = _REMARKETINGACTION.oneofs_by_name['_id']
_REMARKETINGACTION.oneofs_by_name['_name'].fields.append(
_REMARKETINGACTION.fields_by_name['name'])
_REMARKETINGACTION.fields_by_name['name'].containing_oneof = _REMARKETINGACTION.oneofs_by_name['_name']
DESCRIPTOR.message_types_by_name['RemarketingAction'] = _REMARKETINGACTION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RemarketingAction = _reflection.GeneratedProtocolMessageType('RemarketingAction', (_message.Message,), {
'DESCRIPTOR' : _REMARKETINGACTION,
'__module__' : 'google.ads.googleads_v6.proto.resources.remarketing_action_pb2'
,
'__doc__': """A remarketing action. A snippet of JavaScript code that will collect
the product id and the type of page people visited (product page,
shopping cart page, purchase page, general site visit) on an
advertiser's website.
Attributes:
resource_name:
Immutable. The resource name of the remarketing action.
Remarketing action resource names have the form: ``customers/
{customer_id}/remarketingActions/{remarketing_action_id}``
id:
Output only. Id of the remarketing action.
name:
The name of the remarketing action. This field is required
and should not be empty when creating new remarketing actions.
tag_snippets:
Output only. The snippets used for tracking remarketing
actions.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.resources.RemarketingAction)
})
_sym_db.RegisterMessage(RemarketingAction)
DESCRIPTOR._options = None
_REMARKETINGACTION.fields_by_name['resource_name']._options = None
_REMARKETINGACTION.fields_by_name['id']._options = None
_REMARKETINGACTION.fields_by_name['tag_snippets']._options = None
_REMARKETINGACTION._options = None
# @@protoc_insertion_point(module_scope)
|
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.linkage_type import LinkageType as LinkageType_
__all__ = ["LinkageType"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class LinkageType(LinkageType_):
"""
LinkageType
Used to distinguish different roles a resource can play within a set of
linked resources.
Status: draft - Version: 4.0.1
http://hl7.org/fhir/ValueSet/linkage-type
"""
class Meta:
resource = _resource
|
import adafruit_ble
from adafruit_ble.advertising import Advertisement
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
from adafruit_ble.services.standard.device_info import DeviceInfoService
from adafruit_ble.services.standard.hid import HIDService
from adafruit_hid.keyboard import Keyboard
from therefore.cc_device import ConsumerControl
from . import mesh
hid = HIDService()
device_info = DeviceInfoService(software_revision=adafruit_ble.__version__,
manufacturer="Adafruit Industries")
advertisement = ProvideServicesAdvertisement(hid)
advertisement.appearance = 961
scan_response = Advertisement()
scan_response.complete_name = "Therefore"
ble = adafruit_ble.BLERadio()
def get_keyboard():
return Keyboard(hid.devices)
def get_consumer_control():
return ConsumerControl(hid.devices)
def advertise():
if not connected():
print("advertising")
ble.start_advertising(advertisement, scan_response)
else:
print("already connected")
def disconnect():
for con in ble.connections:
con.disconnect()
def connected():
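    """Return True if any connection other than a subkeypad mesh link exists."""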
for connection in ble.connections:
if mesh.SubkeypadService not in connection:
return True
return False
|
import logging
import os
import pickle
import mmcv
import torch
from mmcv.runner import HOOKS, Hook
from mmcv.runner.dist_utils import master_only
@HOOKS.register_module()
class PickleDataHook(Hook):
"""Pickle Useful Data Hook.
This hook will be used in SinGAN training for saving some important data
that will be used in testing or inference.
Args:
output_dir (str): The output path for saving pickled data.
data_name_list (list[str]): The list contains the name of results in
outputs dict.
interval (int): The interval of calling this hook. If set to -1,
            the hook will not be called. Default: -1.
before_run (bool, optional): Whether to save before running.
Defaults to False.
after_run (bool, optional): Whether to save after running.
Defaults to False.
        filename_tmpl (str, optional): Format string for the name of the
            pickled data file. Defaults to 'iter_{}.pkl'.
"""
def __init__(self,
output_dir,
data_name_list,
interval=-1,
before_run=False,
after_run=False,
filename_tmpl='iter_{}.pkl'):
assert mmcv.is_list_of(data_name_list, str)
self.output_dir = output_dir
self.data_name_list = data_name_list
self.interval = interval
self.filename_tmpl = filename_tmpl
self._before_run = before_run
self._after_run = after_run
@master_only
def after_run(self, runner):
"""The behavior after each train iteration.
Args:
runner (object): The runner.
"""
if self._after_run:
self._pickle_data(runner)
@master_only
def before_run(self, runner):
"""The behavior after each train iteration.
Args:
runner (object): The runner.
"""
if self._before_run:
self._pickle_data(runner)
@master_only
def after_train_iter(self, runner):
"""The behavior after each train iteration.
Args:
runner (object): The runner.
"""
if not self.every_n_iters(runner, self.interval):
return
self._pickle_data(runner)
def _pickle_data(self, runner):
filename = self.filename_tmpl.format(runner.iter + 1)
if not hasattr(self, '_out_dir'):
self._out_dir = os.path.join(runner.work_dir, self.output_dir)
mmcv.mkdir_or_exist(self._out_dir)
file_path = os.path.join(self._out_dir, filename)
with open(file_path, 'wb') as f:
data = runner.outputs['results']
not_find_keys = []
data_dict = {}
for k in self.data_name_list:
if k in data.keys():
data_dict[k] = self._get_numpy_data(data[k])
else:
not_find_keys.append(k)
pickle.dump(data_dict, f)
mmcv.print_log(f'Pickle data in {filename}', 'mmgen')
if len(not_find_keys) > 0:
mmcv.print_log(
f'Cannot find keys for pickling: {not_find_keys}',
'mmgen',
level=logging.WARN)
f.flush()
def _get_numpy_data(self, data):
if isinstance(data, list):
return [self._get_numpy_data(x) for x in data]
if isinstance(data, torch.Tensor):
return data.cpu().numpy()
return data
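# A minimal sketch of enabling this hook from an mmcv-style config (the
# 'data_name_list' entry below is illustrative; use keys that actually appear
# in your runner's outputs dict):
#
# custom_hooks = [
#     dict(
#         type='PickleDataHook',
#         output_dir='pickle',
#         data_name_list=['fake_imgs'],
#         interval=5000),
# ]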
|
from .base import Distribution, Probability
from .normal import (
Normal,
MixedNormal
)
def parse_distribution(dist_type, dist_params):
dist_type = dist_type.lower()
if dist_type == "normal":
return Normal.instantiate(dist_params)
if dist_type == "mixed_normal":
return MixedNormal.instantiate(dist_params)
raise NotImplementedError(f"Distribution type \"{dist_type}\" not recognized")
|
#!/usr/bin/env python3
import struct
import sys
class Patcher:
PATCH_TYPE_BYTES = 0
PATCH_TYPE_JUMP = 1
PATCH_TYPE_JAL = 2
PATCH_TYPE_PTR = 3
patchTypes = ['bytes', 'jump', 'jal', 'ptr']
def __init__(self):
self.patches = []
self.sections = {}
self.symbols = {}
def bytes2int(self, b):
return struct.unpack_from('>I', b)[0]
def _makeJump(self, addr, mask):
return (((addr & 0xFFFFFF) >> 2) | mask).to_bytes(4, 'big')
def makeJump(self, addr):
"""Given `addr`, return the opcode for `J addr`."""
return self._makeJump(addr, 0x08000000)
def makeJAL(self, addr):
"""Given `addr`, return the opcode for `JAL addr`."""
return self._makeJump(addr, 0x0C000000)
def readSections(self, src):
"""Read sections list, as generated by:
mips64-elf-objdump -hw file.elf
"""
with open(src) as file:
while True:
line = file.readline()
if line == '': break
parts = line.strip().split()
try: int(parts[0])
except (ValueError, IndexError): continue
sec = dict(
name = parts[1],
size = int(parts[2], 16),
vma = int(parts[3], 16),
lma = int(parts[4], 16),
offset = int(parts[5], 16),
)
self.sections[sec['name']] = sec
def readSymbols(self, path):
"""Read symbols.txt"""
with open(path) as listFile:
while True:
line = listFile.readline()
if line == '': break # end of file
line = line.split()
if len(line) < 4: continue
address = int(line[0], base=16) & 0xFFFFFFFF
size = int(line[1], base=16)
symbol = dict(
address = address,
name = line[3],
size = size,
)
self.symbols[address] = symbol
def readPatches(self, listPath, binPath):
"""Read patches.txt"""
self.binFile = open(binPath, 'rb')
startAddr = None
with open(listPath) as listFile:
while True:
line = listFile.readline()
if line == '': break # end of file
# nm gives us memory addresses, we need to convert those
# to section-relative offsets
address = int(line.split()[0], base=16)
if startAddr is None: startAddr = address
self.patches.append(address - startAddr)
# add the end of the binFile as the last offset
self.binFile.seek(0, 2)
binSize = self.binFile.tell()
self.patches.append(binSize)
def applyPatchPtr(self, patchOffs, data, target, elf):
address = self.bytes2int(data)
symbol = self.symbols[address]
offset = address - self.sections['.text']['vma'] # XXX lma?
if offset < 0:
raise ValueError(
"Symbol %s is not in .text section" % symbol['name'])
offset += self.sections['.text']['offset']
print("ptr: symbol %s at 0x%X => 0x%X, size 0x%X, target 0x%X" % (
symbol['name'], address, offset, symbol['size'], patchOffs ))
elf.seek(offset, 0)
data = elf.read(symbol['size'])
target.seek(patchOffs)
target.write(data)
def applyPatch(self, idx, target, elf):
startOffs, endOffs = self.patches[idx], self.patches[idx+1]
size = endOffs - startOffs
# read patch data
self.binFile.seek(startOffs, 0)
data = self.binFile.read(size)
offset, patchType = struct.unpack_from('>II', data)
data = data[8:]
print("patch: %-5s at 0x%08X => %s" % (
self.patchTypes[patchType], offset, data.hex()))
target.seek(offset, 0)
if patchType == self.PATCH_TYPE_BYTES:
target.write(data)
elif patchType == self.PATCH_TYPE_JUMP:
target.write(self.makeJump(self.bytes2int(data)))
elif patchType == self.PATCH_TYPE_JAL:
target.write(self.makeJAL(self.bytes2int(data)))
elif patchType == self.PATCH_TYPE_PTR:
self.applyPatchPtr(offset, data, target, elf)
else:
raise ValueError("Unknown patch type: " + str(patchType))
def applyPatches(self, target, elf):
for i in range(len(self.patches) - 1):
self.applyPatch(i, target, elf)
def main(targetPath, patchPath, elfPath):
print("patch " + str(targetPath) + " with " + str(patchPath))
p = Patcher()
p.readPatches (patchPath + '/patches.txt', patchPath + '/patches.bin')
p.readSections(patchPath + '/sections.txt')
p.readSymbols (patchPath + '/symbols.txt')
with open(targetPath, 'r+b') as target:
with open(elfPath, 'rb') as elf:
p.applyPatches(target, elf)
return 0
if __name__ == '__main__':
sys.exit(main(*sys.argv[1:]))
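# Example invocation (paths are illustrative):
#   ./patcher.py target.bin ./build ./build/patches.elf
# where ./build must contain patches.txt, patches.bin, sections.txt and
# symbols.txt.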
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Rainbow."""
# pylint: disable=g-bad-import-order
from absl import flags
from absl.testing import flagsaver
from dqn_zoo.rainbow_flare import run_atari
from absl.testing import absltest
FLAGS = flags.FLAGS
class RunAtariFlareTest(absltest.TestCase):
@flagsaver.flagsaver
def test_can_run_agent(self):
FLAGS.environment_name = 'pong'
FLAGS.replay_capacity = 1000
FLAGS.target_network_update_period = 3
FLAGS.num_train_frames = 100
FLAGS.num_eval_frames = 50
FLAGS.num_iterations = 3
FLAGS.batch_size = 10
FLAGS.learn_period = 2
run_atari.main(None)
if __name__ == '__main__':
absltest.main()
|
from flask import render_template, session, redirect, url_for, current_app, abort,flash, request, make_response
from .. import db
from ..models import User, Role, Post, Permission, Comment
from ..email import send_email
from . import main
from .forms import EditProfileForm, EditProfileAdminForm, PostForm, CommentForm
#from app.main.forms import NameForm
from flask_login import login_required, current_user
from app.decorators import admin_required, permission_required
@main.after_app_request
def after_request(response):
from flask_sqlalchemy import get_debug_queries
for query in get_debug_queries():
if query.duration >= current_app.config['WEBLOG_SLOW_DB_QUERY_TIME']:
current_app.logger.warning(
'Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
% (query.statement, query.parameters, query.duration,
query.context))
return response
@main.route('/shutdown')
def server_shutdown():
"""
testing
The testing flag. Set this to True to enable the test mode of Flask extensions (and in the future probably also Flask itself).
For example this might activate unittest helpers that have an additional runtime cost which should not be enabled by default.
If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the default it’s implicitly enabled.
This attribute can also be configured from the config with the TESTING configuration key. Defaults to False.
"""
if not current_app.testing:
abort(404)
shutdown = request.environ.get('werkzeug.server.shutdown')
if not shutdown:
abort(500)
shutdown()
return 'Shutting down...'
"""
@main.route('/', methods=['GET', 'POST'])
def index():
form = NameForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.name.data).first()
if user is None:
user = User(username=form.name.data)
db.session.add(user)
db.session.commit()
session['known'] = False
if current_app.config['WEBLOG_ADMIN']:
send_email(current_app.config['WEBLOG_ADMIN'], 'New User',
'mail/new_user', user=user)
else:
session['known'] = True
session['name'] = form.name.data
return redirect(url_for('.index'))
return render_template('index.html',
form=form, name=session.get('name'),
known=session.get('known', False))
"""
@main.route('/', methods=['GET', 'POST'])
def index():
form = PostForm()
if current_user.can(Permission.WRITE) and \
form.validate_on_submit():
post = Post(body=form.body.data,
author=current_user._get_current_object())
db.session.add(post)
db.session.commit()
return redirect(url_for('.index'))
show_followed = False
if current_user.is_authenticated:
show_followed = bool(request.cookies.get('show_followed', ''))
if show_followed:
query = current_user.followed_posts
else:
query = Post.query
page = request.args.get('page', 1, type=int)
pagination = query.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['WEBLOG_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
return render_template('index.html', form=form, posts=posts,
show_followed=show_followed, pagination=pagination)
#posts = query.order_by(Post.timestamp.desc()).all()
#return render_template('index.html', form=form, posts=posts, show_followed=show_followed)
@main.route('/all')
@login_required
def show_all():
resp = make_response(redirect(url_for('.index')))
resp.set_cookie('show_followed', '', max_age=30*24*60*60)
return resp
@main.route('/followed')
@login_required
def show_followed():
resp = make_response(redirect(url_for('.index')))
resp.set_cookie('show_followed', '1', max_age=30*24*60*60)
return resp
@main.route('/user/<username>')
def user(username):
user = User.query.filter_by(username=username).first()
if user is None:
abort(404)
page = request.args.get('page', 1, type=int)
pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['WEBLOG_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
return render_template('user.html', user=user, posts=posts,
pagination=pagination)
#posts = user.posts.order_by(Post.timestamp.desc()).all()
#return render_template('user.html', user=user, posts=posts)
@main.route('/edit-profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
form = EditProfileForm()
if form.validate_on_submit():
current_user.name = form.name.data
current_user.location = form.location.data
current_user.about_me = form.about_me.data
db.session.add(current_user)
db.session.commit()
flash('Your profile has been updated.')
return redirect(url_for('.user', username=current_user.username))
form.name.data = current_user.name
form.location.data = current_user.location
form.about_me.data = current_user.about_me
return render_template('edit_profile.html', form=form)
@main.route('/edit-profile/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_profile_admin(id):
user = User.query.get_or_404(id)
form = EditProfileAdminForm(user=user)
if form.validate_on_submit():
user.email = form.email.data
user.username = form.username.data
user.confirmed = form.confirmed.data
user.role = Role.query.get(form.role.data)
user.name = form.name.data
user.location = form.location.data
user.about_me = form.about_me.data
db.session.add(user)
db.session.commit()
flash('The profile has been updated.')
return redirect(url_for('.user', username=user.username))
form.email.data = user.email
form.username.data = user.username
form.confirmed.data = user.confirmed
form.role.data = user.role_id
form.name.data = user.name
form.location.data = user.location
form.about_me.data = user.about_me
return render_template('edit_profile.html', form=form, user=user)
#@main.route('/post/<int:id>')
#def post(id):
# post = Post.query.get_or_404(id)
# return render_template('post.html', posts=[post])
@main.route('/post/<int:id>', methods=['GET', 'POST'])
def post(id):
post = Post.query.get_or_404(id)
form = CommentForm()
if form.validate_on_submit():
comment = Comment(body=form.body.data,
post=post,
author=current_user._get_current_object())
db.session.add(comment)
db.session.commit()
flash('Your comment has been published.')
return redirect(url_for('.post', id=post.id, page=-1))
page = request.args.get('page', 1, type=int)
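    # page == -1 is a sentinel meaning "jump to the last page of comments"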
if page == -1:
page = (post.comments.count() - 1) // \
current_app.config['WEBLOG_COMMENTS_PER_PAGE'] + 1
pagination = post.comments.order_by(Comment.timestamp.asc()).paginate(
page, per_page=current_app.config['WEBLOG_COMMENTS_PER_PAGE'],
error_out=False)
comments = pagination.items
return render_template('post.html', posts=[post], form=form,
comments=comments, pagination=pagination)
@main.route('/edit-post/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_post(id):
post = Post.query.get_or_404(id)
if post.author != current_user and not current_user.can(Permission.ADMIN):
abort(403)
form = PostForm()
if form.validate_on_submit():
post.body = form.body.data
db.session.add(post)
db.session.commit()
flash('The post has been updated.')
return redirect(url_for('.post', id=post.id))
form.body.data = post.body
return render_template('edit_post.html', form=form)
@main.route('/follow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def follow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
if current_user.is_following(user):
flash('You are already following this user.')
return redirect(url_for('.user', username=username))
current_user.follow(user)
db.session.commit()
flash('You are now following %s.' % username)
return redirect(url_for('.user', username=username))
@main.route('/unfollow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def unfollow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
if not current_user.is_following(user):
flash('You are not following this user.')
return redirect(url_for('.user', username=username))
current_user.unfollow(user)
db.session.commit()
flash('You are not following %s anymore.' % username)
return redirect(url_for('.user', username=username))
@main.route('/followers/<username>')
def followers(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
page = request.args.get('page', 1, type=int)
pagination = user.followers.paginate(
page, per_page=current_app.config['WEBLOG_FOLLOWERS_PER_PAGE'],
error_out=False)
follows = [{'user': item.follower, 'timestamp': item.timestamp}
for item in pagination.items]
return render_template('followers.html', user=user, title="Followers of",
endpoint='.followers', pagination=pagination,
follows=follows)
@main.route('/followed-by/<username>')
def followed_by(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
page = request.args.get('page', 1, type=int)
pagination = user.followed.paginate(
page, per_page=current_app.config['WEBLOG_FOLLOWERS_PER_PAGE'],
error_out=False)
follows = [{'user': item.followed, 'timestamp': item.timestamp}
for item in pagination.items]
return render_template('followers.html', user=user, title="Followed by",
endpoint='.followed_by', pagination=pagination,
follows=follows)
@main.route('/moderate')
@login_required
@permission_required(Permission.MODERATE)
def moderate():
page = request.args.get('page', 1, type=int)
pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(
page, per_page=current_app.config['WEBLOG_COMMENTS_PER_PAGE'],
error_out=False)
comments = pagination.items
return render_template('moderate.html', comments=comments,
pagination=pagination, page=page)
@main.route('/moderate/enable/<int:id>')
@login_required
@permission_required(Permission.MODERATE)
def moderate_enable(id):
comment = Comment.query.get_or_404(id)
comment.disabled = False
db.session.add(comment)
db.session.commit()
return redirect(url_for('.moderate',
page=request.args.get('page', 1, type=int)))
@main.route('/moderate/disable/<int:id>')
@login_required
@permission_required(Permission.MODERATE)
def moderate_disable(id):
comment = Comment.query.get_or_404(id)
comment.disabled = True
db.session.add(comment)
db.session.commit()
return redirect(url_for('.moderate',
page=request.args.get('page', 1, type=int)))
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class NeighbourhoodCheck1(Document):
pass
|
import chess
import math
from colorama import Fore, Style, Back
import inquirer
from enum import Flag
import search
import paint
import networkx as nx
import graph
class Player(Flag):
COMPUTER = True
HUMAN = False
def ask_player_type(color: chess.Color) -> Player:
questions = [
inquirer.List(
"pieces",
message=f"Who controls {player_string(color)}?",
choices=["computer", "human"],
)
]
is_computer = inquirer.prompt(questions)["pieces"] == "computer"
return Player.COMPUTER if is_computer else Player.HUMAN
def input_depth():
while True:
try:
return int(input("How many moves to look ahead? "))
        except ValueError:
error("Not a number.")
def error(message: str):
print(f"{Fore.RED}{message}{Style.RESET_ALL}")
def info(message: str):
print(f"{Fore.YELLOW}{message}{Style.RESET_ALL}")
def player_string(color: chess.Color):
if color == chess.WHITE:
return f"{Back.WHITE}{Fore.BLACK} white {Style.RESET_ALL}"
else:
return f"{Back.BLACK}{Fore.WHITE} black {Style.RESET_ALL}"
def input_move(board: chess.Board):
uci = input(f"Which move to play for {player_string(board.turn)}? ")
try:
board.push_san(uci)
print("")
print_board(board)
except ValueError:
error("Not a (legal) move. Try again!\n")
input_move(board)
def auto_move(board: chess.Board, depth, plot=False):
print(f"Calculating best move for {player_string(board.turn)} ...")
if plot:
G = nx.DiGraph()
move = search.best_move(board, depth, G=G)[0]
graph.plot(G, save_as=f"depth_{depth}_{len(list(G.edges()))}_sort9.pdf")
exit(1)
else:
move = search.best_move(board, depth)[0]
    if move is None:
        info("No legal move left.")
exit(0)
board.push(move)
print("")
print_board(board)
def print_board(board: chess.Board):
painted = paint.paint_squares_and_pieces(str(board))
print(painted, "\n")
|
import logging
import pytest
from sanic.signals import RESERVED_NAMESPACES
from sanic.touchup import TouchUp
def test_touchup_methods(app):
assert len(TouchUp._registry) == 9
@pytest.mark.parametrize(
"verbosity,result", ((0, False), (1, False), (2, True), (3, True))
)
async def test_ode_removes_dispatch_events(app, caplog, verbosity, result):
with caplog.at_level(logging.DEBUG, logger="sanic.root"):
app.state.verbosity = verbosity
await app._startup()
logs = caplog.record_tuples
for signal in RESERVED_NAMESPACES["http"]:
assert (
(
"sanic.root",
logging.DEBUG,
f"Disabling event: {signal}",
)
in logs
) is result
|
from Motor import MotorADC
class Master:
__instance = None
def __init__(self):
#self.status_vector = dict()
#self.command_vector = dict()
self.motor_adc = MotorADC()
Master.__instance = self
@staticmethod
def get_instance():
if Master.__instance is None:
Master()
return Master.__instance
def start(self):
while True:
count_steps = input("give steps - ONLY INTEGER\n")
count_steps = int(count_steps)
direction = input("give 0 (anti-clockwise) or 1 (clockwise)\n")
direction = int(direction)
self.motor_adc.act(count_steps, direction)
if __name__ == "__main__":
print("""
This is a program to test only ADC MOTOR control.
Use commands:
[+] ACT
[+] an integer (e.x 5) # steps for motor - after ACT
[+] 1 or 0 # for clockwise or anti-clockwise of motor - after ACT n' steps
[+] KILL #to kill program
""")
Master().start()
|
import numpy as np
from ceres.constants import muSun, AU, SPD
from ceres.orbits import TwoBody
# Orbital elements for CERES (from SBDB):
a = 2.766043062222408*AU
e = 0.07850100198908602
i = np.deg2rad(10.58769305845201)
peri = np.deg2rad(73.63703979153577)
RAAN = np.deg2rad(80.26859547732911)
M = np.deg2rad(291.3755993017663)
epoch = 2459600.5*SPD
# Create the orbit instance:
orbit = TwoBody(muSun,'elements',np.array([a,e,i,peri,RAAN,M]),epoch)
# Desired epoch:
t = 2459499.500000000*SPD
r_truth = np.array([2.377195163333079E+08, 3.403674324622315E+08, -3.304222059618232E+07])
v_truth = np.array([-1.499949963514263E+01, 9.070482555326418E+00, 3.050109354260248E+00])
state = orbit.states(t)
print(state)
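# Sanity check against the Horizons reference values above (assumes states()
# returns position and velocity stacked into a single array):
# print(np.linalg.norm(state[:3] - r_truth), np.linalg.norm(state[3:] - v_truth))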
|
str_N = input("Please enter a number to find summation of 1..N: ")
N = int(str_N) + 1
total = 0
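# Sum the multiples of 3 up to and including the entered number.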
for n in range(3,N,3):
total = total + n
print(total)
|
class Pessoa:
olhos = 2
def __init__(self, *filhos, nome=None, idade=35):
self.idade = idade
self.nome = nome
self.filhos = list(filhos)
def cumprimentar(self):
return f'olá {id(self)}'
@staticmethod
def metodo_estatico():
return 42
@classmethod
def nome_e_atributos_de_classe(cls):
return f'{cls} - olhos {cls.olhos} '
if __name__ == '__main__':
renzo = Pessoa(nome='renzo')
luciano = Pessoa(renzo, nome='luciano')
print(Pessoa.cumprimentar(luciano))
print(id(luciano))
print(luciano.cumprimentar())
print(luciano.nome)
print(luciano.idade)
for filho in luciano.filhos:
print(filho.nome)
    luciano.sobrenome = 'Ramalho'
del luciano.filhos
luciano.olhos = 1
del luciano.olhos
print(renzo.__dict__)
print(luciano.__dict__)
Pessoa.olhos = 3
print(Pessoa.olhos)
print(luciano.olhos)
print(renzo.olhos)
    print(id(Pessoa.olhos), id(luciano.olhos), id(renzo.olhos))
print(Pessoa.metodo_estatico(), luciano.metodo_estatico())
print(Pessoa.nome_e_atributos_de_classe(), luciano.metodo_estatico())
|
#!/usr/bin/env python
# coding: utf-8
"""
The approach taken is explained below. I decided to do it simply.
Initially I was considering parsing the data into some sort of
structure and then generating an appropriate README. I am still
considering doing it - but for now this should work. The only issue
I see is that it only sorts the entries at the lowest level, and that
the order of the top-level contents do not match the order of the actual
entries.
This could be extended by having nested blocks, sorting them recursively
and flattening the end structure into a list of lines. Revision 2 maybe ^.^.
"""
def sort_blocks():
# First, we load the current README into memory
with open('README.md', 'r') as read_me_file:
read_me = read_me_file.read()
# Separating the 'table of contents' from the contents (blocks)
table_of_contents = ''.join(read_me.split('- - -')[0])
blocks = ''.join(read_me.split('- - -')[1]).split('\n# ')
for i in range(len(blocks)):
if i == 0:
blocks[i] = blocks[i] + '\n'
else:
blocks[i] = '# ' + blocks[i] + '\n'
# Sorting the libraries
inner_blocks = sorted(blocks[0].split('##'))
for i in range(1, len(inner_blocks)):
if inner_blocks[i][0] != '#':
inner_blocks[i] = '##' + inner_blocks[i]
inner_blocks = ''.join(inner_blocks)
    # Replace the unsorted blocks with the sorted ones and assemble the final README
blocks[0] = inner_blocks
final_README = table_of_contents + '- - -' + ''.join(blocks)
with open('README.md', 'w+') as sorted_file:
sorted_file.write(final_README)
def main():
# First, we load the current README into memory as an array of lines
with open('README.md', 'r') as read_me_file:
read_me = read_me_file.readlines()
# Then we cluster the lines together as blocks
# Each block represents a collection of lines that should be sorted
# This was done by assuming only links ([...](...)) are meant to be sorted
# Clustering is done by indentation
blocks = []
last_indent = None
for line in read_me:
s_line = line.lstrip()
indent = len(line) - len(s_line)
if any([s_line.startswith(s) for s in ['* [', '- [']]):
if indent == last_indent:
blocks[-1].append(line)
else:
blocks.append([line])
last_indent = indent
else:
blocks.append([line])
last_indent = None
with open('README.md', 'w+') as sorted_file:
# Then all of the blocks are sorted individually
blocks = [
''.join(sorted(block, key=str.lower)) for block in blocks
]
# And the result is written back to README.md
sorted_file.write(''.join(blocks))
# Then we call the sorting method
sort_blocks()
if __name__ == "__main__":
main()
|
from typing import TextIO
from .datatypes import OsuFile
from .sections import Metadata, TimingPoints, HitObjects, Events, Colours, make_default_metadata_sections
from .combinator import ParserPair
from .utils import spliton
class Parser:
# base parsers
def parse_bool(self, x): return bool(int(x))
def write_bool(self, x): return str(int(x))
def parse_int(self, x): return int(round(float(x)))
def write_int(self, x): return str(int(x))
def parse_float(self, x): return float(x)
def write_float(self, x): return str(x)
def __init__(self):
# lookup tables are created in the constructor rather than as static variables
# to allow for inheritance (if "parse_int" is changed in a subclass, the base class should use the subclass's implementation)
# (need a reference to 'self')
# could use metaclasses to generate the lookup table but it makes things complicated
# Place the base parsing functions in the main parser for now
self.init_base_parser()
base_parser = self
self.sections = {
**make_default_metadata_sections(base_parser),
'HitObjects': HitObjects(base_parser),
'TimingPoints': TimingPoints(base_parser),
'Events': Events(base_parser),
'Colours': Colours(base_parser),
}
def init_base_parser(self):
self.osu_int = ParserPair(self.parse_int, self.write_int)
self.osu_float = ParserPair(self.parse_float, self.write_float)
self.osu_bool = ParserPair(self.parse_bool, self.write_bool)
self.osu_str = ParserPair(str,str)
def parse(self, obj):
'Helper function to call osufile.parse using this parser'
from .base import parse
return parse(obj, parser=self)
def write(self, obj, osu):
'Helper function to call osufile.write using this parser'
from .base import write
return write(obj, osu, parser=self)
def _parse(self, file: TextIO) -> OsuFile:
"""
Parse a .osu file from a file object
Returns an OsuFile
"""
def sections(file):
'Returns iterator of (section name, iterator of lines in section)'
for section,lines in spliton(map(str.strip, file), lambda line: line.startswith('[')):
if section is None: continue # ignore everything before the first section
section = section[1:-1]
yield section,lines
osu = OsuFile()
header = next(file).strip()
osu.header = header
for section,lines in sections(file):
if section in self.sections:
try:
osu[section] = self.sections[section].parse(section, lines)
except Exception as ex:
raise ValueError(f"Error parsing section {section!r}") from ex
else:
osu[section] = list(lines)
return osu
def _write(self, file: TextIO, osu: OsuFile) -> None:
file.write('osu file format v14' + '\n') # output is written in v14 format
for section in osu.keys():
file.write('\n') #newline to make the formatting look good
file.write(f'[{section}]\n')
if section in self.sections:
try:
self.sections[section].write(file, section, osu[section])
except Exception as ex:
raise ValueError(f"Error writing section {section!r}") from ex
else:
for line in osu[section]:
file.write(line + '\n')
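# A minimal usage sketch (the file name is illustrative):
#
# parser = Parser()
# with open('beatmap.osu', encoding='utf-8') as f:
#     osu = parser._parse(f)
# print(osu.header)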
|
from unittest.mock import patch
PLAYERS = ['Player 1', 'Player 2']
def test_print_intro(capsys):
print_intro()
capture = capsys.readouterr()
assert capture.out == "...rock...\n...paper...\n...scissors...\n"
def test_player_1_input():
with patch('builtins.input', return_value='rock'):
test_name = 'Player 1'
assert get_player_input(test_name) == 'rock'
def test_get_input():
test_inputs = ['rock', 'paper']
with patch('builtins.input', side_effect=test_inputs):
assert get_inputs()[PLAYERS[0]] == test_inputs[0]
def test_same_shoot():
test_inputs = ['rock', 'rock']
with patch('builtins.input', side_effect=test_inputs):
assert get_winner() == "Tie"
def test_player_1_wins():
test_inputs = ['rock', 'scissors']
with patch('builtins.input', side_effect=test_inputs):
assert get_winner() == PLAYERS[0]
def test_player_2_wins():
test_inputs = ['scissors', 'rock']
with patch('builtins.input', side_effect=test_inputs):
assert get_winner() == PLAYERS[1]
def test_player_1_wins_scissors():
test_inputs = ['scissors', 'paper']
with patch('builtins.input', side_effect=test_inputs):
assert get_winner() == PLAYERS[0]
def test_incorrect_input():
test_inputs = ['dfgkljdg', 'kdjfhgk']
with patch('builtins.input', side_effect=test_inputs):
assert get_winner() is None
def print_intro():
print('...rock...')
print('...paper...')
print('...scissors...')
def get_player_input(player_name):
player_input = input("(enter " + str(player_name) + "'s choice): ")
return player_input
def get_inputs():
player_inputs = {}
for name in PLAYERS:
player_inputs[name] = get_player_input(name)
print("SHOOT!")
return player_inputs
def get_winner():
inputs = list(get_inputs().values())
winner = None
if inputs[0] == inputs[1]:
winner = 'Tie'
elif inputs[0] == "rock":
if inputs[1] == "scissors":
winner = PLAYERS[0]
if inputs[1] == "paper":
winner = PLAYERS[1]
elif inputs[0] == "paper":
if inputs[1] == "rock":
winner = PLAYERS[0]
if inputs[1] == "scissors":
winner = PLAYERS[1]
elif inputs[0] == "scissors":
if inputs[1] == "rock":
winner = PLAYERS[1]
if inputs[1] == "paper":
winner = PLAYERS[0]
else:
print("Something went wrong.")
print_winner(winner)
return winner
def print_winner(winner):
if winner == 'Tie':
print("It's a tie!")
elif winner is not None:
print(f"{winner} wins!")
else:
print("There should always be a winner, something must have gone wrong.")
if __name__ == "__main__":
    # Guard the interactive round so importing this module (e.g. under pytest)
    # does not prompt for input.
    get_winner()
|
from .test_billable_hours import TestBillableHours
from .test_hourly import TestHourlyReport
from .test_payroll import PayrollTest
from .test_productivity import TestProductivityReport
|
from flaskdocs import mail, twilio
from flask_mail import Message
def send_email_to_staff(staff, document, daysleft):
    msg = Message(f'Document notification - {document.name}', sender="noreply@dochub.info", recipients=[staff.email])
    msg.body = f'''{staff.first_name} {staff.second_name}, your {document.name} expires on {document.expiration_date.format("DD.MM.YYYY")}
Days left: {daysleft}
'''
    mail.send(msg)
def send_email_to_group(group, daysleft, document, staff):
    receivers = []
    for user in group.user_count:
        if user.use_email:
            receivers.append(user.email)
    msg = Message(f'Document notification - {document.name}', sender="noreply@dochub.info", recipients=receivers)
    msg.body = f'''The document '{document.name}' belonging to employee {staff.first_name} {staff.second_name}
expires on {document.expiration_date.format("DD.MM.YYYY")}
Days left: {daysleft}
Employee contacts:
Phone {staff.phone.e164}
Email {staff.email}
'''
    if receivers:
        mail.send(msg)
def send_sms_to_staff(staff, document, daysleft):
    try:
        body = f'''{staff.first_name} {staff.second_name}, your {document.name} expires on {document.expiration_date.format("DD.MM.YYYY")}
Days left: {daysleft}
'''
        twilio.message(body=body, to=staff.phone.e164)
    except Exception:
        # SMS delivery is best-effort; failures are silently ignored
        pass
def send_sms_to_group(group, daysleft, document, staff):
    for user in group.user_count:
        if user.use_phone:
            try:
                body = f'''The document '{document.name}' belonging to employee {staff.first_name} {staff.second_name}
expires on {document.expiration_date.format("DD.MM.YYYY")}
Days left: {daysleft}
Employee contacts:
Phone {staff.phone.e164}
Email {staff.email}
'''
                twilio.message(body=body, to=user.phone.e164)
            except Exception:
                # SMS delivery is best-effort; failures are silently ignored
                pass
|
import os
from googleapiclient.discovery import build
import googleapiclient.errors
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from youtube_dl import YoutubeDL
import constants
from parameter_store import ParameterStore
class YoutubeClient:
"""
The Youtube client class used to interface with the Youtube API.
"""
def __init__(self):
self._api_service_name = "youtube"
self._api_version = "v3"
# The scopes of permissions request from the user
self._scopes = ["https://www.googleapis.com/auth/youtube.readonly"]
        # Parameter store to get and update Youtube secrets
self._parameter_store = ParameterStore('Youtube', constants.YOUTUBE_SECRETS)
self._credentials = self._init_credentials()
self._client = self._init_youtube_client()
def _init_credentials(self):
"""
Instantiates and returns a Credentials object. This is used to
instantiate the Youtube API client, and to refresh the Youtube
access token on expiration.
"""
youtube_secrets = self._parameter_store.get_secrets()
        return Credentials(
token=youtube_secrets.get("access_token"),
refresh_token=youtube_secrets.get("refresh_token"),
token_uri=youtube_secrets.get("token_uri"),
client_id=youtube_secrets.get("client_id"),
client_secret=youtube_secrets.get("client_secret"),
scopes=self._scopes
)
def _init_youtube_client(self):
"""
Instantiates and returns a Youtube API client.
"""
return build(
self._api_service_name,
self._api_version,
credentials=self._credentials,
cache_discovery=False
)
def refresh(self):
"""
Refreshes the Youtube access token.
"""
self._credentials.refresh(Request())
self._parameter_store.update_secrets({
"access_token": self._credentials.token,
"refresh_token": self._credentials.refresh_token
})
self._client = build(
self._api_service_name,
self._api_version,
credentials=self._credentials
)
def get_liked_videos(self, pageToken = None):
"""
Returns the provided page of the user's liked Youtube videos
"""
request = self._client.videos().list(
part="snippet",
maxResults=10,
myRating="like",
pageToken=pageToken,
fields="items(id,snippet.title),nextPageToken"
)
return request.execute()
def get_valid_songs(self, response, recent_video_id):
"""
Iterates through the provided liked videos response from the Youtube
API, and uses YoutubeDL to parse out the videos that are music
tracks.
"""
valid_songs = []
already_processed = False
ydl_opts = {
'skip_download': True,
'quiet': True,
'no_warnings': True
}
for item in response["items"]:
if item["id"] == recent_video_id:
print("[NOTICE] Reached already processed video.")
already_processed = True
break
youtube_url = "https://www.youtube.com/watch?v={}".format(
item["id"]
)
try:
# Get a Youtube video's info
video = YoutubeDL(ydl_opts).extract_info(
youtube_url,
download=False
)
            except Exception:
                # Skip videos that youtube_dl cannot resolve
                continue
            song_name = video.get("track")
            artist = video.get("artist")
if song_name and artist:
# If the video is a music track, add it to the valid songs array
                valid_songs.append({
"title": song_name,
"artist": artist
})
return valid_songs, already_processed
def store_recent_video_id(self, video_id):
"""
Stores the video id of the most recently liked video.
"""
self._parameter_store.update_secrets({
"recent_video_id": video_id
})
def get_recent_video_id(self):
"""
Returns the video id of the most recently liked video.
"""
youtube_secrets = self._parameter_store.get_secrets()
return youtube_secrets.get("recent_video_id")
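# Hedged usage sketch (requires valid OAuth secrets in the parameter store;
# only names defined in this module are used):
#
#   client = YoutubeClient()
#   response = client.get_liked_videos()
#   songs, seen = client.get_valid_songs(response, client.get_recent_video_id())
#   next_page = response.get("nextPageToken")  # pass back in as pageToken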
|
"""
This file offers the methods to automatically retrieve the graph Clostridium tetanomorphum.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def ClostridiumTetanomorphum(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Clostridium tetanomorphum graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
    cache_path: str = "graphs/string"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Clostridium tetanomorphum graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="ClostridiumTetanomorphum",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
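# Hedged usage sketch (downloads from the STRING repository on first call,
# then reuses the local cache):
#
#   graph = ClostridiumTetanomorphum(version="links.v11.5")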
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import uuid
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils import server_utils as server_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression import trigger_funcs_utils
from regression.python_test_utils import test_utils as utils
from . import utils as event_trigger_utils
from unittest.mock import patch
class EventTriggerDeleteTestCase(BaseTestGenerator):
""" This class will delete added event trigger under test database. """
scenarios = utils.generate_scenarios('delete_event_trigger',
event_trigger_utils.test_cases)
def setUp(self):
self.schema_data = parent_node_dict['schema'][-1]
self.server_id = self.schema_data['server_id']
self.db_id = self.schema_data['db_id']
self.schema_name = self.schema_data['schema_name']
self.schema_id = self.schema_data['schema_id']
self.extension_name = "postgres_fdw"
self.db_name = parent_node_dict["database"][-1]["db_name"]
self.db_user = self.server["username"]
self.func_name = "trigger_func_%s" % str(uuid.uuid4())[1:8]
self.trigger_name = "event_trigger_delete_%s" % (
str(uuid.uuid4())[1:8])
server_con = server_utils.connect_server(self, self.server_id)
        if server_con["info"] != "Server connected.":
            raise Exception("Could not connect to server to delete event "
                            "trigger.")
server_version = 0
if "type" in server_con["data"]:
if server_con["data"]["version"] < 90300:
message = "Event triggers are not supported by PG9.2 " \
"and PPAS9.2 and below."
self.skipTest(message)
self.function_info = trigger_funcs_utils.create_trigger_function(
self.server, self.db_name, self.schema_name, self.func_name,
server_version)
self.event_trigger_id = event_trigger_utils.create_event_trigger(
self.server, self.db_name, self.schema_name, self.func_name,
self.trigger_name)
def delete_event_trigger(self):
"""
This function returns the event trigger delete response
:return: event trigger delete response
"""
return self.tester.delete(
self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' +
str(self.db_id) + '/' +
str(self.event_trigger_id),
follow_redirects=True)
def runTest(self):
""" This function will delete event trigger under test database. """
db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
self.server_id, self.db_id)
if not db_con['data']["connected"]:
raise Exception("Could not connect to database.")
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema.")
func_name = self.function_info[1]
func_response = trigger_funcs_utils.verify_trigger_function(
self.server,
self.db_name,
func_name)
if not func_response:
raise Exception("Could not find the trigger function.")
trigger_response = event_trigger_utils.verify_event_trigger(
self.server, self.db_name,
self.trigger_name)
if not trigger_response:
raise Exception("Could not find event trigger.")
actual_response_code = True
expected_response_code = False
if self.is_positive_test:
response = self.delete_event_trigger()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
else:
if hasattr(self, "error_deleting_event_trigger"):
with patch(self.mock_data["function_name"],
return_value=eval(self.mock_data["return_value"])):
response = self.delete_event_trigger()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
if hasattr(self, "error_deleting_created_event_trigger"):
with patch(self.mock_data["function_name"],
side_effect=eval(self.mock_data["return_value"])):
response = self.delete_event_trigger()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
if hasattr(self, "wrong_event_trigger_id"):
self.event_trigger_id = 99999
response = self.delete_event_trigger()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
self.assertEqual(actual_response_code, expected_response_code)
def tearDown(self):
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id)
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Multinomial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import timeit
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
def composed_sampler(logits, num_samples):
# [batch size, num classes, num samples]
unif = random_ops.random_uniform(logits.get_shape().concatenate(
tensor_shape.TensorShape([num_samples])))
noise = -math_ops.log(-math_ops.log(unif))
# [batch size, num classes, 1]
logits = array_ops.expand_dims(logits, -1)
# [batch size, num samples]
return math_ops.argmax(logits + noise, axis=1)
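# composed_sampler above implements the Gumbel-max trick: adding independent
# Gumbel(0, 1) noise, -log(-log(U)) for uniform U, to the logits and taking
# the argmax yields samples distributed according to softmax(logits).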
native_sampler = random_ops.multinomial
class MultinomialTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testSmallEntropy(self):
random_seed.set_random_seed(1618)
for output_dtype in [np.int32, np.int64]:
with test_util.device(use_gpu=True):
# A logit value of -10 corresponds to a probability of ~5e-5.
logits = constant_op.constant([[-10., 10., -10.], [-10., -10., 10.]])
num_samples = 1000
samples = self.evaluate(random_ops.multinomial(
logits, num_samples, output_dtype=output_dtype))
self.assertAllEqual([[1] * num_samples, [2] * num_samples], samples)
def testOneOpMultipleStepsIndependent(self):
with test_util.use_gpu():
sample_op1, _ = self._make_ops(10)
# Consecutive runs shouldn't yield identical output.
sample1a = self.evaluate(sample_op1)
sample1b = self.evaluate(sample_op1)
self.assertFalse(np.equal(sample1a, sample1b).all())
def testEagerOneOpMultipleStepsIndependent(self):
with context.eager_mode(), test_util.device(use_gpu=True):
sample1, sample2 = self._make_ops(10)
# Consecutive runs shouldn't yield identical output.
self.assertFalse(np.equal(sample1.numpy(), sample2.numpy()).all())
def testTwoOpsIndependent(self):
with test_util.use_gpu():
sample_op1, sample_op2 = self._make_ops(32)
sample1, sample2 = self.evaluate([sample_op1, sample_op2])
# We expect sample1 and sample2 to be independent.
# 1 in 2^32 chance of this assertion failing.
self.assertFalse(np.equal(sample1, sample2).all())
def testTwoOpsSameSeedDrawSameSequences(self):
with test_util.use_gpu():
sample_op1, sample_op2 = self._make_ops(1000, seed=1)
sample1, sample2 = self.evaluate([sample_op1, sample_op2])
self.assertAllEqual(sample1, sample2)
def testLargeLogits(self):
for neg in [True, False]:
with test_util.use_gpu():
logits = np.array([[1000.] * 5])
if neg:
logits *= -1
samples = self.evaluate(random_ops.multinomial(logits, 10))
# Sampled classes should be in-range.
self.assertTrue((samples >= 0).all())
self.assertTrue((samples < 5).all())
def testSamplingCorrectness(self):
np.random.seed(1618) # Make it reproducible.
num_samples = 21000
rand_probs = self._normalize(np.random.random_sample((10,)))
rand_probs2 = self._normalize(np.random.random_sample((3, 5))) # batched
for probs in [[.5, .5], [.85, .05, .1], rand_probs, rand_probs2]:
probs = np.asarray(probs)
if len(probs.shape) == 1:
probs = probs.reshape(1, probs.size) # singleton batch
logits = np.log(probs).astype(np.float32)
composed_freqs = self._do_sampling(logits, num_samples, composed_sampler)
native_freqs = self._do_sampling(logits, num_samples, native_sampler)
# the test here is similar to core/lib/random/distribution_sampler_test.cc
composed_chi2 = self._chi2(probs, composed_freqs)
native_chi2 = self._chi2(probs, native_freqs)
composed_native_chi2 = self._chi2(composed_freqs, native_freqs)
def check(chi2s):
for chi2 in chi2s:
self.assertLess(chi2, 1e-3)
check(composed_chi2)
check(native_chi2)
check(composed_native_chi2)
def _make_ops(self, num_samples, seed=None):
prob_dist = constant_op.constant([[0.15, 0.5, 0.3, 0.05]])
logits = math_ops.log(prob_dist)
# Two independent sets of samples from the same distribution
sample_op1 = random_ops.multinomial(logits, num_samples, seed)
sample_op2 = random_ops.multinomial(logits, num_samples, seed)
return (sample_op1, sample_op2)
def _normalize(self, vec):
batched = (len(vec.shape) == 2)
return vec / vec.sum(axis=1, keepdims=True) if batched else vec / vec.sum()
def _do_sampling(self, logits, num_samples, sampler):
"""Samples using the supplied sampler and inputs.
Args:
logits: Numpy ndarray of shape [batch_size, num_classes].
num_samples: Int; number of samples to draw.
sampler: A sampler function that takes (1) a [batch_size, num_classes]
Tensor, (2) num_samples and returns a [batch_size, num_samples] Tensor.
Returns:
Frequencies from sampled classes; shape [batch_size, num_classes].
"""
with test_util.use_gpu():
random_seed.set_random_seed(1618)
op = sampler(constant_op.constant(logits), num_samples)
d = self.evaluate(op)
batch_size, num_classes = logits.shape
freqs_mat = []
for i in range(batch_size):
cnts = dict(collections.Counter(d[i, :]))
# Requires drawn class labels be in range.
self.assertLess(max(cnts.keys()), num_classes)
self.assertGreaterEqual(min(cnts.keys()), 0)
freqs = [(cnts[k] * 1. / num_samples if k in cnts else 0)
for k in range(num_classes)]
freqs_mat.append(freqs)
return freqs_mat
def _chi2(self, expected, actual):
actual = np.asarray(actual)
expected = np.asarray(expected)
diff = actual - expected
chi2 = np.sum(diff * diff / expected, axis=0)
return chi2
def testEmpty(self):
classes = 5
with test_util.use_gpu():
for batch in 0, 3:
for samples in 0, 7:
x = self.evaluate(
random_ops.multinomial(
array_ops.zeros([batch, classes]), samples))
self.assertEqual(x.shape, (batch, samples))
def testEmptyClasses(self):
with test_util.use_gpu():
x = random_ops.multinomial(array_ops.zeros([5, 0]), 7)
with self.assertRaisesOpError("num_classes should be positive"):
self.evaluate(x)
def testNegativeMinLogits(self):
random_seed.set_random_seed(78844)
with test_util.use_gpu():
logits = constant_op.constant([[np.finfo(np.float32).min] * 1023 + [0]])
num_samples = 1000
samples = self.evaluate(random_ops.multinomial(logits, num_samples))
self.assertAllEqual([[1023] * num_samples], samples)
# Benchmarking code
def native_op_vs_composed_ops(batch_size, num_classes, num_samples, num_iters):
np.random.seed(1618) # Make it reproducible.
shape = [batch_size, num_classes]
logits_np = np.random.randn(*shape).astype(np.float32)
# No CSE/CF.
optimizer_options = config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0)
config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
optimizer_options=optimizer_options))
with session.Session(config=config) as sess:
logits = constant_op.constant(logits_np, shape=shape)
native_op = control_flow_ops.group(native_sampler(logits, num_samples))
composed_op = control_flow_ops.group(composed_sampler(logits, num_samples))
native_dt = timeit.timeit(lambda: sess.run(native_op), number=num_iters)
composed_dt = timeit.timeit(lambda: sess.run(composed_op), number=num_iters)
return native_dt, composed_dt
class MultinomialBenchmark(test.Benchmark):
def benchmarkNativeOpVsComposedOps(self):
num_iters = 50
print("Composition of existing ops vs. Native Multinomial op [%d iters]" %
num_iters)
print("BatchSize\tNumClasses\tNumSamples\tsec(composed)\tsec(native)\t"
"speedup")
for batch_size in [32, 128]:
for num_classes in [10000, 100000]:
for num_samples in [1, 4, 32]:
n_dt, c_dt = native_op_vs_composed_ops(batch_size, num_classes,
num_samples, num_iters)
print("%d\t%d\t%d\t%.3f\t%.3f\t%.2f" % (batch_size, num_classes,
num_samples, c_dt, n_dt,
c_dt / n_dt))
self.report_benchmark(
name="native_batch%d_classes%d_s%d" %
(batch_size, num_classes, num_samples),
iters=num_iters,
wall_time=n_dt)
self.report_benchmark(
name="composed_batch%d_classes%d_s%d" %
(batch_size, num_classes, num_samples),
iters=num_iters,
wall_time=c_dt)
if __name__ == "__main__":
test.main()
|
def solution(A):
# write your code in Python 3.6
nums = set()
for n in A:
if n in nums:
nums.remove(n)
else:
nums.add(n)
return nums.pop()
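# A hedged alternative for the same classic task (assuming exactly one value
# occurs an odd number of times and every other value occurs an even number
# of times): XOR-ing all elements cancels the paired values in O(1) memory.
from functools import reduce
from operator import xor
def solution_xor(A):
    # XOR cancels paired occurrences, leaving the unpaired value.
    return reduce(xor, A)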
|
import asyncio
import time
from multiprocessing import Queue
from threading import Thread
import pytest
from liualgotrader.common.types import QueueMapper, WSEventType
from liualgotrader.data.gemini import GeminiStream
gemini_stream: GeminiStream
queues: QueueMapper
stop: bool = False
@pytest.fixture
def event_loop():
global gemini_stream
global queues
loop = asyncio.get_event_loop()
queues = QueueMapper()
gemini_stream = GeminiStream(queues)
yield loop
loop.close()
def listener(q: Queue):
print("start listen", q)
while not stop:
try:
d = q.get(timeout=2)
print("got in q:", d)
except Exception as e:
print(e, "timeout...")
time.sleep(1)
print("end listen")
@pytest.mark.asyncio
@pytest.mark.devtest
async def test_apple_sec_agg():
global gemini_stream
global stop
await gemini_stream.run()
print("going to subscribe")
q = Queue()
queues["BTCUSD"] = q
running_task = Thread(
target=listener,
args=(q,),
)
print("start thread")
running_task.start()
print("started")
await asyncio.sleep(2)
status = await gemini_stream.subscribe(
["BTCUSD"], [WSEventType.MIN_AGG, WSEventType.TRADE]
)
print(f"subscribe result: {status}")
if not status:
raise AssertionError(f"Failed in gemini_stream.subscribe w/ {status}")
await asyncio.sleep(1 * 60)
await gemini_stream.close()
stop = True
running_task.join()
return True
|
from tracardi.domain.context import Context
from tracardi.domain.entity import Entity
from tracardi.domain.event import Event
from tracardi.domain.profile import Profile
from tracardi.domain.session import Session
from tracardi_plugin_sdk.service.plugin_runner import run_plugin
from datetime import datetime
from tracardi_message_send_plugin.plugin import MessageSender
init = {
"url": "http://localhost:12345",
"username": "admin",
"password": "admin"
}
payload = {
"profile_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
"type": "user-consent",
"properties": {
"url": "mypage.com",
"event": {
"grant": "grant1",
"deny": "deny"
}
},
"call": {
"file": "x-file",
"func": "x-func"
},
"expire": "5m",
"postpone": "0m"
}
profile = Profile(id="profile-id")
event = Event(id="event-id",
type="event-type",
profile=profile,
session=Session(id="session-id"),
source=Entity(id="source-id"),
context=Context())
result = run_plugin(MessageSender, init, payload,
profile)
print("OUTPUT:", result.output)
print("PROFILE:", result.profile)
|
#!/usr/bin/env python2.7
from serial import Serial
from optparse import OptionParser
import binascii
parser = OptionParser()
parser.add_option("-s", "--serial", dest="serial_port", default="/dev/ttyUSB0", help="Serial port")
(options, args) = parser.parse_args()
ser = Serial(options.serial_port, 115200, 8)
cmd = "ad98320801000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000009999991932333333cbcccc4c64666666fdffff7f969999992f3333b3c8cccccc616666e6"
print("Push reset to device:\n" + cmd)
ser.write(cmd.decode('hex'))  # Python 2 only; binascii.unhexlify(cmd) is the portable equivalent
res=ser.read(64)
print("Result:\n" + binascii.hexlify(res))
|
from django import forms
from openslides.utils.forms import CssClassMixin
class OptionForm(CssClassMixin, forms.Form):
def __init__(self, *args, **kwargs):
extra = kwargs.pop('extra')
formid = kwargs.pop('formid')
kwargs['prefix'] = "option-%s" % formid
super(OptionForm, self).__init__(*args, **kwargs)
for vote in extra:
key = vote.value
value = vote.get_value()
weight = vote.print_weight(raw=True)
self.fields[key] = forms.IntegerField(
label=value,
initial=weight,
min_value=-2,
required=False)
|
import os
import ConfigParser  # Python 2 stdlib module; renamed to configparser in Python 3
class BaseConfig(object):
def __init__(self, config_file=None):
if config_file is None:
config_file = 'config.ini'
config_path = os.path.join(os.path.dirname(__file__), 'config/' + config_file)
self.config = ConfigParser.ConfigParser()
self.config.read(config_path)
def section_names(self):
return self.config.sections()
def get_section(self, section):
return self.config.items(section)
def get_value(self, section, key):
return self.config.get(section, key)
def ldap(self, key=None):
if key is None:
return self.get_section('LDAP')
return self.get_value('LDAP', key)
def lastpass(self, key=None):
if key is None:
return self.get_section('LASTPASS')
return self.get_value('LASTPASS', key)
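# Hedged usage sketch (assumes a config/config.ini beside this module with
# [LDAP] and [LASTPASS] sections):
#
#   cfg = BaseConfig()
#   cfg.ldap('host')    # -> value of "host" under [LDAP]
#   cfg.lastpass()      # -> list of (key, value) pairs under [LASTPASS]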
|
"""
Filename: correct_mask.py
Author: Damien Irving, irving.damien@gmail.com
Description: Correct a bogus mask (e.g. some models put 1.0 or Nan as the mask value)
"""
# Import general Python modules
import sys, os, pdb
import argparse
import numpy
import iris
import cmdline_provenance as cmdprov
# Import my modules
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
repo_dir = os.path.join(repo_dir, directory)
if directory == 'ocean-analysis':
break
modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)
try:
import general_io as gio
except ImportError:
raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')
# Define functions
def main(inargs):
"""Run the program."""
cube = iris.load_cube(inargs.infile, inargs.var)
cube.data = numpy.ma.masked_invalid(cube.data)
if inargs.fill_value:
cube.data = numpy.ma.masked_where(cube.data >= cube.data.fill_value, cube.data)
if inargs.mask_value:
cube.data = numpy.ma.masked_where(cube.data == inargs.mask_value, cube.data)
cube.attributes['history'] = cmdprov.new_log(git_repo=repo_dir, infile_history={inargs.infile: cube.attributes['history']})
iris.save(cube, inargs.outfile)
if __name__ == '__main__':
extra_info ="""
author:
Damien Irving, irving.damien@gmail.com
"""
description='Correct a bogus mask (e.g. some models put 1.0 or Nan as the mask value)'
parser = argparse.ArgumentParser(description=description,
epilog=extra_info,
argument_default=argparse.SUPPRESS,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("infile", type=str, help="Input files")
parser.add_argument("var", type=str, help="Variable standard_name")
parser.add_argument("outfile", type=str, help="Output file name")
parser.add_argument("--fill_value", action="store_true", default=False,
help="Mask points greater or equal to the fill value")
parser.add_argument("--mask_value", type=float, default=None,
help="Value to mask")
args = parser.parse_args()
main(args)
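# Hedged CLI sketch (argument names as defined above; file names are made up):
#
#   python correct_mask.py input.nc sea_water_salinity output.nc --fill_value
#   python correct_mask.py input.nc sea_water_salinity output.nc --mask_value 1.0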
|
# In a soccer championship there are 5 teams, each with eleven players.
# Write a program that reads the age and weight of each player.
# Compute and display:
# - the number of players under 18 years of age;
# - the average age of the players on each team;
# - the percentage of players weighing more than 80 kilos
#   among all the players in the championship.
time = 2  # number of teams (reduced from 5 for testing)
jog = 3   # players per team (reduced from 11 for testing)
qtd_inf = total_pes = qtd80 = pctg = 0
for i in range(time):
    print('Team:', i + 1)
    soma = media = 0
    for k in range(jog):
        idade = int(input('Enter the age: '))
        peso = float(input('Enter the weight: '))
        if idade < 18:
            qtd_inf += 1
        if peso > 80:
            qtd80 += 1
        total_pes += 1
        soma += idade
    media = soma / jog
    print('The average age of the players on this team is:', media)
print('The number of players under 18 years of age is:', qtd_inf)
pctg = qtd80 / total_pes * 100
print('The percentage of players weighing more than 80 kilos among all championship players is:', pctg)
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define API Loggers."""
import json
from google.protobuf.json_format import MessageToJson
class Logger(object):
"""Loggers represent named targets for log entries.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.logs
:type name: string
:param name: the name of the logger
:type client: :class:`google.cloud.logging.client.Client`
:param client: A client which holds credentials and project configuration
for the logger (which requires a project).
:type labels: dict or :class:`NoneType`
:param labels: (optional) mapping of default labels for entries written
via this logger.
"""
def __init__(self, name, client, labels=None):
self.name = name
self._client = client
self.labels = labels
@property
def client(self):
"""Clent bound to the logger."""
return self._client
@property
def project(self):
"""Project bound to the logger."""
return self._client.project
@property
def full_name(self):
"""Fully-qualified name used in logging APIs"""
return 'projects/%s/logs/%s' % (self.project, self.name)
@property
def path(self):
"""URI path for use in logging APIs"""
return '/%s' % (self.full_name,)
def _require_client(self, client):
"""Check client or verify over-ride.
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current logger.
:rtype: :class:`google.cloud.logging.client.Client`
:returns: The client passed in or the currently bound client.
"""
if client is None:
client = self._client
return client
def batch(self, client=None):
"""Return a batch to use as a context manager.
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current topic.
:rtype: :class:`Batch`
:returns: A batch to use as a context manager.
"""
client = self._require_client(client)
return Batch(self, client)
def _make_entry_resource(self, text=None, info=None, message=None,
labels=None, insert_id=None, severity=None,
http_request=None):
"""Return a log entry resource of the appropriate type.
Helper for :meth:`log_text`, :meth:`log_struct`, and :meth:`log_proto`.
Only one of ``text``, ``info``, or ``message`` should be passed.
:type text: string or :class:`NoneType`
:param text: text payload
:type info: dict or :class:`NoneType`
:param info: struct payload
:type message: Protobuf message or :class:`NoneType`
:param message: protobuf payload
:type labels: dict or :class:`NoneType`
:param labels: labels passed in to calling method.
:type insert_id: string or :class:`NoneType`
:param insert_id: (optional) unique ID for log entry.
:type severity: string or :class:`NoneType`
:param severity: (optional) severity of event being logged.
:type http_request: dict or :class:`NoneType`
:param http_request: (optional) info about HTTP request associated with
the entry
:rtype: dict
:returns: The JSON resource created.
"""
resource = {
'logName': self.full_name,
'resource': {'type': 'global'},
}
if text is not None:
resource['textPayload'] = text
if info is not None:
resource['jsonPayload'] = info
if message is not None:
as_json_str = MessageToJson(message)
as_json = json.loads(as_json_str)
resource['protoPayload'] = as_json
if labels is None:
labels = self.labels
if labels is not None:
resource['labels'] = labels
if insert_id is not None:
resource['insertId'] = insert_id
if severity is not None:
resource['severity'] = severity
if http_request is not None:
resource['httpRequest'] = http_request
return resource
def log_text(self, text, client=None, labels=None, insert_id=None,
severity=None, http_request=None):
"""API call: log a text message via a POST request
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/write
:type text: text
:param text: the log message.
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current logger.
:type labels: dict or :class:`NoneType`
:param labels: (optional) mapping of labels for the entry.
:type insert_id: string or :class:`NoneType`
:param insert_id: (optional) unique ID for log entry.
:type severity: string or :class:`NoneType`
:param severity: (optional) severity of event being logged.
:type http_request: dict or :class:`NoneType`
:param http_request: (optional) info about HTTP request associated with
the entry
"""
client = self._require_client(client)
entry_resource = self._make_entry_resource(
text=text, labels=labels, insert_id=insert_id, severity=severity,
http_request=http_request)
client.logging_api.write_entries([entry_resource])
def log_struct(self, info, client=None, labels=None, insert_id=None,
severity=None, http_request=None):
"""API call: log a structured message via a POST request
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/write
:type info: dict
:param info: the log entry information
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current logger.
:type labels: dict or :class:`NoneType`
:param labels: (optional) mapping of labels for the entry.
:type insert_id: string or :class:`NoneType`
:param insert_id: (optional) unique ID for log entry.
:type severity: string or :class:`NoneType`
:param severity: (optional) severity of event being logged.
:type http_request: dict or :class:`NoneType`
:param http_request: (optional) info about HTTP request associated with
the entry.
"""
client = self._require_client(client)
entry_resource = self._make_entry_resource(
info=info, labels=labels, insert_id=insert_id, severity=severity,
http_request=http_request)
client.logging_api.write_entries([entry_resource])
def log_proto(self, message, client=None, labels=None, insert_id=None,
severity=None, http_request=None):
"""API call: log a protobuf message via a POST request
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/write
:type message: Protobuf message
:param message: the message to be logged
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current logger.
:type labels: dict or :class:`NoneType`
:param labels: (optional) mapping of labels for the entry.
:type insert_id: string or :class:`NoneType`
:param insert_id: (optional) unique ID for log entry.
:type severity: string or :class:`NoneType`
:param severity: (optional) severity of event being logged.
:type http_request: dict or :class:`NoneType`
:param http_request: (optional) info about HTTP request associated with
the entry.
"""
client = self._require_client(client)
entry_resource = self._make_entry_resource(
message=message, labels=labels, insert_id=insert_id,
severity=severity, http_request=http_request)
client.logging_api.write_entries([entry_resource])
def delete(self, client=None):
"""API call: delete all entries in a logger via a DELETE request
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.logs/delete
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current logger.
"""
client = self._require_client(client)
client.logging_api.logger_delete(self.project, self.name)
def list_entries(self, projects=None, filter_=None, order_by=None,
page_size=None, page_token=None):
"""Return a page of log entries.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/list
:type projects: list of strings
:param projects: project IDs to include. If not passed,
defaults to the project bound to the client.
:type filter_: string
:param filter_: a filter expression. See:
https://cloud.google.com/logging/docs/view/advanced_filters
:type order_by: string
:param order_by: One of :data:`~google.cloud.logging.client.ASCENDING`
or :data:`~google.cloud.logging.client.DESCENDING`.
:type page_size: int
:param page_size: maximum number of entries to return, If not passed,
defaults to a value set by the API.
:type page_token: string
:param page_token: opaque marker for the next "page" of entries. If not
passed, the API will return the first page of
entries.
:rtype: tuple, (list, str)
:returns: list of :class:`google.cloud.logging.entry.TextEntry`, plus a
"next page token" string: if not None, indicates that
more entries can be retrieved with another call (pass that
value as ``page_token``).
"""
log_filter = 'logName=%s' % (self.full_name,)
if filter_ is not None:
filter_ = '%s AND %s' % (filter_, log_filter)
else:
filter_ = log_filter
return self.client.list_entries(
projects=projects, filter_=filter_, order_by=order_by,
page_size=page_size, page_token=page_token)
class Batch(object):
"""Context manager: collect entries to log via a single API call.
Helper returned by :meth:`Logger.batch`
:type logger: :class:`google.cloud.logging.logger.Logger`
:param logger: the logger to which entries will be logged.
:type client: :class:`google.cloud.logging.client.Client`
:param client: The client to use.
"""
def __init__(self, logger, client):
self.logger = logger
self.entries = []
self.client = client
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.commit()
def log_text(self, text, labels=None, insert_id=None, severity=None,
http_request=None):
"""Add a text entry to be logged during :meth:`commit`.
:type text: string
:param text: the text entry
:type labels: dict or :class:`NoneType`
:param labels: (optional) mapping of labels for the entry.
:type insert_id: string or :class:`NoneType`
:param insert_id: (optional) unique ID for log entry.
:type severity: string or :class:`NoneType`
:param severity: (optional) severity of event being logged.
:type http_request: dict or :class:`NoneType`
:param http_request: (optional) info about HTTP request associated with
the entry.
"""
self.entries.append(
('text', text, labels, insert_id, severity, http_request))
def log_struct(self, info, labels=None, insert_id=None, severity=None,
http_request=None):
"""Add a struct entry to be logged during :meth:`commit`.
:type info: dict
:param info: the struct entry
:type labels: dict or :class:`NoneType`
:param labels: (optional) mapping of labels for the entry.
:type insert_id: string or :class:`NoneType`
:param insert_id: (optional) unique ID for log entry.
:type severity: string or :class:`NoneType`
:param severity: (optional) severity of event being logged.
:type http_request: dict or :class:`NoneType`
:param http_request: (optional) info about HTTP request associated with
the entry.
"""
self.entries.append(
('struct', info, labels, insert_id, severity, http_request))
def log_proto(self, message, labels=None, insert_id=None, severity=None,
http_request=None):
"""Add a protobuf entry to be logged during :meth:`commit`.
:type message: protobuf message
:param message: the protobuf entry
:type labels: dict or :class:`NoneType`
:param labels: (optional) mapping of labels for the entry.
:type insert_id: string or :class:`NoneType`
:param insert_id: (optional) unique ID for log entry.
:type severity: string or :class:`NoneType`
:param severity: (optional) severity of event being logged.
:type http_request: dict or :class:`NoneType`
:param http_request: (optional) info about HTTP request associated with
the entry.
"""
self.entries.append(
('proto', message, labels, insert_id, severity, http_request))
def commit(self, client=None):
"""Send saved log entries as a single API call.
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current batch.
"""
if client is None:
client = self.client
kwargs = {
'logger_name': self.logger.full_name,
'resource': {'type': 'global'},
}
if self.logger.labels is not None:
kwargs['labels'] = self.logger.labels
entries = []
for entry_type, entry, labels, iid, severity, http_req in self.entries:
if entry_type == 'text':
info = {'textPayload': entry}
elif entry_type == 'struct':
info = {'jsonPayload': entry}
elif entry_type == 'proto':
as_json_str = MessageToJson(entry)
as_json = json.loads(as_json_str)
info = {'protoPayload': as_json}
else:
raise ValueError('Unknown entry type: %s' % (entry_type,))
if labels is not None:
info['labels'] = labels
if iid is not None:
info['insertId'] = iid
if severity is not None:
info['severity'] = severity
if http_req is not None:
info['httpRequest'] = http_req
entries.append(info)
client.logging_api.write_entries(entries, **kwargs)
del self.entries[:]
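# Hedged usage sketch (client construction elided; only methods defined
# above are used):
#
#   logger = Logger('my-log', client)
#   with logger.batch() as batch:
#       batch.log_text('hello')
#       batch.log_struct({'key': 'value'})
#   # on a clean exit, commit() sends both entries in one write_entries call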
|
from flask import Blueprint, render_template, request
import requests
import os
from app.views.main.main import *
"""
### Pa .não sei se vou fazer o user. LACK OF TIME
Blueprints: Are a way to organize our project. So to put it simpler, is a means to organize our project in folders
In python each folder is a module.
The blueprint that i plain to implement in this app are:
main_blueprint: will be resposible for displaying different views of the main website
errors_blueprint: display views for errors managing
users_blueprint: Will be responsible for logging in/out for/from our webpage, password resets, e-mail confirmations
Each of these blupeints will have their own set of templates
"""
###################
# public blueprint #
###################
main = Blueprint('main', __name__)
@main.route("/", methods=['GET'])
def home():
return render_template("base/base.html")
@main.route('/user', methods=['POST','OPTIONS'])
def Flask_f1():
global i
global frist
    frist = 1
    i = 1
if request.method == 'POST':
#origin = request.headers.get('Origin')
print(request.data)
user_json = json.loads(request.data.decode(FORMAT))
ConfigureStartExperiment(user_json)
return '' #jsonify({'JSON Enviado' : request.args.get('JSON'), 'result': 'OK!'})
elif request.method == 'OPTIONS':
return ''
@main.route('/resultpoint', methods=['GET'])
def getPoint():
global end
exp_data = receive_data_from_exp()
send_data = {"msg_id" : "11",
"timestamp" : str(time.time_ns()),
"status" : "waiting",
"Data" : " "}
if exp_data == "DATA_END": #and end == 0:
send_data = {"msg_id":"11",
"timestamp": str(time.time_ns()),
"status":"Experiment Ended",
"Data":" "}
else :
send_data = {"msg_id" : "11",
"timestamp" : str(time.time_ns()),
"status" : "running",
"Data" : exp_data}
print(send_data)
return send_data
|
from sklearn.base import TransformerMixin
import pandas as pd
import numpy as np
from time import time
class IndexBasedTransformer(TransformerMixin):
def __init__(self, case_id_col, cat_cols, num_cols, max_events=None, fillna=True, create_dummies=True):
self.case_id_col = case_id_col
self.cat_cols = cat_cols
self.num_cols = num_cols
self.max_events = max_events
self.fillna = fillna
self.create_dummies = create_dummies
self.columns = None
self.fit_time = 0
self.transform_time = 0
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
start = time()
grouped = X.groupby(self.case_id_col, as_index=False)
if self.max_events is None:
self.max_events = grouped.size().max()
dt_transformed = pd.DataFrame(grouped.apply(lambda x: x.name), columns=[self.case_id_col])
for i in range(self.max_events):
dt_index = grouped.nth(i)[[self.case_id_col] + self.cat_cols + self.num_cols]
dt_index.columns = [self.case_id_col] + ["%s_%s"%(col, i) for col in self.cat_cols] + ["%s_%s"%(col, i) for col in self.num_cols]
dt_transformed = pd.merge(dt_transformed, dt_index, on=self.case_id_col, how="left")
dt_transformed.index = dt_transformed[self.case_id_col]
# one-hot-encode cat cols
if self.create_dummies:
all_cat_cols = ["%s_%s"%(col, i) for col in self.cat_cols for i in range(self.max_events)]
dt_transformed = pd.get_dummies(dt_transformed, columns=all_cat_cols).drop(self.case_id_col, axis=1)
# fill missing values with 0-s
if self.fillna:
dt_transformed = dt_transformed.fillna(0)
# add missing columns if necessary
if self.columns is None:
self.columns = dt_transformed.columns
else:
missing_cols = [col for col in self.columns if col not in dt_transformed.columns]
for col in missing_cols:
dt_transformed[col] = 0
dt_transformed = dt_transformed[self.columns]
self.transform_time = time() - start
return dt_transformed
def get_feature_names(self):
return self.columns
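# Hedged usage sketch (made-up event log; exact dummy-column names depend on
# the data and the pandas version):
#
#   df = pd.DataFrame({'case': [1, 1, 2], 'act': ['a', 'b', 'a'], 'amt': [1., 2., 3.]})
#   enc = IndexBasedTransformer(case_id_col='case', cat_cols=['act'], num_cols=['amt'])
#   wide = enc.transform(df)  # one row per case: act_0_*, act_1_*, amt_0, amt_1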
|
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
import cupy as cp
import numpy as np
import cuml.internals
from cuml.common.array import CumlArray
from cuml.common.input_utils import input_to_cupy_array
import math
@cuml.internals.api_return_generic(get_output_type=True)
def precision_recall_curve(
y_true, probs_pred) -> typing.Tuple[CumlArray, CumlArray, CumlArray]:
"""
Compute precision-recall pairs for different probability thresholds
.. note:: this implementation is restricted to the binary classification
task. The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the
number of true positives and ``fp`` the number of false positives. The
precision is intuitively the ability of the classifier not to label as
positive a sample that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number
of true positives and ``fn`` the number of false negatives. The recall
is intuitively the ability of the classifier to find all the positive
samples. The last precision and recall values are 1. and 0.
respectively and do not have a corresponding threshold. This ensures
that the graph starts on the y axis.
Read more in the scikit-learn's `User Guide
<https://scikit-learn.org/stable/modules/model_evaluation.html#precision-recall-f-measure-metrics>`_.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels, {0, 1}.
    probs_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
    thresholds : array, shape = [n_thresholds <= len(np.unique(probs_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
.. code-block:: python
import numpy as np
from cuml.metrics import precision_recall_curve
y_true = np.array([0, 0, 1, 1])
y_scores = np.array([0.1, 0.4, 0.35, 0.8])
precision, recall, thresholds = precision_recall_curve(
y_true, y_scores)
print(precision)
print(recall)
print(thresholds)
Output:
.. code-block:: python
array([0.66666667, 0.5 , 1. , 1. ])
array([1. , 0.5, 0.5, 0. ])
array([0.35, 0.4 , 0.8 ])
"""
y_true, n_rows, n_cols, ytype = \
input_to_cupy_array(y_true, check_dtype=[np.int32, np.int64,
np.float32, np.float64])
y_score, _, _, _ = \
input_to_cupy_array(probs_pred, check_dtype=[np.int32, np.int64,
np.float32, np.float64],
check_rows=n_rows, check_cols=n_cols)
    if not cp.any(y_true):
raise ValueError("precision_recall_curve cannot be used when "
"y_true is all zero.")
fps, tps, thresholds = _binary_clf_curve(y_true, y_score)
precision = cp.flip(tps/(tps+fps), axis=0)
recall = cp.flip(tps/tps[-1], axis=0)
n = (recall == 1).sum()
if n > 1:
precision = precision[n-1:]
recall = recall[n-1:]
thresholds = thresholds[n-1:]
precision = cp.concatenate([precision, cp.ones(1)])
recall = cp.concatenate([recall, cp.zeros(1)])
return precision, recall, thresholds
@cuml.internals.api_return_any()
def roc_auc_score(y_true, y_score):
"""
Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC)
from prediction scores.
.. note:: this implementation can only be used with binary classification.
Parameters
----------
y_true : array-like of shape (n_samples,)
True labels. The binary cases
expect labels with shape (n_samples,)
y_score : array-like of shape (n_samples,)
Target scores. In the binary cases, these can be either
probability estimates or non-thresholded decision values (as returned
by `decision_function` on some classifiers). The binary
case expects a shape (n_samples,), and the scores must be the scores of
the class with the greater label.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from cuml.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> print(roc_auc_score(y_true, y_scores))
0.75
"""
y_true, n_rows, n_cols, ytype = \
input_to_cupy_array(y_true, check_dtype=[np.int32, np.int64,
np.float32, np.float64])
y_score, _, _, _ = \
input_to_cupy_array(y_score, check_dtype=[np.int32, np.int64,
np.float32, np.float64],
check_rows=n_rows, check_cols=n_cols)
return _binary_roc_auc_score(y_true, y_score)
def _binary_clf_curve(y_true, y_score):
if y_true.dtype.kind == 'f' and np.any(y_true != y_true.astype(int)):
raise ValueError("Continuous format of y_true "
"is not supported.")
ids = cp.argsort(-y_score)
sorted_score = y_score[ids]
ones = y_true[ids].astype('float32') # for calculating true positives
zeros = 1 - ones # for calculating predicted positives
# calculate groups
group = _group_same_scores(sorted_score)
num = int(group[-1])
tps = cp.zeros(num, dtype='float32')
fps = cp.zeros(num, dtype='float32')
tps = _addup_x_in_group(group, ones, tps)
fps = _addup_x_in_group(group, zeros, fps)
tps = cp.cumsum(tps)
fps = cp.cumsum(fps)
thresholds = cp.unique(y_score)
return fps, tps, thresholds
def _binary_roc_auc_score(y_true, y_score):
"""Compute binary roc_auc_score using cupy"""
if cp.unique(y_true).shape[0] == 1:
raise ValueError("roc_auc_score cannot be used when "
"only one class present in y_true. ROC AUC score "
"is not defined in that case.")
if cp.unique(y_score).shape[0] == 1:
return 0.5
fps, tps, thresholds = _binary_clf_curve(y_true, y_score)
tpr = tps/tps[-1]
fpr = fps/fps[-1]
return _calculate_area_under_curve(fpr, tpr).item()
def _addup_x_in_group(group, x, result):
addup_x_in_group_kernel = cp.RawKernel(r'''
extern "C" __global__
void addup_x_in_group(const int* group, const float* x,
float* result, int N)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid<N){
atomicAdd(result + group[tid] - 1, x[tid]);
}
}
''', 'addup_x_in_group')
N = x.shape[0]
tpb = 256
bpg = math.ceil(N/tpb)
addup_x_in_group_kernel((bpg,), (tpb,), (group, x, result, N))
return result
def _group_same_scores(sorted_score):
mask = cp.empty(sorted_score.shape, dtype=cp.bool_)
mask[0] = True
mask[1:] = sorted_score[1:] != sorted_score[:-1]
group = cp.cumsum(mask, dtype=cp.int32)
return group
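# Example: sorted_score = [0.9, 0.9, 0.5] gives mask = [True, False, True]
# and group = [1, 1, 2]; tied scores share a group id, so _addup_x_in_group
# accumulates true/false positives per distinct threshold.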
def _calculate_area_under_curve(fpr, tpr):
"""helper function to calculate area under curve given fpr & tpr arrays"""
return cp.sum((fpr[1:]-fpr[:-1])*(tpr[1:]+tpr[:-1]))/2 + tpr[0]*fpr[0]/2
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .dataset import Dataset
class PostgreSqlTableDataset(Dataset):
"""The PostgreSQL table dataset.
All required parameters must be populated in order to send to Azure.
    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param description: Dataset description.
:type description: str
:param structure: Columns that define the structure of the dataset. Type:
array (or Expression with resultType array), itemType: DatasetDataElement.
:type structure: object
:param schema: Columns that define the physical type schema of the
dataset. Type: array (or Expression with resultType array), itemType:
DatasetSchemaDataElement.
:type schema: object
:param linked_service_name: Required. Linked service reference.
:type linked_service_name:
~azure.mgmt.datafactory.models.LinkedServiceReference
:param parameters: Parameters for dataset.
:type parameters: dict[str,
~azure.mgmt.datafactory.models.ParameterSpecification]
:param annotations: List of tags that can be used for describing the
Dataset.
:type annotations: list[object]
:param folder: The folder that this Dataset is in. If not specified,
Dataset will appear at the root level.
:type folder: ~azure.mgmt.datafactory.models.DatasetFolder
:param type: Required. Constant filled by server.
:type type: str
:param table_name: This property will be retired. Please consider using
schema + table properties instead.
:type table_name: object
:param table: The PostgreSQL table name. Type: string (or Expression with
resultType string).
:type table: object
:param postgre_sql_table_dataset_schema: The PostgreSQL schema name. Type:
string (or Expression with resultType string).
:type postgre_sql_table_dataset_schema: object
"""
_validation = {
'linked_service_name': {'required': True},
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'description': {'key': 'description', 'type': 'str'},
'structure': {'key': 'structure', 'type': 'object'},
'schema': {'key': 'schema', 'type': 'object'},
'linked_service_name': {'key': 'linkedServiceName', 'type': 'LinkedServiceReference'},
'parameters': {'key': 'parameters', 'type': '{ParameterSpecification}'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'folder': {'key': 'folder', 'type': 'DatasetFolder'},
'type': {'key': 'type', 'type': 'str'},
'table_name': {'key': 'typeProperties.tableName', 'type': 'object'},
'table': {'key': 'typeProperties.table', 'type': 'object'},
'postgre_sql_table_dataset_schema': {'key': 'typeProperties.schema', 'type': 'object'},
}
def __init__(self, **kwargs):
super(PostgreSqlTableDataset, self).__init__(**kwargs)
self.table_name = kwargs.get('table_name', None)
self.table = kwargs.get('table', None)
self.postgre_sql_table_dataset_schema = kwargs.get('postgre_sql_table_dataset_schema', None)
self.type = 'PostgreSqlTable'
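# A hedged construction sketch (not part of the generated code above);
# 'MyPostgresLinkedService', 'public' and 'customers' are placeholder values,
# and LinkedServiceReference is assumed to live in the same models package.
if __name__ == '__main__':
    from azure.mgmt.datafactory.models import LinkedServiceReference
    dataset = PostgreSqlTableDataset(
        linked_service_name=LinkedServiceReference(
            reference_name='MyPostgresLinkedService'),
        postgre_sql_table_dataset_schema='public',
        table='customers',
    )
    # 'type' is the discriminator constant set in __init__.
    assert dataset.type == 'PostgreSqlTable'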
|
import numpy as np
import pytest
from smcpy import SMCSampler
@pytest.fixture
def phi_sequence():
return np.linspace(0, 1, 11)
@pytest.fixture
def step_list(phi_sequence, mocker):
num_particles = 5
step_list = []
for phi in phi_sequence[1:]:
particles = mocker.Mock()
particles.log_weights = np.ones(num_particles).reshape(-1, 1)
particles.log_likes = np.ones(num_particles).reshape(-1, 1) * phi
particles.num_particles = num_particles
step_list.append(particles)
return step_list
@pytest.mark.parametrize('rank', [0, 1, 2])
@pytest.mark.parametrize('prog_bar', [True, False])
def test_sample(mocker, rank, prog_bar):
num_particles = 100
num_steps = 10
num_mcmc_samples = 2
phi_sequence = np.ones(num_steps)
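    # Note: this rebinds the parametrized `prog_bar` flag to the tqdm mock;
    # the same mock is then passed as `progress_bar` and inspected through
    # `prog_bar.call_args` at the end of the test.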
prog_bar = mocker.patch('smcpy.smc_sampler.tqdm',
return_value=phi_sequence[2:])
expected_step_list = np.ones((num_steps - 1, num_particles))
expected_step_list[1:] = expected_step_list[1:] + 2
init_particles = np.array([1] * num_particles)
mocked_initializer = mocker.Mock()
mocked_initializer.init_particles_from_prior.return_value = init_particles
init = mocker.patch('smcpy.smc_sampler.Initializer',
return_value=mocked_initializer)
upd_mock = mocker.Mock()
upd_mock.resample_if_needed = lambda x: x
upd = mocker.patch('smcpy.smc_sampler.Updater', return_value=upd_mock)
mocked_mutator = mocker.Mock()
mocked_mutator.mutate.return_value = np.array([3] * num_particles)
mut = mocker.patch('smcpy.smc_sampler.Mutator', return_value=mocked_mutator)
update_bar = mocker.patch('smcpy.smc_sampler.set_bar')
mcmc_kernel = mocker.Mock()
mcmc_kernel._mcmc = mocker.Mock()
mcmc_kernel._mcmc._rank = rank
comm = mcmc_kernel._mcmc._comm = mocker.Mock()
mocker.patch.object(comm, 'bcast', new=lambda x, root: x)
ess_threshold = 0.2
smc = SMCSampler(mcmc_kernel)
mut_ratio = mocker.patch.object(smc, '_compute_mutation_ratio')
mll_est = mocker.patch.object(smc, '_estimate_marginal_log_likelihoods')
step_list, mll = smc.sample(num_particles, num_mcmc_samples, phi_sequence,
ess_threshold, progress_bar=prog_bar)
init.assert_called_once_with(smc._mcmc_kernel)
upd.assert_called_once_with(ess_threshold)
mut.assert_called_once_with(smc._mcmc_kernel)
np.testing.assert_array_equal(prog_bar.call_args[0][0], phi_sequence[1:])
update_bar.assert_called()
mll_est.assert_called_once()
assert len(step_list) == len(phi_sequence) - 1
assert mll is not None
np.testing.assert_array_equal(step_list, expected_step_list)
def test_sample_with_proposal(mocker):
mocked_init = mocker.Mock()
mocker.patch('smcpy.smc_sampler.Initializer', return_value=mocked_init)
mocker.patch('smcpy.smc_sampler.Updater')
mocker.patch('smcpy.smc_sampler.Mutator')
mcmc_kernel = mocker.Mock()
proposal_dist = mocker.Mock()
num_particles = 100
num_steps = 10
num_mcmc_samples = 2
ess_threshold = 0.2
phi_sequence = np.ones(num_steps)
prog_bar = False
proposal = ({'x1': np.array([1, 2]), 'x2': np.array([3, 3])},
np.array([0.1, 0.1]))
smc = SMCSampler(mcmc_kernel)
mocker.patch.object(smc, '_compute_mutation_ratio')
mocker.patch.object(smc, '_estimate_marginal_log_likelihoods')
_ = smc.sample(num_particles, num_mcmc_samples, phi_sequence,
ess_threshold, proposal, prog_bar)
mocked_init.init_particles_from_prior.assert_not_called()
mocked_init.init_particles_from_samples.assert_called_once()
call_args = mocked_init.init_particles_from_samples.call_args[0]
for args in zip(call_args, proposal):
np.testing.assert_array_equal(*args)
@pytest.mark.parametrize('steps', [2, 3, 4, 10])
def test_marginal_likelihood_estimator(mocker, steps):
updater = mocker.Mock()
unnorm_weights = np.ones((5, 1))
updater._unnorm_log_weights = [np.log(unnorm_weights) for _ in range(steps)]
expected_Z = [1] + [5 ** (i + 1) for i in range(steps)]
Z = SMCSampler(None)._estimate_marginal_log_likelihoods(updater)
np.testing.assert_array_almost_equal(Z, np.log(expected_Z))
@pytest.mark.parametrize('new_param_array, expected_ratio',
((np.array([[1, 2], [0, 0], [0, 0], [0, 0]]), 0.25),
(np.array([[0, 0], [0, 0], [0, 0], [0, 0]]), 0.00),
(np.array([[0, 1], [1, 1], [2, 0], [1, 1]]), 1.00),
(np.array([[2, 0], [2, 0], [0, 1], [0, 0]]), 0.75)))
def test_calc_mutation_ratio(mocker, new_param_array, expected_ratio):
old_particles = mocker.Mock()
old_particles.params = np.zeros((4, 2))
new_particles = mocker.Mock()
new_particles.params = new_param_array
ratio = SMCSampler._compute_mutation_ratio(old_particles, new_particles)
assert ratio == expected_ratio
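# A hedged sketch of the call pattern exercised above (not part of the test
# module). `build_mcmc_kernel` is a hypothetical factory standing in for a
# real, configured smcpy MCMC kernel, and the keyword names simply mirror the
# positional arguments used in the tests.
if __name__ == '__main__':
    mcmc_kernel = build_mcmc_kernel()  # hypothetical; construction not shown
    smc = SMCSampler(mcmc_kernel)
    step_list, mll = smc.sample(num_particles=500,
                                num_mcmc_samples=5,
                                phi_sequence=np.linspace(0, 1, 11),
                                ess_threshold=0.7,
                                progress_bar=True)
    # step_list holds one particle set per tempering step; mll is the
    # estimated marginal log likelihood.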
|
# -*- coding:utf-8 -*-
from emgen.cli.main import main
|
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class MeasureImprovementNotationCode(GenericTypeCode):
"""
MeasureImprovementNotation
From: http://terminology.hl7.org/CodeSystem/measure-improvement-notation in valuesets.xml
Observation values that indicate what change in a measurement value or score
is indicative of an improvement in the measured item or scored issue.
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://terminology.hl7.org/CodeSystem/measure-improvement-notation
"""
codeset: FhirUri = (
"http://terminology.hl7.org/CodeSystem/measure-improvement-notation"
)
class MeasureImprovementNotationCodeValues:
"""
Improvement is indicated as an increase in the score or measurement (e.g.
Higher score indicates better quality).
From: http://terminology.hl7.org/CodeSystem/measure-improvement-notation in valuesets.xml
"""
IncreasedScoreIndicatesImprovement = MeasureImprovementNotationCode("increase")
"""
Improvement is indicated as a decrease in the score or measurement (e.g. Lower
score indicates better quality).
From: http://terminology.hl7.org/CodeSystem/measure-improvement-notation in valuesets.xml
"""
DecreasedScoreIndicatesImprovement = MeasureImprovementNotationCode("decrease")
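# A small, hedged usage sketch (kept outside the auto-generated classes):
# the value-set members are thin wrappers around the canonical code strings.
if __name__ == '__main__':
    increase = MeasureImprovementNotationCodeValues.IncreasedScoreIndicatesImprovement
    decrease = MeasureImprovementNotationCodeValues.DecreasedScoreIndicatesImprovement
    # Every instance shares the code-system URI declared on the class.
    assert MeasureImprovementNotationCode.codeset == (
        "http://terminology.hl7.org/CodeSystem/measure-improvement-notation"
    )
    print(increase, decrease)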
|
import collections
import logging
from lms import notifications
from lms.lmsdb import models
class IdenticalSolutionSolver:
def __init__(
self,
solution_check_pk: str,
logger: logging.Logger,
):
self._solution_id = solution_check_pk
self._solution = None
self._logger = logger
def initialize(self):
self._solution = models.Solution.get_by_id(self._solution_id)
@property
def solution(self) -> models.Solution:
return self._solution
def check_identical(self):
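        """Adopt the review of an identical, already-checked solution, if any."""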
solution = self._get_first_identical_solution()
if solution is None:
return
self._logger.info(
            'solution %s matched a checked solution %s; '
            'forking the comments and marking it as solved',
self.solution.id, solution.id,
)
self._clone_solution_comments(
from_solution=solution,
to_solution=self.solution,
)
def _get_first_identical_solution(self):
return models.Solution.select().join(
models.Exercise,
).filter(**{
models.Solution.exercise.name:
self.solution.exercise,
models.Solution.state.name:
models.Solution.STATES.DONE.name,
models.Solution.json_data_str.name:
self.solution.json_data_str,
}).first()
def check_for_match_solutions_to_solve(self):
for solution in models.Solution.select().join(
models.Exercise,
).filter(**{
models.Solution.exercise.name:
self.solution.exercise,
models.Solution.state.name:
models.Solution.STATES.CREATED.name,
models.Solution.json_data_str.name:
self.solution.json_data_str,
}):
self._clone_solution_comments(
from_solution=self.solution,
to_solution=solution,
)
@staticmethod
def _clone_solution_comments(
from_solution: models.Solution,
to_solution: models.Solution,
) -> None:
user_comments = models.Comment.by_solution(
from_solution.id,
).filter(~models.Comment.is_auto)
for comment in user_comments:
models.Comment.create_comment(
commenter=models.User.get_system_user(),
line_number=comment.line_number,
comment_text=comment.comment.comment_id,
solution=to_solution,
is_auto=True,
)
to_solution.checker = from_solution.checker
to_solution.state = from_solution.state
to_solution.save()
notifications.create_notification(
notification_type=(notifications.SolutionCheckedNotification
.notification_type()),
for_user=to_solution.solver,
solution=to_solution,
)
@staticmethod
def check_identical_solutions_per_exercise():
same = collections.Counter()
for exercise in models.Exercise.select():
solutions = models.Solution.select().join(models.Exercise).filter(
models.Solution.exercise == exercise)
for solution in solutions:
solution_key = f'{exercise.subject}-{solution.json_data_str}'
if solution_key in same:
continue
count = models.Comment.select().join(models.Solution).filter(
models.Solution.json_data_str == solution.json_data_str,
models.Solution.exercise == exercise,
models.Solution.solver != solution.solver,
).count()
same[solution_key] = count
return same
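# A hedged sketch of the intended call sequence (not part of the module):
# 'some-solution-pk' is a hypothetical primary key, and a configured database
# and application context are assumed.
if __name__ == '__main__':
    solver = IdenticalSolutionSolver('some-solution-pk',
                                     logging.getLogger(__name__))
    solver.initialize()                          # load the Solution row
    solver.check_identical()                     # adopt a matching DONE review
    solver.check_for_match_solutions_to_solve()  # push review to matching CREATED ones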
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A helper for jobs that have been created on the Quantum Engine."""
import time
from typing import Dict, Iterator, List, Optional, Tuple, TYPE_CHECKING
from cirq import study
from cirq.google.engine import calibration
from cirq.google.engine.client import quantum
from cirq.google.api import v1, v2
if TYPE_CHECKING:
import datetime
import cirq.google.engine.engine as engine_base
from cirq.google.engine.engine import engine_program
from cirq.google.engine.engine import engine_processor
TERMINAL_STATES = [
quantum.enums.ExecutionStatus.State.SUCCESS,
quantum.enums.ExecutionStatus.State.FAILURE,
quantum.enums.ExecutionStatus.State.CANCELLED
]
class EngineJob:
"""A job created via the Quantum Engine API.
This job may be in a variety of states. It may be scheduling, it may be
executing on a machine, or it may have entered a terminal state
(either succeeding or failing).
Attributes:
project_id: A project_id of the parent Google Cloud Project.
program_id: Unique ID of the program within the parent project.
job_id: Unique ID of the job within the parent program.
"""
def __init__(self,
project_id: str,
program_id: str,
job_id: str,
context: 'engine_base.EngineContext',
_job: Optional[quantum.types.QuantumJob] = None) -> None:
"""A job submitted to the engine.
Args:
project_id: A project_id of the parent Google Cloud Project.
program_id: Unique ID of the program within the parent project.
job_id: Unique ID of the job within the parent program.
context: Engine configuration and context to use.
_job: The optional current job state.
"""
self.project_id = project_id
self.program_id = program_id
self.job_id = job_id
self.context = context
self._job = _job
self._results: Optional[List[study.TrialResult]] = None
def engine(self) -> 'engine_base.Engine':
"""Returns the parent Engine object."""
import cirq.google.engine.engine as engine_base
return engine_base.Engine(self.project_id, context=self.context)
def program(self) -> 'engine_program.EngineProgram':
"""Returns the parent EngineProgram object."""
import cirq.google.engine.engine_program as engine_program
return engine_program.EngineProgram(self.project_id, self.program_id,
self.context)
def _inner_job(self) -> quantum.types.QuantumJob:
if not self._job:
self._job = self.context.client.get_job(self.project_id,
self.program_id,
self.job_id, False)
return self._job
def _refresh_job(self) -> quantum.types.QuantumJob:
if (not self._job or
self._job.execution_status.state not in TERMINAL_STATES):
self._job = self.context.client.get_job(self.project_id,
self.program_id,
self.job_id, False)
return self._job
def create_time(self) -> 'datetime.datetime':
"""Returns when the job was created."""
return self._inner_job().create_time.ToDatetime()
def update_time(self) -> 'datetime.datetime':
"""Returns when the job was last updated."""
self._job = self.context.client.get_job(self.project_id,
self.program_id, self.job_id,
False)
return self._job.update_time.ToDatetime()
def description(self) -> str:
"""Returns the description of the job."""
return self._inner_job().description
def set_description(self, description: str) -> 'EngineJob':
"""Sets the description of the job.
Params:
description: The new description for the job.
Returns:
This EngineJob.
"""
self._job = self.context.client.set_job_description(
self.project_id, self.program_id, self.job_id, description)
return self
def labels(self) -> Dict[str, str]:
"""Returns the labels of the job."""
return self._inner_job().labels
def set_labels(self, labels: Dict[str, str]) -> 'EngineJob':
"""Sets (overwriting) the labels for a previously created quantum job.
Params:
labels: The entire set of new job labels.
Returns:
This EngineJob.
"""
self._job = self.context.client.set_job_labels(self.project_id,
self.program_id,
self.job_id, labels)
return self
def add_labels(self, labels: Dict[str, str]) -> 'EngineJob':
"""Adds new labels to a previously created quantum job.
Params:
labels: New labels to add to the existing job labels.
Returns:
This EngineJob.
"""
self._job = self.context.client.add_job_labels(self.project_id,
self.program_id,
self.job_id, labels)
return self
def remove_labels(self, keys: List[str]) -> 'EngineJob':
"""Removes labels with given keys from the labels of a previously
created quantum job.
Params:
label_keys: Label keys to remove from the existing job labels.
Returns:
This EngineJob.
"""
self._job = self.context.client.remove_job_labels(
self.project_id, self.program_id, self.job_id, keys)
return self
def processor_ids(self) -> List[str]:
"""Returns the processor ids provided when the job was created."""
        return [
            self.context.client._ids_from_processor_name(p)[1]
            for p in self._inner_job()
            .scheduling_config.processor_selector.processor_names
        ]
def status(self) -> str:
"""Return the execution status of the job."""
return quantum.types.ExecutionStatus.State.Name(
self._refresh_job().execution_status.state)
def failure(self) -> Optional[Tuple[str, str]]:
"""Return failure code and message of the job if present."""
if self._inner_job().execution_status.HasField('failure'):
failure = self._inner_job().execution_status.failure
return (quantum.types.ExecutionStatus.Failure.Code.Name(
failure.error_code), failure.error_message)
return None
def get_repetitions_and_sweeps(self) -> Tuple[int, List[study.Sweep]]:
"""Returns the repetitions and sweeps for the Quantum Engine job.
Returns:
A tuple of the repetition count and list of sweeps.
"""
if not self._job or not self._job.HasField('run_context'):
self._job = self.context.client.get_job(self.project_id,
self.program_id,
self.job_id, True)
return self._deserialize_run_context(self._job.run_context)
@staticmethod
def _deserialize_run_context(run_context: quantum.types.any_pb2.Any
) -> Tuple[int, List[study.Sweep]]:
import cirq.google.engine.engine as engine_base
run_context_type = run_context.type_url[len(engine_base.TYPE_PREFIX):]
if (run_context_type == 'cirq.google.api.v1.RunContext' or
run_context_type == 'cirq.api.google.v1.RunContext'):
raise ValueError('deserializing a v1 RunContext is not supported')
if (run_context_type == 'cirq.google.api.v2.RunContext' or
run_context_type == 'cirq.api.google.v2.RunContext'):
v2_run_context = v2.run_context_pb2.RunContext()
v2_run_context.ParseFromString(run_context.value)
return v2_run_context.parameter_sweeps[0].repetitions, [
v2.sweep_from_proto(s.sweep)
for s in v2_run_context.parameter_sweeps
]
raise ValueError(
'unsupported run_context type: {}'.format(run_context_type))
def get_processor(self) -> 'Optional[engine_processor.EngineProcessor]':
"""Returns the EngineProcessor for the processor the job is/was run on,
if available, else None."""
status = self._inner_job().execution_status
if not status.processor_name:
return None
import cirq.google.engine.engine_processor as engine_processor
ids = self.context.client._ids_from_processor_name(
status.processor_name)
return engine_processor.EngineProcessor(ids[0], ids[1], self.context)
def get_calibration(self) -> Optional[calibration.Calibration]:
"""Returns the recorded calibration at the time when the job was run, if
one was captured, else None."""
status = self._inner_job().execution_status
if not status.calibration_name:
return None
ids = self.context.client._ids_from_calibration_name(
status.calibration_name)
response = self.context.client.get_calibration(*ids)
metrics = v2.metrics_pb2.MetricsSnapshot()
metrics.ParseFromString(response.data.value)
return calibration.Calibration(metrics)
def cancel(self) -> None:
"""Cancel the job."""
self.context.client.cancel_job(self.project_id, self.program_id,
self.job_id)
def delete(self) -> None:
"""Deletes the job and result, if any."""
self.context.client.delete_job(self.project_id, self.program_id,
self.job_id)
def results(self) -> List[study.TrialResult]:
"""Returns the job results, blocking until the job is complete.
"""
import cirq.google.engine.engine as engine_base
if not self._results:
job = self._refresh_job()
for _ in range(1000):
if job.execution_status.state in TERMINAL_STATES:
break
time.sleep(0.5)
job = self._refresh_job()
self._raise_on_failure(job)
response = self.context.client.get_job_results(
self.project_id, self.program_id, self.job_id)
result = response.result
result_type = result.type_url[len(engine_base.TYPE_PREFIX):]
if (result_type == 'cirq.google.api.v1.Result' or
result_type == 'cirq.api.google.v1.Result'):
v1_parsed_result = v1.program_pb2.Result()
v1_parsed_result.ParseFromString(result.value)
self._results = self._get_job_results_v1(v1_parsed_result)
elif (result_type == 'cirq.google.api.v2.Result' or
result_type == 'cirq.api.google.v2.Result'):
v2_parsed_result = v2.result_pb2.Result()
v2_parsed_result.ParseFromString(result.value)
self._results = self._get_job_results_v2(v2_parsed_result)
else:
raise ValueError(
'invalid result proto version: {}'.format(result_type))
return self._results
@staticmethod
def _get_job_results_v1(result: v1.program_pb2.Result
) -> List[study.TrialResult]:
trial_results = []
for sweep_result in result.sweep_results:
sweep_repetitions = sweep_result.repetitions
key_sizes = [
(m.key, len(m.qubits)) for m in sweep_result.measurement_keys
]
for result in sweep_result.parameterized_results:
data = result.measurement_results
measurements = v1.unpack_results(data, sweep_repetitions,
key_sizes)
trial_results.append(
study.TrialResult.from_single_parameter_set(
params=study.ParamResolver(result.params.assignments),
measurements=measurements))
return trial_results
@staticmethod
def _get_job_results_v2(result: v2.result_pb2.Result
) -> List[study.TrialResult]:
sweep_results = v2.results_from_proto(result)
# Flatten to single list to match to sampler api.
return [
trial_result for sweep_result in sweep_results
for trial_result in sweep_result
]
@staticmethod
def _raise_on_failure(job: quantum.types.QuantumJob) -> None:
execution_status = job.execution_status
state = execution_status.state
name = job.name
if state != quantum.enums.ExecutionStatus.State.SUCCESS:
if state == quantum.enums.ExecutionStatus.State.FAILURE:
processor = execution_status.processor_name or 'UNKNOWN'
error_code = execution_status.failure.error_code
error_message = execution_status.failure.error_message
raise RuntimeError(
"Job {} on processor {} failed. {}: {}".format(
name, processor,
quantum.types.ExecutionStatus.Failure.Code.Name(
error_code), error_message))
elif state in TERMINAL_STATES:
raise RuntimeError('Job {} failed in state {}.'.format(
name,
quantum.types.ExecutionStatus.State.Name(state),
))
else:
raise RuntimeError(
'Timed out waiting for results. Job {} is in state {}'.
format(name,
quantum.types.ExecutionStatus.State.Name(state)))
def __iter__(self) -> Iterator[study.TrialResult]:
return iter(self.results())
def __str__(self) -> str:
return (f'EngineJob(project_id=\'{self.project_id}\', '
f'program_id=\'{self.program_id}\', job_id=\'{self.job_id}\')')
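# A hedged usage sketch (not part of the module above): the identifiers are
# placeholders, and the get_program/get_job accessors are assumed from the
# surrounding Engine API of this cirq version.
if __name__ == '__main__':
    import cirq
    engine = cirq.google.Engine(project_id='my-project')
    job = engine.get_program('my-program').get_job('my-job')
    print(job.status())       # e.g. 'SUCCESS' once in a terminal state
    for trial_result in job:  # __iter__ blocks on results()
        print(trial_result)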
|
from django.test import TestCase
import datetime
from django.utils import timezone
from locallibrary.catalog.forms import RenewBookForm
class RenewBookFormTest(TestCase):
def test_renew_form_date_field_label(self):
form = RenewBookForm()
        self.assertTrue(form.fields['renewal_date'].label is None or form.fields['renewal_date'].label == 'renewal date')
def test_renew_form_date_field_help_text(self):
form = RenewBookForm()
        self.assertEqual(form.fields['renewal_date'].help_text, 'Enter a date between now and 4 weeks (default 3).')
def test_renew_form_date_in_past(self):
date = datetime.date.today() - datetime.timedelta(days=1)
form_data = {'renewal_date': date}
        form = RenewBookForm(data=form_data)
self.assertFalse(form.is_valid())
def test_renew_form_date_too_far_in_future(self):
        date = datetime.date.today() + datetime.timedelta(weeks=4) + datetime.timedelta(days=1)
        form_data = {'renewal_date': date}
        form = RenewBookForm(data=form_data)
        self.assertFalse(form.is_valid())
def test_renew_form_date_today(self):
date = datetime.date.today()
form_data = {'renewal_date': date}
form = RenewBookForm(data=form_data)
self.assertTrue(form.is_valid())
def test_renew_form_date_max(self):
date = timezone.now() + datetime.timedelta(weeks=4)
form_data = {'renewal_date': date}
form = RenewBookForm(data=form_data)
self.assertTrue(form.is_valid())
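# For context, a hedged sketch of the form these tests exercise, following the
# MDN "local library" tutorial shape. It is shown commented out so it does not
# shadow the imported RenewBookForm; only the help_text and the zero-to-four-
# week window are pinned down by the tests, so the error messages are
# illustrative.
#
# from django import forms
# from django.core.exceptions import ValidationError
#
# class RenewBookForm(forms.Form):
#     renewal_date = forms.DateField(
#         help_text='Enter a date between now and 4 weeks (default 3).')
#
#     def clean_renewal_date(self):
#         data = self.cleaned_data['renewal_date']
#         if data < datetime.date.today():
#             raise ValidationError('Invalid date - renewal in past')
#         if data > datetime.date.today() + datetime.timedelta(weeks=4):
#             raise ValidationError('Invalid date - renewal more than 4 weeks ahead')
#         return data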
|